Branch data Line data Source code
1 : : /*
2 : : * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
3 : : *
4 : : * ARMv7 support: Jean Pihet <jpihet@mvista.com>
5 : : * 2010 (c) MontaVista Software, LLC.
6 : : *
7 : : * Copied from ARMv6 code, with the low level code inspired
8 : : * by the ARMv7 Oprofile code.
9 : : *
10 : : * Cortex-A8 has up to 4 configurable performance counters and
11 : : * a single cycle counter.
12 : : * Cortex-A9 has up to 31 configurable performance counters and
13 : : * a single cycle counter.
14 : : *
15 : : * All counters can be enabled/disabled and IRQ masked separately. The cycle
16 : : * counter and all 4 performance counters together can be reset separately.
17 : : */
18 : :
19 : : #ifdef CONFIG_CPU_V7
20 : :
21 : : /*
22 : : * Common ARMv7 event types
23 : : *
24 : : * Note: An implementation may not be able to count all of these events
25 : : * but the encodings are considered to be `reserved' in the case that
26 : : * they are not available.
27 : : */
/* Architected ARMv7 PMU event numbers (programmed into PMXEVTYPER). */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV7_PERFCTR_MEM_READ				= 0x06,
	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
	ARMV7_PERFCTR_CID_WRITE				= 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,

	/*
	 * Pseudo event number used by this driver to request the dedicated
	 * cycle counter rather than one of the programmable event counters.
	 */
	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
};
72 : :
73 : : /* ARMv7 Cortex-A8 specific event types */
/*
 * ARMv7 Cortex-A8 specific event types.
 * Event numbers >= 0x40 are implementation defined; these are the A8's
 * encodings for the L2 cache, L1 I-cache and I-side stall events.
 */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
};
80 : :
81 : : /* ARMv7 Cortex-A9 specific event types */
/*
 * ARMv7 Cortex-A9 specific event types (implementation-defined range).
 * INSTR_CORE_RENAME is used in place of the architected "instructions
 * executed" event for PERF_COUNT_HW_INSTRUCTIONS on A9 (see the map below).
 */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
};
87 : :
88 : : /* ARMv7 Cortex-A5 specific event types */
/*
 * ARMv7 Cortex-A5 specific event types (implementation-defined range).
 * These count hardware prefetcher linefills and prefetched lines that
 * were dropped; used for the C(OP_PREFETCH) slots in the cache map.
 */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
};
93 : :
94 : : /* ARMv7 Cortex-A15 specific event types */
/*
 * ARMv7 Cortex-A15 specific event types (implementation-defined range).
 * Unlike the architected events, these differentiate between read and
 * write accesses/refills for the L1 D-cache, L1 DTLB and L2 cache.
 */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,

	/* Speculative PC-write: A15's substitute for HW_BRANCH_INSTRUCTIONS. */
	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
};
111 : :
112 : : /*
113 : : * Cortex-A8 HW events mapping
114 : : *
115 : : * The hardware events that we support. We do support cache operations but
116 : : * we have harvard caches and no way to combine instruction and data
117 : : * accesses/misses in hardware.
118 : : */
/*
 * Cortex-A8 HW events mapping
 *
 * Maps generic perf hardware event IDs to A8 PMU event numbers.
 * NOTE(review): unlisted PERF_COUNT_HW_* indices zero-initialize to 0x00
 * (PMNC_SW_INCR) rather than HW_OP_UNSUPPORTED — confirm callers never
 * probe indices beyond those listed here.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
130 : :
/*
 * Cortex-A8 generic-cache-event mapping, indexed by
 * [cache][operation][result]. Slots the A8 cannot count are marked
 * CACHE_OP_UNSUPPORTED.
 */
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	/* Last-level cache == the A8's L2, using A8-specific event numbers. */
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	/* TLBs: only refills (misses) are countable, not total accesses. */
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
239 : :
240 : : /*
241 : : * Cortex-A9 HW events mapping
242 : : */
/*
 * Cortex-A9 HW events mapping.
 * Note HW_INSTRUCTIONS uses the A9-specific "instructions through the
 * core renaming stage" event instead of the architected 0x08.
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
254 : :
/*
 * Cortex-A9 generic-cache-event mapping, indexed by
 * [cache][operation][result]. The A9 has no usable L2 (LL) events and
 * no L1 I-cache access event, so those slots are CACHE_OP_UNSUPPORTED.
 */
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	/* TLBs: only refills (misses) are countable, not total accesses. */
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
363 : :
364 : : /*
365 : : * Cortex-A5 HW events mapping
366 : : */
/*
 * Cortex-A5 HW events mapping: architected events only; the A5 exposes
 * no bus-cycle or stall events usable here.
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
378 : :
/*
 * Cortex-A5 generic-cache-event mapping, indexed by
 * [cache][operation][result]. Unusually, the A5 can count prefetch
 * linefills (and dropped prefetches), so C(OP_PREFETCH) is populated
 * for L1D and L1I.
 */
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	/* TLBs: only refills (misses) are countable, not total accesses. */
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
485 : :
486 : : /*
487 : : * Cortex-A15 HW events mapping
488 : : */
/*
 * Cortex-A15 HW events mapping. Branch instructions use the A15's
 * speculative PC-write event; bus cycles are supported via the
 * architected PMUv2 event.
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
500 : :
/*
 * Cortex-A15 generic-cache-event mapping, indexed by
 * [cache][operation][result]. The A15 provides read/write-split events
 * for L1D, DTLB and L2, so those slots use the A15-specific encodings.
 */
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	/* L1 DTLB refills are read/write split on the A15. */
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
609 : :
610 : : /*
611 : : * Cortex-A7 HW events mapping
612 : : */
/*
 * Cortex-A7 HW events mapping: architected (PMUv2) events only,
 * including bus cycles.
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
624 : :
/*
 * Cortex-A7 generic-cache-event mapping, indexed by
 * [cache][operation][result]. Uses only architected (PMUv2) event
 * numbers; LL maps to the architected L2 events.
 */
static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	/* TLBs: only refills (misses) are countable, not total accesses. */
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
733 : :
/*
 * Perf Events' indices
 *
 * Index 0 is reserved for the dedicated cycle counter; programmable
 * event counters start at index 1.
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping: strip the cycle-counter
 * slot so that perf index COUNTER0 becomes hardware counter 0.
 */
#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2.
 * Use unsigned shifts: (1 << 31) would shift into the sign bit of a
 * signed int, which is undefined behavior in C; values are unchanged.
 */
#define ARMV7_EXCLUDE_PL1	(1U << 31)
#define ARMV7_EXCLUDE_USER	(1U << 30)
#define ARMV7_INCLUDE_HYP	(1U << 27)
786 : :
/* Read the PMNC control register (CP15 c9,c12,0 — see dump: "PMNC"). */
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}
793 : :
/*
 * Write the PMNC control register, restricted to its writable bits.
 * The isb() before the write ensures prior instructions complete before
 * the PMU configuration change takes effect.
 */
static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
800 : :
801 : : static inline int armv7_pmnc_has_overflowed(u32 pmnc)
802 : : {
803 : : return pmnc & ARMV7_OVERFLOWED_MASK;
804 : : }
805 : :
/*
 * Validate a perf counter index: it must lie between the cycle counter
 * slot (0) and the last programmable counter this PMU implements.
 * (Reconstructed from branch-coverage-garbled report lines — the two
 * comparisons are the only executable statements in the original.)
 */
static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}
811 : :
812 : : static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
813 : : {
814 : 0 : return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
815 : : }
816 : :
/*
 * Select a hardware counter via PMSELR (CP15 c9,c12,5 — see dump:
 * "SELECT") so that subsequent PMXEVCNTR/PMXEVTYPER accesses target it.
 * The isb() guarantees the selection is visible before those accesses.
 * Returns idx so callers can write `if (select(idx) == idx)`.
 */
static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}
825 : :
/*
 * Read the current value of the counter backing @event: CCNT
 * (CP15 c9,c13,0) for the cycle counter, otherwise the selected
 * event counter via PMXEVCNTR (CP15 c9,c13,2).
 * Returns 0 (after logging) for an out-of-range index.
 */
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}
843 : :
/*
 * Write @value into the counter backing @event: CCNT (CP15 c9,c13,0)
 * for the cycle counter, otherwise the selected event counter via
 * PMXEVCNTR (CP15 c9,c13,2). An out-of-range index is logged and
 * the write is dropped.
 */
static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}
858 : :
/*
 * Program the event type for counter @idx via PMXEVTYPER
 * (CP15 c9,c13,1), masked to its writable bits. Silently does nothing
 * if the counter could not be selected.
 */
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
866 : :
/*
 * Enable counter @idx by setting its bit in CNTENS (CP15 c9,c12,1 —
 * see dump: "CNTENS"). Write-one-to-set, so other counters are
 * unaffected. Returns idx.
 */
static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}
873 : :
/*
 * Disable counter @idx via the counter enable-clear register
 * (CP15 c9,c12,2). Write-one-to-clear, so other counters are
 * unaffected. Returns idx.
 */
static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}
880 : :
/*
 * Enable the overflow interrupt for counter @idx via INTENS
 * (CP15 c9,c14,1 — see dump: "INTENS"). Returns idx.
 */
static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}
887 : :
888 : : static inline int armv7_pmnc_disable_intens(int idx)
889 : : {
890 : : u32 counter = ARMV7_IDX_TO_COUNTER(idx);
891 : 497 : asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
892 : 497 : isb();
893 : : /* Clear the overflow flag in case an interrupt is pending. */
894 : 497 : asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
895 : 497 : isb();
896 : :
897 : : return idx;
898 : : }
899 : :
900 : : static inline u32 armv7_pmnc_getreset_flags(void)
901 : : {
902 : : u32 val;
903 : :
904 : : /* Read */
905 : 0 : asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
906 : :
907 : : /* Write to clear flags */
908 : : val &= ARMV7_FLAG_MASK;
909 : 0 : asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
910 : :
911 : : return val;
912 : : }
913 : :
914 : : #ifdef DEBUG
915 : : static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
916 : : {
917 : : u32 val;
918 : : unsigned int cnt;
919 : :
920 : : printk(KERN_INFO "PMNC registers dump:\n");
921 : :
922 : : asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
923 : : printk(KERN_INFO "PMNC =0x%08x\n", val);
924 : :
925 : : asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
926 : : printk(KERN_INFO "CNTENS=0x%08x\n", val);
927 : :
928 : : asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
929 : : printk(KERN_INFO "INTENS=0x%08x\n", val);
930 : :
931 : : asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
932 : : printk(KERN_INFO "FLAGS =0x%08x\n", val);
933 : :
934 : : asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
935 : : printk(KERN_INFO "SELECT=0x%08x\n", val);
936 : :
937 : : asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
938 : : printk(KERN_INFO "CCNT =0x%08x\n", val);
939 : :
940 : : for (cnt = ARMV7_IDX_COUNTER0;
941 : : cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
942 : : armv7_pmnc_select_counter(cnt);
943 : : asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
944 : : printk(KERN_INFO "CNT[%d] count =0x%08x\n",
945 : : ARMV7_IDX_TO_COUNTER(cnt), val);
946 : : asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
947 : : printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
948 : : ARMV7_IDX_TO_COUNTER(cnt), val);
949 : : }
950 : : }
951 : : #endif
952 : :
953 : 0 : static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
954 : : struct cpupmu_regs *regs)
955 : : {
956 : : unsigned int cnt;
957 : 4967289 : asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
958 [ - + ]: 4965417 : if (!(regs->pmc & ARMV7_PMNC_E))
959 : : return;
960 : :
961 : 0 : asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
962 : 0 : asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
963 : 0 : asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
964 : 0 : asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
965 [ # # ]: 0 : for (cnt = ARMV7_IDX_COUNTER0;
966 : 0 : cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
967 : 0 : armv7_pmnc_select_counter(cnt);
968 : 0 : asm volatile("mrc p15, 0, %0, c9, c13, 1"
969 : : : "=r"(regs->pmxevttype[cnt]));
970 : 0 : asm volatile("mrc p15, 0, %0, c9, c13, 2"
971 : : : "=r"(regs->pmxevtcnt[cnt]));
972 : : }
973 : : return;
974 : : }
975 : :
976 : 0 : static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
977 : : struct cpupmu_regs *regs)
978 : : {
979 : : unsigned int cnt;
980 [ - + ]: 4410172 : if (!(regs->pmc & ARMV7_PMNC_E))
981 : 4410172 : return;
982 : :
983 : 0 : asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
984 : 0 : asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
985 : 0 : asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
986 : 0 : asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
987 [ # # ]: 4410172 : for (cnt = ARMV7_IDX_COUNTER0;
988 : 0 : cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
989 : 0 : armv7_pmnc_select_counter(cnt);
990 : 0 : asm volatile("mcr p15, 0, %0, c9, c13, 1"
991 : 0 : : : "r"(regs->pmxevttype[cnt]));
992 : 0 : asm volatile("mcr p15, 0, %0, c9, c13, 2"
993 : 0 : : : "r"(regs->pmxevtcnt[cnt]));
994 : : }
995 : 0 : asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
996 : : }
997 : :
998 : 0 : static void armv7pmu_enable_event(struct perf_event *event)
999 : : {
1000 : : unsigned long flags;
1001 : : struct hw_perf_event *hwc = &event->hw;
1002 : 0 : struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1003 : 0 : struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1004 : 0 : int idx = hwc->idx;
1005 : :
1006 [ # # ]: 0 : if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1007 : 0 : pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
1008 : : smp_processor_id(), idx);
1009 : 0 : return;
1010 : : }
1011 : :
1012 : : /*
1013 : : * Enable counter and interrupt, and set the counter to count
1014 : : * the event that we're interested in.
1015 : : */
1016 : 0 : raw_spin_lock_irqsave(&events->pmu_lock, flags);
1017 : :
1018 : : /*
1019 : : * Disable counter
1020 : : */
1021 : : armv7_pmnc_disable_counter(idx);
1022 : :
1023 : : /*
1024 : : * Set event (if destined for PMNx counters)
1025 : : * We only need to set the event for the cycle counter if we
1026 : : * have the ability to perform event filtering.
1027 : : */
1028 [ # # ][ # # ]: 0 : if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1029 : 0 : armv7_pmnc_write_evtsel(idx, hwc->config_base);
1030 : :
1031 : : /*
1032 : : * Enable interrupt for this counter
1033 : : */
1034 : : armv7_pmnc_enable_intens(idx);
1035 : :
1036 : : /*
1037 : : * Enable counter
1038 : : */
1039 : : armv7_pmnc_enable_counter(idx);
1040 : :
1041 : 0 : raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1042 : : }
1043 : :
1044 : 0 : static void armv7pmu_disable_event(struct perf_event *event)
1045 : : {
1046 : : unsigned long flags;
1047 : : struct hw_perf_event *hwc = &event->hw;
1048 : 0 : struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1049 : 0 : struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1050 : 0 : int idx = hwc->idx;
1051 : :
1052 [ # # ]: 0 : if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1053 : 0 : pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
1054 : : smp_processor_id(), idx);
1055 : 0 : return;
1056 : : }
1057 : :
1058 : : /*
1059 : : * Disable counter and interrupt
1060 : : */
1061 : 0 : raw_spin_lock_irqsave(&events->pmu_lock, flags);
1062 : :
1063 : : /*
1064 : : * Disable counter
1065 : : */
1066 : : armv7_pmnc_disable_counter(idx);
1067 : :
1068 : : /*
1069 : : * Disable interrupt for this counter
1070 : : */
1071 : : armv7_pmnc_disable_intens(idx);
1072 : :
1073 : 0 : raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1074 : : }
1075 : :
1076 : 0 : static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1077 : : {
1078 : : u32 pmnc;
1079 : : struct perf_sample_data data;
1080 : : struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
1081 : 0 : struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
1082 : : struct pt_regs *regs;
1083 : : int idx;
1084 : :
1085 : : /*
1086 : : * Get and reset the IRQ flags
1087 : : */
1088 : : pmnc = armv7_pmnc_getreset_flags();
1089 : :
1090 : : /*
1091 : : * Did an overflow occur?
1092 : : */
1093 [ # # ]: 0 : if (!armv7_pmnc_has_overflowed(pmnc))
1094 : : return IRQ_NONE;
1095 : :
1096 : : /*
1097 : : * Handle the counter(s) overflow(s)
1098 : : */
1099 : : regs = get_irq_regs();
1100 : :
1101 [ # # ]: 0 : for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1102 : 0 : struct perf_event *event = cpuc->events[idx];
1103 : : struct hw_perf_event *hwc;
1104 : :
1105 : : /* Ignore if we don't have an event. */
1106 [ # # ]: 0 : if (!event)
1107 : 0 : continue;
1108 : :
1109 : : /*
1110 : : * We have a single interrupt for all counters. Check that
1111 : : * each counter has overflowed before we process it.
1112 : : */
1113 [ # # ]: 0 : if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1114 : 0 : continue;
1115 : :
1116 : : hwc = &event->hw;
1117 : 0 : armpmu_event_update(event);
1118 : 0 : perf_sample_data_init(&data, 0, hwc->last_period);
1119 [ # # ]: 0 : if (!armpmu_event_set_period(event))
1120 : 0 : continue;
1121 : :
1122 [ # # ]: 0 : if (perf_event_overflow(event, &data, regs))
1123 : 0 : cpu_pmu->disable(event);
1124 : : }
1125 : :
1126 : : /*
1127 : : * Handle the pending perf events.
1128 : : *
1129 : : * Note: this call *must* be run with interrupts disabled. For
1130 : : * platforms that can have the PMU interrupts raised as an NMI, this
1131 : : * will not work.
1132 : : */
1133 : 0 : irq_work_run();
1134 : :
1135 : 0 : return IRQ_HANDLED;
1136 : : }
1137 : :
1138 : 0 : static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1139 : : {
1140 : : unsigned long flags;
1141 : 0 : struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1142 : :
1143 : 0 : raw_spin_lock_irqsave(&events->pmu_lock, flags);
1144 : : /* Enable all counters */
1145 : : armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1146 : 0 : raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1147 : 0 : }
1148 : :
1149 : 0 : static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1150 : : {
1151 : : unsigned long flags;
1152 : 0 : struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1153 : :
1154 : 0 : raw_spin_lock_irqsave(&events->pmu_lock, flags);
1155 : : /* Disable all counters */
1156 : : armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1157 : 0 : raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1158 : 0 : }
1159 : :
1160 : 0 : static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1161 : : struct perf_event *event)
1162 : : {
1163 : : int idx;
1164 : 0 : struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1165 : : struct hw_perf_event *hwc = &event->hw;
1166 : 0 : unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1167 : :
1168 : : /* Always place a cycle counter into the cycle counter. */
1169 [ # # ]: 0 : if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1170 [ # # ]: 0 : if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1171 : : return -EAGAIN;
1172 : :
1173 : 0 : return ARMV7_IDX_CYCLE_COUNTER;
1174 : : }
1175 : :
1176 : : /*
1177 : : * For anything other than a cycle counter, try and use
1178 : : * the events counters
1179 : : */
1180 [ # # ]: 0 : for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1181 [ # # ]: 0 : if (!test_and_set_bit(idx, cpuc->used_mask))
1182 : : return idx;
1183 : : }
1184 : :
1185 : : /* The counters are all in use. */
1186 : : return -EAGAIN;
1187 : : }
1188 : :
1189 : : /*
1190 : : * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1191 : : */
1192 : 0 : static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1193 : : struct perf_event_attr *attr)
1194 : : {
1195 : : unsigned long config_base = 0;
1196 : :
1197 [ # # ]: 0 : if (attr->exclude_idle)
1198 : : return -EPERM;
1199 [ # # ]: 0 : if (attr->exclude_user)
1200 : : config_base |= ARMV7_EXCLUDE_USER;
1201 [ # # ]: 0 : if (attr->exclude_kernel)
1202 : 0 : config_base |= ARMV7_EXCLUDE_PL1;
1203 [ # # ]: 0 : if (!attr->exclude_hv)
1204 : 0 : config_base |= ARMV7_INCLUDE_HYP;
1205 : :
1206 : : /*
1207 : : * Install the filter into config_base as this is used to
1208 : : * construct the event type.
1209 : : */
1210 : 0 : event->config_base = config_base;
1211 : :
1212 : 0 : return 0;
1213 : : }
1214 : :
1215 : 0 : static void armv7pmu_reset(void *info)
1216 : : {
1217 : : struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1218 : 81 : u32 idx, nb_cnt = cpu_pmu->num_events;
1219 : :
1220 : : /* The counter and interrupt enable registers are unknown at reset. */
1221 [ + + ]: 578 : for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1222 : 497 : armv7_pmnc_disable_counter(idx);
1223 : : armv7_pmnc_disable_intens(idx);
1224 : : }
1225 : :
1226 : : /* Initialize & Reset PMNC: C and P bits */
1227 : : armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1228 : 81 : }
1229 : :
1230 : 0 : static int armv7_a8_map_event(struct perf_event *event)
1231 : : {
1232 : 0 : return armpmu_map_event(event, &armv7_a8_perf_map,
1233 : : &armv7_a8_perf_cache_map, 0xFF);
1234 : : }
1235 : :
1236 : 0 : static int armv7_a9_map_event(struct perf_event *event)
1237 : : {
1238 : 0 : return armpmu_map_event(event, &armv7_a9_perf_map,
1239 : : &armv7_a9_perf_cache_map, 0xFF);
1240 : : }
1241 : :
1242 : 0 : static int armv7_a5_map_event(struct perf_event *event)
1243 : : {
1244 : 0 : return armpmu_map_event(event, &armv7_a5_perf_map,
1245 : : &armv7_a5_perf_cache_map, 0xFF);
1246 : : }
1247 : :
1248 : 0 : static int armv7_a15_map_event(struct perf_event *event)
1249 : : {
1250 : 0 : return armpmu_map_event(event, &armv7_a15_perf_map,
1251 : : &armv7_a15_perf_cache_map, 0xFF);
1252 : : }
1253 : :
1254 : 0 : static int armv7_a7_map_event(struct perf_event *event)
1255 : : {
1256 : 0 : return armpmu_map_event(event, &armv7_a7_perf_map,
1257 : : &armv7_a7_perf_cache_map, 0xFF);
1258 : : }
1259 : :
1260 : : static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1261 : : {
1262 : 0 : cpu_pmu->handle_irq = armv7pmu_handle_irq;
1263 : 0 : cpu_pmu->enable = armv7pmu_enable_event;
1264 : 0 : cpu_pmu->disable = armv7pmu_disable_event;
1265 : 0 : cpu_pmu->read_counter = armv7pmu_read_counter;
1266 : 0 : cpu_pmu->write_counter = armv7pmu_write_counter;
1267 : 0 : cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
1268 : 0 : cpu_pmu->start = armv7pmu_start;
1269 : 0 : cpu_pmu->stop = armv7pmu_stop;
1270 : 0 : cpu_pmu->reset = armv7pmu_reset;
1271 : 0 : cpu_pmu->save_regs = armv7pmu_save_regs;
1272 : 0 : cpu_pmu->restore_regs = armv7pmu_restore_regs;
1273 : 0 : cpu_pmu->max_period = (1LLU << 32) - 1;
1274 : : };
1275 : :
1276 : : static u32 armv7_read_num_pmnc_events(void)
1277 : : {
1278 : : u32 nb_cnt;
1279 : :
1280 : : /* Read the nb of CNTx counters supported from PMNC */
1281 : 0 : nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1282 : :
1283 : : /* Add the CPU cycles counter and return */
1284 : 0 : return nb_cnt + 1;
1285 : : }
1286 : :
1287 : 0 : static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1288 : : {
1289 : : armv7pmu_init(cpu_pmu);
1290 : 0 : cpu_pmu->name = "ARMv7_Cortex_A8";
1291 : 0 : cpu_pmu->map_event = armv7_a8_map_event;
1292 : 0 : cpu_pmu->num_events = armv7_read_num_pmnc_events();
1293 : 0 : return 0;
1294 : : }
1295 : :
1296 : 0 : static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1297 : : {
1298 : : armv7pmu_init(cpu_pmu);
1299 : 0 : cpu_pmu->name = "ARMv7_Cortex_A9";
1300 : 0 : cpu_pmu->map_event = armv7_a9_map_event;
1301 : 0 : cpu_pmu->num_events = armv7_read_num_pmnc_events();
1302 : 0 : return 0;
1303 : : }
1304 : :
1305 : 0 : static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1306 : : {
1307 : : armv7pmu_init(cpu_pmu);
1308 : 0 : cpu_pmu->name = "ARMv7_Cortex_A5";
1309 : 0 : cpu_pmu->map_event = armv7_a5_map_event;
1310 : 0 : cpu_pmu->num_events = armv7_read_num_pmnc_events();
1311 : 0 : return 0;
1312 : : }
1313 : :
1314 : 0 : static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1315 : : {
1316 : : armv7pmu_init(cpu_pmu);
1317 : 0 : cpu_pmu->name = "ARMv7_Cortex_A15";
1318 : 0 : cpu_pmu->map_event = armv7_a15_map_event;
1319 : 0 : cpu_pmu->num_events = armv7_read_num_pmnc_events();
1320 : 0 : cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1321 : 0 : return 0;
1322 : : }
1323 : :
1324 : 0 : static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1325 : : {
1326 : : armv7pmu_init(cpu_pmu);
1327 : 0 : cpu_pmu->name = "ARMv7_Cortex_A7";
1328 : 0 : cpu_pmu->map_event = armv7_a7_map_event;
1329 : 0 : cpu_pmu->num_events = armv7_read_num_pmnc_events();
1330 : 0 : cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1331 : 0 : return 0;
1332 : : }
1333 : : #else
1334 : : static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1335 : : {
1336 : : return -ENODEV;
1337 : : }
1338 : :
1339 : : static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1340 : : {
1341 : : return -ENODEV;
1342 : : }
1343 : :
1344 : : static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1345 : : {
1346 : : return -ENODEV;
1347 : : }
1348 : :
1349 : : static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1350 : : {
1351 : : return -ENODEV;
1352 : : }
1353 : :
1354 : : static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1355 : : {
1356 : : return -ENODEV;
1357 : : }
1358 : : #endif /* CONFIG_CPU_V7 */
|