// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *   effectively stops the counter from counting.
 * - disable the counter's interrupt generation (each counter has its
 *   own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 * - enable the counter's interrupt generation.
 * - set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */
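
/*
 * A minimal sketch of the disable sequence described above, for counter 0,
 * using the PMCR accessors and bit definitions that follow in this file:
 *
 *	val = armv6_pmcr_read();
 *	val &= ~(ARMV6_PMCR_EVT_COUNT0_MASK | ARMV6_PMCR_COUNT0_IEN);
 *	val |= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
 *	armv6_pmcr_write(val);
 *
 * armv6pmu_disable_event() below implements exactly this for all three
 * counters.
 */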

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
	ARMV6_PERFCTR_DDEP_STALL	= 0x2,
	ARMV6_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6_PERFCTR_BR_EXEC		= 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	= 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	= 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	= 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	= 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	= 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	= 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	= 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	= 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	= 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	= 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	= 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	= 0xFF,
	ARMV6_PERFCTR_NOP		= 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 0,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here and if users want the main TLB misses they
	 * can use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
};

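/*
 * All PMU control state lives in a single CP15 register (c15, c12, 0),
 * documented in the ARM1136/1176 TRMs as the Performance Monitor Control
 * register. The two helpers below are the only accessors for it.
 */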
static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
}

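/*
 * PMCR bit layout, as used below: bit 0 is the global enable, bits 1-2
 * reset the event counters and the cycle counter, bits 4-6 are the
 * per-counter interrupt enables, bits 8-10 latch overflow, and the 8-bit
 * event selectors for counters 0 and 1 sit at bits 20-27 and 12-19.
 */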
#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}

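/*
 * The counters themselves sit behind CP15 c15, c12 with opcode2 values
 * 1-3: 1 is the cycle counter, 2 and 3 are the two event counters. The
 * counter index from the hw_perf_event selects which one is accessed.
 */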
static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r" (value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r" (value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r" (value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = 0;
		evt = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
		      ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
		      ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
}

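/*
 * All three counters share a single interrupt line. The overflow flags in
 * the PMCR identify which counter(s) fired, and writing those flags back
 * clears the interrupt.
 */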
static irqreturn_t
armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

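/*
 * As noted at the top of the file, the counters cannot be started or
 * stopped individually: the single PMCR enable bit gates all three at once.
 */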
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
		evt = 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
}

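/*
 * Raw (PERF_TYPE_RAW) event codes are masked with 0xFF to match the 8-bit
 * event selector fields in the PMCR, so e.g. the main TLB miss event (0xF)
 * can still be requested even though it has no entry in the maps above.
 */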
static int armv6_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6_perf_map,
				&armv6_perf_cache_map, 0xFF);
}

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6_map_event;
	cpu_pmu->num_events	= 3;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name = "armv6_1136";
	return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name = "armv6_1156";
	return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name = "armv6_1176";
	return 0;
}

static const struct of_device_id armv6_pmu_of_device_ids[] = {
	{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
	{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
	{ /* sentinel value */ }
};

static const struct pmu_probe_info armv6_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
	{ /* sentinel value */ }
};

static int armv6_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
				    armv6_pmu_probe_table);
}

static struct platform_driver armv6_pmu_driver = {
	.driver		= {
		.name	= "armv6-pmu",
		.of_match_table = armv6_pmu_of_device_ids,
	},
	.probe		= armv6_pmu_device_probe,
};

builtin_platform_driver(armv6_pmu_driver);
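
/*
 * Example usage (not part of this file): once the PMU is registered, the
 * events above can be requested from userspace with perf, e.g.
 *
 *	perf stat -e cycles,instructions <cmd>
 *	perf stat -e r3 <cmd>	(raw event 0x3: micro-ITLB misses)
 */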
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */