// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int armpmu_count_irq_users(const int irq);

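/*
 * A PMU interrupt can be wired up as a normal IRQ, an NMI, a percpu IRQ or
 * a percpu NMI. Each variant gets its own ops structure so the rest of the
 * code can enable, disable and free the line without caring which kind it is.
 */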
struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};

static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
	free_irq(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_irq,
	.disable_pmuirq = disable_irq_nosync,
	.free_pmuirq = armpmu_free_pmuirq
};

static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	free_nmi(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};

static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, devid);
}

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
	.disable_pmuirq = disable_percpu_irq,
	.free_pmuirq = armpmu_free_percpu_pmuirq
};

static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_nmi(irq, devid);
}

static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

static bool has_nmi;

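/*
 * Report the largest value the event's counter can hold. Most counters are
 * 32 bits wide, but drivers flag wider (or oddly-sized) counters via the
 * ARMPMU_EVT_64BIT, ARMPMU_EVT_63BIT and ARMPMU_EVT_47BIT event flags.
 */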
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else if (event->hw.flags & ARMPMU_EVT_63BIT)
		return GENMASK_ULL(62, 0);
	else if (event->hw.flags & ARMPMU_EVT_47BIT)
		return GENMASK_ULL(46, 0);
	else
		return GENMASK_ULL(31, 0);
}

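/*
 * For PERF_TYPE_HW_CACHE events, attr.config packs the cache type, the
 * operation and the result into one byte each:
 *
 *	config = type | (op << 8) | (result << 16)
 *
 * e.g. L1D read misses are PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */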
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

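/*
 * Program the counter so that it overflows after the remaining "left"
 * events. The counters count upwards, so we write (-left), truncated to
 * the counter width, and take the overflow interrupt when the counter
 * wraps through zero.
 */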
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

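/*
 * Fold the current hardware count into event->count. The cmpxchg loop
 * guards against racing updates of prev_count (e.g. from the overflow
 * handler); masking the delta with max_period keeps the arithmetic correct
 * when a narrow counter wraps (a 32-bit counter going from 0xfffffffe to
 * 0x00000001 yields a delta of 3).
 */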
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

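/*
 * Claim a hardware counter for the event on this CPU; the core perf code
 * normally calls this with the PMU disabled, so we only need to pick a
 * free index and (optionally) start counting.
 */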
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

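/*
 * Group validation: run the counter allocator against a fake PMU whose
 * used_mask starts out empty. If the leader, every sibling and the new
 * event can each be given an index, the group can be scheduled for real.
 */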
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	if (event == leader)
		return 0;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if (armpmu->set_event_filter &&
	    armpmu->set_event_filter(hwc, &event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return validate_group(event);
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t cpus_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR_RO(cpus);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static const struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
	const struct pmu_irq_ops *ops = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
}

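/*
 * Release the IRQ bound to @cpu via the ops it was requested with. Percpu
 * lines are shared between CPUs, so the matching free_pmuirq callback only
 * really frees the line once the last user is gone.
 */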
void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
	per_cpu(cpu_irq_ops, cpu) = NULL;
}

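/*
 * Request the PMU interrupt for @cpu. Both the normal and the percpu paths
 * try the NMI variant first and fall back to a regular interrupt if the
 * irqchip can't provide one; percpu lines are only requested once and then
 * shared by subsequent CPUs.
 */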
int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
			    IRQF_NO_THREAD;

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* Per cpudevid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

bool arm_pmu_irq_is_nmi(void)
{
	return has_nmi;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

#ifdef CONFIG_CPU_PM
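/*
 * Counters, like the rest of the PMU, lose their state across low-power
 * transitions. Stop (and fold in) every active event on CPU_PM_ENTER and
 * reprogram it on the way back out.
 */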
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 */
			armpmu_start(event, PERF_EF_RELOAD);
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

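/*
 * Allocate an arm_pmu along with its per-CPU event bookkeeping, and fill in
 * the generic struct pmu callbacks. The caller still has to set the
 * implementation-specific fields (name, map_event, handle_irq, ...) before
 * registering the PMU.
 */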
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
		.filter = armpmu_filter,
		.attr_groups = pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE) so
		 * PERF_PMU_CAP_EXTENDED_HW_TYPE is required to open
		 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events on a
		 * specific PMU.
		 */
		.capabilities = PERF_PMU_CAP_EXTENDED_REGS |
				PERF_PMU_CAP_EXTENDED_HW_TYPE,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

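/*
 * Hook the PMU into CPU hotplug and CPU PM, then hand it to the core perf
 * code; this also advertises the PMU to KVM via kvm_host_pmu_init().
 */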
int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	if (!pmu->set_event_filter)
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	pr_info("enabled with %s PMU driver, %d counters available%s\n",
		pmu->name, pmu->num_events,
		has_nmi ? ", using NMIs" : "");

	kvm_host_pmu_init(pmu);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);