// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/local64.h>
#include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>

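/* Maximum number of counters: up to 8 event counters plus the cycle counter */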
#define MAX_L2_CTRS             9

#define L2PMCR_NUM_EV_SHIFT     11
#define L2PMCR_NUM_EV_MASK      0x1F

#define L2PMCR                  0x400
#define L2PMCNTENCLR            0x403
#define L2PMCNTENSET            0x404
#define L2PMINTENCLR            0x405
#define L2PMINTENSET            0x406
#define L2PMOVSCLR              0x407
#define L2PMOVSSET              0x408
#define L2PMCCNTCR              0x409
#define L2PMCCNTR               0x40A
#define L2PMCCNTSR              0x40C
#define L2PMRESR                0x410
#define IA_L2PMXEVCNTCR_BASE    0x420
#define IA_L2PMXEVCNTR_BASE     0x421
#define IA_L2PMXEVFILTER_BASE   0x423
#define IA_L2PMXEVTYPER_BASE    0x424

#define IA_L2_REG_OFFSET        0x10

#define L2PMXEVFILTER_SUFILTER_ALL      0x000E0000
#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
#define L2PMXEVFILTER_ORGFILTER_ALL     0x00000003

#define L2EVTYPER_REG_SHIFT     3

#define L2PMRESR_GROUP_BITS     8
#define L2PMRESR_GROUP_MASK     GENMASK(7, 0)

#define L2CYCLE_CTR_BIT         31
#define L2CYCLE_CTR_RAW_CODE    0xFE

#define L2PMCR_RESET_ALL        0x6
#define L2PMCR_COUNTERS_ENABLE  0x1
#define L2PMCR_COUNTERS_DISABLE 0x0

#define L2PMRESR_EN             BIT_ULL(63)

#define L2_EVT_MASK             0x00000FFF
#define L2_EVT_CODE_MASK        0x00000FF0
#define L2_EVT_GRP_MASK         0x0000000F
#define L2_EVT_CODE_SHIFT       4
#define L2_EVT_GRP_SHIFT        0

#define L2_EVT_CODE(event)   (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
#define L2_EVT_GROUP(event)  (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)

#define L2_EVT_GROUP_MAX        7

#define L2_COUNTER_RELOAD       BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)

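/*
 * The per-counter IA_L2PMXEV* registers are replicated for each event
 * counter at a stride of IA_L2_REG_OFFSET; reg_idx() computes the
 * indirect register address for counter i.
 */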
#define reg_idx(reg, i)         (((i) * IA_L2_REG_OFFSET) + reg##_BASE)

/*
 * Events
 */
#define L2_EVENT_CYCLES                 0xfe
#define L2_EVENT_DCACHE_OPS             0x400
#define L2_EVENT_ICACHE_OPS             0x401
#define L2_EVENT_TLBI                   0x402
#define L2_EVENT_BARRIERS               0x403
#define L2_EVENT_TOTAL_READS            0x405
#define L2_EVENT_TOTAL_WRITES           0x406
#define L2_EVENT_TOTAL_REQUESTS         0x407
#define L2_EVENT_LDREX                  0x420
#define L2_EVENT_STREX                  0x421
#define L2_EVENT_CLREX                  0x422

struct cluster_pmu;

/*
 * Aggregate PMU. Implements the core pmu functions and manages
 * the hardware PMUs.
 */
struct l2cache_pmu {
	struct hlist_node node;
	u32 num_pmus;
	struct pmu pmu;
	int num_counters;
	cpumask_t cpumask;
	struct platform_device *pdev;
	struct cluster_pmu * __percpu *pmu_cluster;
	struct list_head clusters;
};

/*
 * The cache is made up of one or more clusters, each cluster has its own PMU.
 * Each cluster is associated with one or more CPUs.
 * This structure represents one of the hardware PMUs.
 *
 * Events can be envisioned as a 2-dimensional array. Each column represents
 * a group of events. There are 8 groups. Only one entry from each
 * group can be in use at a time.
 *
 * Events are specified as 0xCCG, where CC is 2 hex digits specifying
 * the code (array row) and G specifies the group (column).
 *
 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
 * which is outside the above scheme.
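 *
 * For example, L2_EVENT_DCACHE_OPS (0x400) and L2_EVENT_LDREX (0x420) both
 * decode to group 0 (codes 0x40 and 0x42), so only one of them can be
 * counted at a time, while L2_EVENT_ICACHE_OPS (0x401) is in group 1 and
 * can be counted alongside either of them.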
 */
struct cluster_pmu {
	struct list_head next;
	struct perf_event *events[MAX_L2_CTRS];
	struct l2cache_pmu *l2cache_pmu;
	DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
	DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
	int irq;
	int cluster_id;
	/* The CPU that is used for collecting events on this cluster */
	int on_cpu;
	/* All the CPUs associated with this cluster */
	cpumask_t cluster_cpus;
	spinlock_t pmu_lock;
};

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;

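/*
 * Map a counter index to its bit in the enable/interrupt/overflow registers:
 * event counters use their own index, the cycle counter uses bit 31
 * (L2CYCLE_CTR_BIT).
 */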
static inline u32 idx_to_reg_bit(u32 idx)
{
	if (idx == l2_cycle_ctr_idx)
		return BIT(L2CYCLE_CTR_BIT);

	return BIT(idx);
}

static inline struct cluster_pmu *get_cluster_pmu(
	struct l2cache_pmu *l2cache_pmu, int cpu)
{
	return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}

static void cluster_pmu_reset(void)
{
	/* Reset all counters */
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
	kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
	kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
	kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}

static inline void cluster_pmu_enable(void)
{
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}

static inline void cluster_pmu_disable(void)
{
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}

static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
	if (idx == l2_cycle_ctr_idx)
		kryo_l2_set_indirect_reg(L2PMCCNTR, value);
	else
		kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}

static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
	u64 value;

	if (idx == l2_cycle_ctr_idx)
		value = kryo_l2_get_indirect_reg(L2PMCCNTR);
	else
		value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));

	return value;
}

static inline void cluster_pmu_counter_enable(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_set_evccntcr(u32 val)
{
	kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}

static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}

static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}

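/*
 * L2PMRESR holds one L2PMRESR_GROUP_BITS-wide event-code field per group.
 * The read-modify-write below updates only the field for event_group and is
 * done under pmu_lock since the register is shared by all counters on the
 * cluster.
 */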
static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
				 u32 event_group, u32 event_cc)
{
	u64 field;
	u64 resr_val;
	u32 shift;
	unsigned long flags;

	shift = L2PMRESR_GROUP_BITS * event_group;
	field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);

	spin_lock_irqsave(&cluster->pmu_lock, flags);

	resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
	resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
	resr_val |= field;
	resr_val |= L2PMRESR_EN;
	kryo_l2_set_indirect_reg(L2PMRESR, resr_val);

	spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}

/*
 * Hardware allows filtering of events based on the originating
 * CPU. Turn this off by setting filter bits to allow events from
 * all CPUs, all subunits, and ID-independent events in this cluster.
 */
static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
{
	u32 val = L2PMXEVFILTER_SUFILTER_ALL |
		  L2PMXEVFILTER_ORGFILTER_IDINDEP |
		  L2PMXEVFILTER_ORGFILTER_ALL;

	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
}

static inline u32 cluster_pmu_getreset_ovsr(void)
{
	u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);

	kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
	return result;
}

static inline bool cluster_pmu_has_overflowed(u32 ovsr)
{
	return !!(ovsr & l2_counter_present_mask);
}

static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
{
	return !!(ovsr & idx_to_reg_bit(idx));
}

static void l2_cache_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	do {
		prev = local64_read(&hwc->prev_count);
		now = cluster_pmu_counter_get_value(idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/*
	 * The cycle counter is 64-bit, but all other counters are
	 * 32-bit, and we must handle 32-bit overflow explicitly.
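	 * For example, if prev = 0xfffffff0 and now = 0x10, the unmasked
	 * u64 difference is 0xffffffff00000020; masking it to 32 bits
	 * yields the correct delta of 0x20.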
	 */
	delta = now - prev;
	if (idx != l2_cycle_ctr_idx)
		delta &= 0xffffffff;

	local64_add(delta, &event->count);
}

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
					struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	/*
	 * We limit the max period to half the max counter value so
	 * that even in the case of extreme interrupt latency the
	 * counter will (hopefully) not wrap past its initial value.
	 */
	if (idx == l2_cycle_ctr_idx)
		new = L2_CYCLE_COUNTER_RELOAD;
	else
		new = L2_COUNTER_RELOAD;

	local64_set(&hwc->prev_count, new);
	cluster_pmu_counter_set_value(idx, new);
}

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
	unsigned int group;

	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
		if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
			return -EAGAIN;

		return l2_cycle_ctr_idx;
	}

	idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	/*
	 * Check for column exclusion: event column already in use by another
	 * event. This is for events which are not in the same group.
	 * Conflicting events in the same group are detected in event_init.
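	 * For example, icache-ops (0x401) and strex (0x421) both map to
	 * group 1; whichever is scheduled first claims the group and the
	 * other gets -EAGAIN here.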
	 */
	group = L2_EVT_GROUP(hwc->config_base);
	if (test_bit(group, cluster->used_groups))
		return -EAGAIN;

	set_bit(idx, cluster->used_counters);
	set_bit(group, cluster->used_groups);

	return idx;
}

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
				     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	clear_bit(idx, cluster->used_counters);
	if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
		clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}

static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
	struct cluster_pmu *cluster = data;
	int num_counters = cluster->l2cache_pmu->num_counters;
	u32 ovsr;
	int idx;

	ovsr = cluster_pmu_getreset_ovsr();
	if (!cluster_pmu_has_overflowed(ovsr))
		return IRQ_NONE;

	for_each_set_bit(idx, cluster->used_counters, num_counters) {
		struct perf_event *event = cluster->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
			continue;

		l2_cache_event_update(event);
		hwc = &event->hw;

		l2_cache_cluster_set_period(cluster, hwc);
	}

	return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void l2_cache_pmu_enable(struct pmu *pmu)
{
	/*
	 * Although there is only one PMU (per socket) controlling multiple
	 * physical PMUs (per cluster), because we do not support per-task mode
	 * each event is associated with a CPU. Each event has pmu_enable
	 * called on its CPU, so here it is only necessary to enable the
	 * counters for the current CPU.
	 */

	cluster_pmu_enable();
}

static void l2_cache_pmu_disable(struct pmu *pmu)
{
	cluster_pmu_disable();
}

static int l2_cache_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cluster_pmu *cluster;
	struct perf_event *sibling;
	struct l2cache_pmu *l2cache_pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	l2cache_pmu = to_l2cache_pmu(event->pmu);

	if (hwc->sample_period) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
	     ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
	    (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Invalid config %llx\n",
				    event->attr.config);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader)) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Can't create mixed PMU group\n");
		return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling)) {
			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
					    "Can't create mixed PMU group\n");
			return -EINVAL;
		}
	}

	cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
	if (!cluster) {
		/* CPU has not been initialised */
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "CPU%d not associated with L2 cluster\n", event->cpu);
		return -EINVAL;
	}

	/* Ensure all events in a group are on the same cpu */
	if ((event->group_leader != event) &&
	    (cluster->on_cpu != event->group_leader->cpu)) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Can't create group on CPUs %d and %d",
				    event->cpu, event->group_leader->cpu);
		return -EINVAL;
	}

	if ((event != event->group_leader) &&
	    !is_software_event(event->group_leader) &&
	    (L2_EVT_GROUP(event->group_leader->attr.config) ==
	     L2_EVT_GROUP(event->attr.config))) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Column exclusion: conflicting events %llx %llx\n",
				    event->group_leader->attr.config,
				    event->attr.config);
		return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if ((sibling != event) &&
		    !is_software_event(sibling) &&
		    (L2_EVT_GROUP(sibling->attr.config) ==
		     L2_EVT_GROUP(event->attr.config))) {
			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
					    "Column exclusion: conflicting events %llx %llx\n",
					    sibling->attr.config,
					    event->attr.config);
			return -EINVAL;
		}
	}

	hwc->idx = -1;
	hwc->config_base = event->attr.config;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = cluster->on_cpu;

	return 0;
}

static void l2_cache_event_start(struct perf_event *event, int flags)
{
	struct cluster_pmu *cluster;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 config;
	u32 event_cc, event_group;

	hwc->state = 0;

	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

	l2_cache_cluster_set_period(cluster, hwc);

	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
		cluster_pmu_set_evccntcr(0);
	} else {
		config = hwc->config_base;
		event_cc = L2_EVT_CODE(config);
		event_group = L2_EVT_GROUP(config);

		cluster_pmu_set_evcntcr(idx, 0);
		cluster_pmu_set_evtyper(idx, event_group);
		cluster_pmu_set_resr(cluster, event_group, event_cc);
		cluster_pmu_set_evfilter_sys_mode(idx);
	}

	cluster_pmu_counter_enable_interrupt(idx);
	cluster_pmu_counter_enable(idx);
}

static void l2_cache_event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	cluster_pmu_counter_disable_interrupt(idx);
	cluster_pmu_counter_disable(idx);

	if (flags & PERF_EF_UPDATE)
		l2_cache_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int l2_cache_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;
	struct cluster_pmu *cluster;

	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

	idx = l2_cache_get_event_idx(cluster, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	cluster->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	if (flags & PERF_EF_START)
		l2_cache_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return err;
}

static void l2_cache_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cluster_pmu *cluster;
	int idx = hwc->idx;

	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

	l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
	cluster->events[idx] = NULL;
	l2_cache_clear_event_idx(cluster, event);

	perf_event_update_userpage(event);
}

static void l2_cache_event_read(struct perf_event *event)
{
	l2_cache_event_update(event);
}

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}

static struct device_attribute l2_cache_pmu_cpumask_attr =
		__ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);

static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
	&l2_cache_pmu_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group l2_cache_pmu_cpumask_group = {
	.attrs = l2_cache_pmu_cpumask_attrs,
};

/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code,  "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
PMU_FORMAT_ATTR(event,    "config:0-11");
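
/*
 * The raw config can be given either as the full 0xCCG event code or as
 * separate code/group fields, for example (with this PMU registered as
 * l2cache_0):
 *   perf stat -e l2cache_0/event=0x420/ ...
 *   perf stat -e l2cache_0/l2_code=0x42,l2_group=0x0/ ...
 */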

static struct attribute *l2_cache_pmu_formats[] = {
	&format_attr_l2_code.attr,
	&format_attr_l2_group.attr,
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group l2_cache_pmu_format_group = {
	.name = "format",
	.attrs = l2_cache_pmu_formats,
};

static ssize_t l2cache_pmu_event_show(struct device *dev,
				      struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define L2CACHE_EVENT_ATTR(_name, _id)					\
	PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id)

static struct attribute *l2_cache_pmu_events[] = {
	L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
	L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
	L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
	L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
	L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
	L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
	L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
	L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
	L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
	L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
	L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
	NULL
};

static const struct attribute_group l2_cache_pmu_events_group = {
	.name = "events",
	.attrs = l2_cache_pmu_events,
};

static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
	&l2_cache_pmu_format_group,
	&l2_cache_pmu_cpumask_group,
	&l2_cache_pmu_events_group,
	NULL,
};

/*
 * Generic device handlers
 */

static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
	{ "QCOM8130", },
	{ }
};

static int get_num_counters(void)
{
	int val;

	val = kryo_l2_get_indirect_reg(L2PMCR);

	/*
	 * Read number of counters from L2PMCR and add 1
	 * for the cycle counter.
	 */
	return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
}

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
	struct l2cache_pmu *l2cache_pmu, int cpu)
{
	u64 mpidr;
	int cpu_cluster_id;
	struct cluster_pmu *cluster;

	/*
	 * This assumes that the cluster_id is in MPIDR[aff1] for
	 * single-threaded cores, and MPIDR[aff2] for multi-threaded
	 * cores. This logic will have to be updated if this changes.
	 */
	mpidr = read_cpuid_mpidr();
	if (mpidr & MPIDR_MT_BITMASK)
		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	else
		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
		if (cluster->cluster_id != cpu_cluster_id)
			continue;

		dev_info(&l2cache_pmu->pdev->dev,
			 "CPU%d associated with cluster %d\n", cpu,
			 cluster->cluster_id);
		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
		return cluster;
	}

	return NULL;
}

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cluster_pmu *cluster;
	struct l2cache_pmu *l2cache_pmu;

	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
	cluster = get_cluster_pmu(l2cache_pmu, cpu);
	if (!cluster) {
		/* First time this CPU has come online */
		cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
		if (!cluster) {
			/* Only if broken firmware doesn't list every cluster */
			WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
			return 0;
		}
	}

	/* If another CPU is managing this cluster, we're done */
	if (cluster->on_cpu != -1)
		return 0;

	/*
	 * All CPUs on this cluster were down, use this one.
	 * Reset to put it into sane state.
	 */
	cluster->on_cpu = cpu;
	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
	cluster_pmu_reset();

	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
	enable_irq(cluster->irq);

	return 0;
}

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cluster_pmu *cluster;
	struct l2cache_pmu *l2cache_pmu;
	cpumask_t cluster_online_cpus;
	unsigned int target;

	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
	cluster = get_cluster_pmu(l2cache_pmu, cpu);
	if (!cluster)
		return 0;

	/* If this CPU is not managing the cluster, we're done */
	if (cluster->on_cpu != cpu)
		return 0;

	/* Give up ownership of cluster */
	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
	cluster->on_cpu = -1;

	/* Any other CPU for this cluster which is still online */
	cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
		    cpu_online_mask);
	target = cpumask_any_but(&cluster_online_cpus, cpu);
	if (target >= nr_cpu_ids) {
		disable_irq(cluster->irq);
		return 0;
	}

	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
	cluster->on_cpu = target;
	cpumask_set_cpu(target, &l2cache_pmu->cpumask);
	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));

	return 0;
}

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev->parent);
	struct platform_device *sdev = to_platform_device(dev);
	struct l2cache_pmu *l2cache_pmu = data;
	struct cluster_pmu *cluster;
	u64 fw_cluster_id;
	int err;
	int irq;

	err = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &fw_cluster_id);
	if (err) {
		dev_err(&pdev->dev, "unable to read ACPI uid\n");
		return err;
	}

	cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return -ENOMEM;

	INIT_LIST_HEAD(&cluster->next);
	cluster->cluster_id = fw_cluster_id;

	irq = platform_get_irq(sdev, 0);
	if (irq < 0)
		return irq;
	cluster->irq = irq;

	cluster->l2cache_pmu = l2cache_pmu;
	cluster->on_cpu = -1;

	err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD |
			       IRQF_NO_AUTOEN,
			       "l2-cache-pmu", cluster);
	if (err) {
		dev_err(&pdev->dev,
			"Unable to request IRQ%d for L2 PMU counters\n", irq);
		return err;
	}

	dev_info(&pdev->dev,
		 "Registered L2 cache PMU cluster %lld\n", fw_cluster_id);

	spin_lock_init(&cluster->pmu_lock);

	list_add(&cluster->next, &l2cache_pmu->clusters);
	l2cache_pmu->num_pmus++;

	return 0;
}

static int l2_cache_pmu_probe(struct platform_device *pdev)
{
	int err;
	struct l2cache_pmu *l2cache_pmu;

	l2cache_pmu =
		devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
	if (!l2cache_pmu)
		return -ENOMEM;

	INIT_LIST_HEAD(&l2cache_pmu->clusters);

	platform_set_drvdata(pdev, l2cache_pmu);
	l2cache_pmu->pmu = (struct pmu) {
		/* suffix is instance id for future use with multiple sockets */
		.name		= "l2cache_0",
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= l2_cache_pmu_enable,
		.pmu_disable	= l2_cache_pmu_disable,
		.event_init	= l2_cache_event_init,
		.add		= l2_cache_event_add,
		.del		= l2_cache_event_del,
		.start		= l2_cache_event_start,
		.stop		= l2_cache_event_stop,
		.read		= l2_cache_event_read,
		.attr_groups	= l2_cache_pmu_attr_grps,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	l2cache_pmu->num_counters = get_num_counters();
	l2cache_pmu->pdev = pdev;
	l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
						     struct cluster_pmu *);
	if (!l2cache_pmu->pmu_cluster)
		return -ENOMEM;

	l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
	l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
		BIT(L2CYCLE_CTR_BIT);

	cpumask_clear(&l2cache_pmu->cpumask);

	/* Read cluster info and initialize each cluster */
	err = device_for_each_child(&pdev->dev, l2cache_pmu,
				    l2_cache_pmu_probe_cluster);
	if (err)
		return err;

	if (l2cache_pmu->num_pmus == 0) {
		dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
		return -ENODEV;
	}

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				       &l2cache_pmu->node);
	if (err) {
		dev_err(&pdev->dev, "Error %d registering hotplug", err);
		return err;
	}

	err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
	if (err) {
		dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
		goto out_unregister;
	}

	dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
		 l2cache_pmu->num_pmus);

	return err;

out_unregister:
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				    &l2cache_pmu->node);
	return err;
}

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
	struct l2cache_pmu *l2cache_pmu =
		to_l2cache_pmu(platform_get_drvdata(pdev));

	perf_pmu_unregister(&l2cache_pmu->pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				    &l2cache_pmu->node);
	return 0;
}

static struct platform_driver l2_cache_pmu_driver = {
	.driver = {
		.name = "qcom-l2cache-pmu",
		.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = l2_cache_pmu_probe,
	.remove = l2_cache_pmu_remove,
};

static int __init register_l2_cache_pmu_driver(void)
{
	int err;

	err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				      "AP_PERF_ARM_QCOM_L2_ONLINE",
				      l2cache_pmu_online_cpu,
				      l2cache_pmu_offline_cpu);
	if (err)
		return err;

	return platform_driver_register(&l2_cache_pmu_driver);
}
device_initcall(register_l2_cache_pmu_driver);