// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the L3 cache PMUs in Qualcomm Technologies chips.
 *
 * The driver supports a distributed cache architecture where the overall
 * cache for a socket is comprised of multiple slices each with its own PMU.
 * Access to each individual PMU is provided even though all CPUs share all
 * the slices. User space needs to aggregate the individual counts to
 * provide a global picture.
 *
 * See Documentation/admin-guide/perf/qcom_l3_pmu.rst for more details.
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

/*
 * General constants
 */

/* Number of counters on each PMU */
#define L3_NUM_COUNTERS  8
/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
#define L3_EVTYPE_MASK   0xFF
/*
 * Bit position of the 'long counter' flag within perf_event_attr.config.
 * Reserve some space between the event type and this flag to allow expansion
 * in the event type field.
 */
#define L3_EVENT_LC_BIT  32

/*
 * Register offsets
 */

/* Perfmon registers */
#define L3_HML3_PM_CR       0x000
#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8)
#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8)
#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8)
#define L3_HML3_PM_FILTRA   0x300
#define L3_HML3_PM_FILTRB   0x308
#define L3_HML3_PM_FILTRC   0x310
#define L3_HML3_PM_FILTRAM  0x304
#define L3_HML3_PM_FILTRBM  0x30C
#define L3_HML3_PM_FILTRCM  0x314

/* Basic counter registers */
#define L3_M_BC_CR          0x500
#define L3_M_BC_SATROLL_CR  0x504
#define L3_M_BC_CNTENSET    0x508
#define L3_M_BC_CNTENCLR    0x50C
#define L3_M_BC_INTENSET    0x510
#define L3_M_BC_INTENCLR    0x514
#define L3_M_BC_GANG        0x718
#define L3_M_BC_OVSR        0x740
#define L3_M_BC_IRQCTL      0x96C

/*
 * Bit field definitions
 */

/* L3_HML3_PM_CR */
#define PM_CR_RESET           (0)

/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */
#define PMCNT_RESET           (0)

/* L3_HML3_PM_EVTYPEx */
#define EVSEL(__val)          ((__val) & L3_EVTYPE_MASK)

/* Reset value for all the filter registers */
#define PM_FLTR_RESET         (0)

/* L3_M_BC_CR */
#define BC_RESET              (1UL << 1)
#define BC_ENABLE             (1UL << 0)

/* L3_M_BC_SATROLL_CR */
#define BC_SATROLL_CR_RESET   (0)

/* L3_M_BC_CNTENSET */
#define PMCNTENSET(__cntr)    (1UL << ((__cntr) & 0x7))

/* L3_M_BC_CNTENCLR */
#define PMCNTENCLR(__cntr)    (1UL << ((__cntr) & 0x7))
#define BC_CNTENCLR_RESET     (0xFF)

/* L3_M_BC_INTENSET */
#define PMINTENSET(__cntr)    (1UL << ((__cntr) & 0x7))

/* L3_M_BC_INTENCLR */
#define PMINTENCLR(__cntr)    (1UL << ((__cntr) & 0x7))
#define BC_INTENCLR_RESET     (0xFF)

/* L3_M_BC_GANG */
#define GANG_EN(__cntr)       (1UL << ((__cntr) & 0x7))
#define BC_GANG_RESET         (0)

/* L3_M_BC_OVSR */
#define PMOVSRCLR(__cntr)     (1UL << ((__cntr) & 0x7))
#define PMOVSRCLR_RESET       (0xFF)

/* L3_M_BC_IRQCTL */
#define PMIRQONMSBEN(__cntr)  (1UL << ((__cntr) & 0x7))
#define BC_IRQCTL_RESET       (0x0)

/*
 * Events
 */

#define L3_EVENT_CYCLES       0x01
#define L3_EVENT_READ_HIT     0x20
#define L3_EVENT_READ_MISS    0x21
#define L3_EVENT_READ_HIT_D   0x22
#define L3_EVENT_READ_MISS_D  0x23
#define L3_EVENT_WRITE_HIT    0x24
#define L3_EVENT_WRITE_MISS   0x25

/*
 * Decoding of settings from perf_event_attr
 *
 * The config format for perf events is:
 * - config: bits 0-7: event type
 *           bit  32:  HW counter size requested, 0: 32 bits, 1: 64 bits
 */
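
/*
 * For example, counting read misses (0x21) on a 64-bit counter corresponds
 * to attr.config = BIT_ULL(L3_EVENT_LC_BIT) | L3_EVENT_READ_MISS, which the
 * perf tool encodes from "event=0x21,lc=1" via the format attributes
 * exported further below.
 */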

static inline u32 get_event_type(struct perf_event *event)
{
	return (event->attr.config) & L3_EVTYPE_MASK;
}

static inline bool event_uses_long_counter(struct perf_event *event)
{
	return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT));
}

static inline int event_num_counters(struct perf_event *event)
{
	return event_uses_long_counter(event) ? 2 : 1;
}

/*
 * Main PMU, inherits from the core perf PMU type
 */
struct l3cache_pmu {
	struct pmu pmu;
	struct hlist_node node;
	void __iomem *regs;
	struct perf_event *events[L3_NUM_COUNTERS];
	unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)];
	cpumask_t cpumask;
};

#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))

/*
 * Type used to group hardware counter operations
 *
 * Used to implement two types of hardware counters, standard (32bits) and
 * long (64bits). The hardware supports counter chaining which we use to
 * implement long counters. This support is exposed via the 'lc' flag field
 * in perf_event_attr.config.
 */
struct l3cache_event_ops {
	/* Called to start event monitoring */
	void (*start)(struct perf_event *event);
	/* Called to stop event monitoring */
	void (*stop)(struct perf_event *event, int flags);
	/* Called to update the perf_event */
	void (*update)(struct perf_event *event);
};

/*
 * Implementation of long counter operations
 *
 * 64bit counters are implemented by chaining two of the 32bit physical
 * counters. The PMU only supports chaining of adjacent even/odd pairs
 * and for simplicity the driver always configures the odd counter to
 * count the overflows of the lower-numbered even counter. Note that since
 * the resulting hardware counter is 64bits no IRQs are required to maintain
 * the software counter which is also 64bits.
 */
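
/*
 * As an illustration, with idx = 2 (idx is always even here, since the add
 * callback allocates long counters as an aligned pair) the driver gangs
 * counters 2 and 3, and the 64-bit count read back in the update callback
 * is ((u64)EVCNTR(3) << 32) | EVCNTR(2).
 */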

static void qcom_l3_cache__64bit_counter_start(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 evsel = get_event_type(event);
	u32 gang;

	/* Set the odd counter to count the overflows of the even counter */
	gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
	gang |= GANG_EN(idx + 1);
	writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG);

	/* Initialize the hardware counters and reset prev_count */
	local64_set(&event->hw.prev_count, 0);
	writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
	writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));

	/*
	 * Set the event types, the upper half must use zero and the lower
	 * half the actual event type
	 */
	writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1));
	writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));

	/* Finally, enable the counters */
	writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1));
	writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET);
	writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
	writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
}

static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event,
					      int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);

	/* Disable the counters */
	writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
	writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR);

	/* Disable chaining */
	writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG);
}

static void qcom_l3_cache__64bit_counter_update(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 hi, lo;
	u64 prev, new;

	do {
		prev = local64_read(&event->hw.prev_count);
		/*
		 * Re-read the high word until it is stable, so a low-word
		 * rollover between the two reads cannot yield a torn
		 * 64-bit value.
		 */
		do {
			hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
			lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
		} while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)));
		new = ((u64)hi << 32) | lo;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	local64_add(new - prev, &event->count);
}

static const struct l3cache_event_ops event_ops_long = {
	.start = qcom_l3_cache__64bit_counter_start,
	.stop = qcom_l3_cache__64bit_counter_stop,
	.update = qcom_l3_cache__64bit_counter_update,
};

/*
 * Implementation of standard counter operations
 *
 * 32bit counters use a single physical counter and a hardware feature that
 * asserts the overflow IRQ on the toggling of the most significant bit in
 * the counter. This feature allows the counters to be left free-running
 * without needing the usual reprogramming required to properly handle races
 * during concurrent calls to update.
 */
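
/*
 * Wraparound illustration (u32 arithmetic), assuming a single overflow
 * between two updates:
 *   prev = 0xfffffff0, new = 0x00000010
 *   new - prev = 0x20, the correct delta of 32 events
 * The MSB-toggle IRQ guarantees an update at least every 2^31 increments,
 * so more than one wrap between updates cannot occur.
 */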

static void qcom_l3_cache__32bit_counter_start(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 evsel = get_event_type(event);
	u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);

	/* Set the counter to assert the overflow IRQ on MSB toggling */
	writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);

	/* Initialize the hardware counter and reset prev_count */
	local64_set(&event->hw.prev_count, 0);
	writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));

	/* Set the event type */
	writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));

	/* Enable interrupt generation by this counter */
	writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET);

	/* Finally, enable the counter */
	writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
	writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
}

static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event,
					      int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);

	/* Disable the counter */
	writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);

	/* Disable interrupt generation by this counter */
	writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR);

	/* Set the counter to not assert the overflow IRQ on MSB toggling */
	writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
}

static void qcom_l3_cache__32bit_counter_update(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 prev, new;

	do {
		prev = local64_read(&event->hw.prev_count);
		new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	local64_add(new - prev, &event->count);
}

static const struct l3cache_event_ops event_ops_std = {
	.start = qcom_l3_cache__32bit_counter_start,
	.stop = qcom_l3_cache__32bit_counter_stop,
	.update = qcom_l3_cache__32bit_counter_update,
};

/* Retrieve the appropriate operations for the given event */
static
const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event)
{
	if (event_uses_long_counter(event))
		return &event_ops_long;
	else
		return &event_ops_std;
}

/*
 * Top level PMU functions.
 */

static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu)
{
	int i;

	writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR);

	/*
	 * Use writel for the first programming command to ensure the basic
	 * counter unit is stopped before proceeding
	 */
	writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR);

	writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR);
	writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR);
	writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR);
	writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG);
	writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL);
	writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR);

	for (i = 0; i < L3_NUM_COUNTERS; ++i) {
		writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i));
		writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i));
	}

	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM);

	/*
	 * Use writel here to ensure all programming commands are done
	 * before proceeding
	 */
	writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
}

static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data)
{
	struct l3cache_pmu *l3pmu = data;
	/* Read the overflow status register */
	long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR);
	int idx;

	if (status == 0)
		return IRQ_NONE;

	/* Clear the bits we read on the overflow status register */
	writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR);

	for_each_set_bit(idx, &status, L3_NUM_COUNTERS) {
		struct perf_event *event;
		const struct l3cache_event_ops *ops;

		event = l3pmu->events[idx];
		if (!event)
			continue;

		/*
		 * Since the IRQ is not enabled for events using long counters
		 * we should never see one of those here, however, be consistent
		 * and use the ops indirections like in the other operations.
		 */

		ops = l3cache_event_get_ops(event);
		ops->update(event);
	}

	return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void qcom_l3_cache__pmu_enable(struct pmu *pmu)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);

	/* Ensure the other programming commands are observed before enabling */
	wmb();

	writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
}

static void qcom_l3_cache__pmu_disable(struct pmu *pmu)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);

	writel_relaxed(0, l3pmu->regs + L3_M_BC_CR);

	/* Ensure the basic counter unit is stopped before proceeding */
	wmb();
}

/*
 * We must NOT create groups containing events from multiple hardware PMUs,
 * although mixing different software and hardware PMUs is allowed.
 */
static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int counters = 0;

	if (leader->pmu != event->pmu && !is_software_event(leader))
		return false;

	counters = event_num_counters(event);
	counters += event_num_counters(leader);

	for_each_sibling_event(sibling, leader) {
		if (is_software_event(sibling))
			continue;
		if (sibling->pmu != event->pmu)
			return false;
		counters += event_num_counters(sibling);
	}

	/*
	 * If the group requires more counters than the HW has, it
	 * cannot ever be scheduled.
	 */
	return counters <= L3_NUM_COUNTERS;
}
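
/*
 * Worked example for the check above: a group of five 'lc' events needs
 * 5 * 2 = 10 counters, exceeding L3_NUM_COUNTERS (8), so such a group can
 * never be scheduled and is rejected at init time.
 */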

static int qcom_l3_cache__event_init(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Is the event for this PMU?
	 */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * Sampling not supported since these events are not core-attributable.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Task mode not available, we run the counters as socket counters,
	 * not attributable to any CPU and therefore cannot attribute per-task.
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* Validate the group */
	if (!qcom_l3_cache__validate_event_group(event))
		return -EINVAL;

	hwc->idx = -1;

	/*
	 * Many perf core operations (e.g. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, like this one, where
	 * each event could be theoretically assigned to a different CPU.
	 * To mitigate this, we enforce CPU assignment to one designated
	 * processor (the one described in the "cpumask" attribute exported
	 * by the PMU device). perf user space tools honor this and avoid
	 * opening more than one copy of the events.
	 */
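	/*
	 * For instance, if the designated reader is CPU 0, an event opened
	 * with cpu = 3 is retargeted to CPU 0 below; tools discover the
	 * designated CPU via the "cpumask" sysfs attribute.
	 */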
	event->cpu = cpumask_first(&l3pmu->cpumask);

	return 0;
}

static void qcom_l3_cache__event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);

	hwc->state = 0;
	ops->start(event);
}

static void qcom_l3_cache__event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);

	if (hwc->state & PERF_HES_STOPPED)
		return;

	ops->stop(event, flags);
	if (flags & PERF_EF_UPDATE)
		ops->update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int qcom_l3_cache__event_add(struct perf_event *event, int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	/* Long counters need an aligned even/odd pair, i.e. a 2^1 region */
	int order = event_uses_long_counter(event) ? 1 : 0;
	int idx;

	/*
	 * Try to allocate a counter.
	 */
	idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
	if (idx < 0)
		/* The counters are all in use. */
		return -EAGAIN;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	l3pmu->events[idx] = event;

	if (flags & PERF_EF_START)
		qcom_l3_cache__event_start(event, 0);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void qcom_l3_cache__event_del(struct perf_event *event, int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int order = event_uses_long_counter(event) ? 1 : 0;

	/* Stop and clean up */
	qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE);
	l3pmu->events[hwc->idx] = NULL;
	bitmap_release_region(l3pmu->used_mask, hwc->idx, order);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);
}

static void qcom_l3_cache__event_read(struct perf_event *event)
{
	const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);

	ops->update(event);
}

/*
 * Add sysfs attributes
 *
 * We export:
 * - formats, used by perf user space and other tools to configure events
 * - events, used by perf user space and other tools to create events
 *   symbolically, e.g.:
 *     perf stat -a -e l3cache_0_0/event=read-miss/ ls
 *     perf stat -a -e l3cache_0_0/event=0x21/ ls
 * - cpumask, used by perf user space and other tools to know on which CPUs
 *   to open the events
 */

/* formats */

static ssize_t l3cache_pmu_format_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}

#define L3CACHE_PMU_FORMAT_ATTR(_name, _config)				      \
	(&((struct dev_ext_attribute[]) {				      \
		{ .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \
		  .var = (void *) _config, }				      \
	})[0].attr.attr)

static struct attribute *qcom_l3_cache_pmu_formats[] = {
	L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
	L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)),
	NULL,
};

static const struct attribute_group qcom_l3_cache_pmu_format_group = {
	.name = "format",
	.attrs = qcom_l3_cache_pmu_formats,
};

/* events */

static ssize_t l3cache_pmu_event_show(struct device *dev,
				      struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define L3CACHE_EVENT_ATTR(_name, _id)					\
	PMU_EVENT_ATTR_ID(_name, l3cache_pmu_event_show, _id)

static struct attribute *qcom_l3_cache_pmu_events[] = {
	L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
	L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
	L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
	L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
	L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
	L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
	L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
	NULL
};

static const struct attribute_group qcom_l3_cache_pmu_events_group = {
	.name = "events",
	.attrs = qcom_l3_cache_pmu_events,
};

/* cpumask */

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
	.attrs = qcom_l3_cache_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = {
	&qcom_l3_cache_pmu_format_group,
	&qcom_l3_cache_pmu_events_group,
	&qcom_l3_cache_pmu_cpumask_attr_group,
	NULL,
};

/*
 * Probing functions and data.
 */

static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);

	/* If there is not a CPU/PMU association pick this CPU */
	if (cpumask_empty(&l3pmu->cpumask))
		cpumask_set_cpu(cpu, &l3pmu->cpumask);

	return 0;
}

static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
		return 0;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;
	perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
	cpumask_set_cpu(target, &l3pmu->cpumask);
	return 0;
}

static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
{
	struct l3cache_pmu *l3pmu;
	struct acpi_device *acpi_dev;
	struct resource *memrc;
	int ret;
	char *name;

	/* Initialize the PMU data structures */

	acpi_dev = ACPI_COMPANION(&pdev->dev);
	if (!acpi_dev)
		return -ENODEV;

	l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
			      acpi_device_uid(acpi_dev_parent(acpi_dev)),
			      acpi_device_uid(acpi_dev));
	if (!l3pmu || !name)
		return -ENOMEM;
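	/*
	 * e.g. with ACPI UIDs "0" (socket) and "0" (first slice) the PMU is
	 * named "l3cache_0_0", matching the event syntax shown in the sysfs
	 * comment above.
	 */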

	l3pmu->pmu = (struct pmu) {
		.task_ctx_nr	= perf_invalid_context,

		.pmu_enable	= qcom_l3_cache__pmu_enable,
		.pmu_disable	= qcom_l3_cache__pmu_disable,
		.event_init	= qcom_l3_cache__event_init,
		.add		= qcom_l3_cache__event_add,
		.del		= qcom_l3_cache__event_del,
		.start		= qcom_l3_cache__event_start,
		.stop		= qcom_l3_cache__event_stop,
		.read		= qcom_l3_cache__event_read,

		.attr_groups	= qcom_l3_cache_pmu_attr_grps,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	l3pmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
	if (IS_ERR(l3pmu->regs))
		return PTR_ERR(l3pmu->regs);

	qcom_l3_cache__init(l3pmu);

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0)
		return ret;

	ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0,
			       name, l3pmu);
	if (ret) {
		dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n",
			&memrc->start);
		return ret;
	}

	/* Add this instance to the list used by the offline callback */
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug", ret);
		return ret;
	}

	ret = perf_pmu_register(&l3pmu->pmu, name, -1);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret);
		return ret;
	}

	dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type);

	return 0;
}

static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = {
	{ "QCOM8081", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match);

static struct platform_driver qcom_l3_cache_pmu_driver = {
	.driver = {
		.name = "qcom-l3cache-pmu",
		.acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = qcom_l3_cache_pmu_probe,
};

static int __init register_qcom_l3_cache_pmu_driver(void)
{
	int ret;

	/* Install a hook to update the reader CPU in case it goes offline */
	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
				      "perf/qcom/l3cache:online",
				      qcom_l3_cache_pmu_online_cpu,
				      qcom_l3_cache_pmu_offline_cpu);
	if (ret)
		return ret;

	return platform_driver_register(&qcom_l3_cache_pmu_driver);
}
device_initcall(register_qcom_l3_cache_pmu_driver);