// SPDX-License-Identifier: GPL-2.0-only
/*
 * Perf support for the Statistical Profiling Extension, introduced as
 * part of ARMv8.2.
 *
 * Copyright (C) 2016 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define PMUNAME "arm_spe"
#define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * Cache if the event is allowed to trace Context information.
 * This allows us to perform the check, i.e, perfmon_capable(),
 * in the context of the event owner, once, during the event_init().
 */
#define SPE_PMU_HW_FLAGS_CX 0x00001

static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);

static void set_spe_event_has_cx(struct perf_event *event)
{
        if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
                event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}

static bool get_spe_event_has_cx(struct perf_event *event)
{
        return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
}

#define ARM_SPE_BUF_PAD_BYTE 0

struct arm_spe_pmu_buf {
        int nr_pages;
        bool snapshot;
        void *base;
};

struct arm_spe_pmu {
        struct pmu pmu;
        struct platform_device *pdev;
        cpumask_t supported_cpus;
        struct hlist_node hotplug_node;

        int irq; /* PPI */
        u16 pmsver;
        u16 min_period;
        u16 counter_sz;

#define SPE_PMU_FEAT_FILT_EVT (1UL << 0)
#define SPE_PMU_FEAT_FILT_TYP (1UL << 1)
#define SPE_PMU_FEAT_FILT_LAT (1UL << 2)
#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
        u64 features;

        u16 max_record_sz;
        u16 align;
        struct perf_output_handle __percpu *handle;
};

#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))

/* Convert a free-running index from perf into an SPE buffer offset */
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
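
/*
 * As a worked example (assuming 4 KiB pages), an 8-page AUX buffer spans
 * 32 KiB (0x8000 bytes), so a free-running perf index of 0x12345 maps to
 * buffer offset 0x12345 % 0x8000 == 0x2345.
 */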

/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;

enum arm_spe_pmu_buf_fault_action {
        SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
        SPE_PMU_BUF_FAULT_ACT_FATAL,
        SPE_PMU_BUF_FAULT_ACT_OK,
};

/* This sysfs gunk was really good fun to write. */
enum arm_spe_pmu_capabilities {
        SPE_PMU_CAP_ARCH_INST = 0,
        SPE_PMU_CAP_ERND,
        SPE_PMU_CAP_FEAT_MAX,
        SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
        SPE_PMU_CAP_MIN_IVAL,
};

static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
        [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST,
        [SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
};

static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
        if (cap < SPE_PMU_CAP_FEAT_MAX)
                return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);

        switch (cap) {
        case SPE_PMU_CAP_CNT_SZ:
                return spe_pmu->counter_sz;
        case SPE_PMU_CAP_MIN_IVAL:
                return spe_pmu->min_period;
        default:
                WARN(1, "unknown cap %d\n", cap);
        }

        return 0;
}

static ssize_t arm_spe_pmu_cap_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
        struct dev_ext_attribute *ea =
                container_of(attr, struct dev_ext_attribute, attr);
        int cap = (long)ea->var;

        return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}

#define SPE_EXT_ATTR_ENTRY(_name, _func, _var)                          \
        &((struct dev_ext_attribute[]) {                                \
                { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var }   \
        })[0].attr.attr

#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var)                             \
        SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)

static struct attribute *arm_spe_pmu_cap_attr[] = {
        SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
        SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
        SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
        SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
        NULL,
};

static const struct attribute_group arm_spe_pmu_cap_group = {
        .name = "caps",
        .attrs = arm_spe_pmu_cap_attr,
};
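
/*
 * The entries above surface read-only files in the PMU's sysfs "caps"
 * group. As an illustrative example (the instance name arm_spe_0 is a
 * placeholder), on a system where this PMU registered as arm_spe_0:
 *
 *      /sys/bus/event_source/devices/arm_spe_0/caps/count_size
 *      /sys/bus/event_source/devices/arm_spe_0/caps/min_interval
 *
 * each report the corresponding arm_spe_pmu_cap_get() value.
 */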

/* User ABI */
#define ATTR_CFG_FLD_ts_enable_CFG          config  /* PMSCR_EL1.TS */
#define ATTR_CFG_FLD_ts_enable_LO           0
#define ATTR_CFG_FLD_ts_enable_HI           0
#define ATTR_CFG_FLD_pa_enable_CFG          config  /* PMSCR_EL1.PA */
#define ATTR_CFG_FLD_pa_enable_LO           1
#define ATTR_CFG_FLD_pa_enable_HI           1
#define ATTR_CFG_FLD_pct_enable_CFG         config  /* PMSCR_EL1.PCT */
#define ATTR_CFG_FLD_pct_enable_LO          2
#define ATTR_CFG_FLD_pct_enable_HI          2
#define ATTR_CFG_FLD_jitter_CFG             config  /* PMSIRR_EL1.RND */
#define ATTR_CFG_FLD_jitter_LO              16
#define ATTR_CFG_FLD_jitter_HI              16
#define ATTR_CFG_FLD_branch_filter_CFG      config  /* PMSFCR_EL1.B */
#define ATTR_CFG_FLD_branch_filter_LO       32
#define ATTR_CFG_FLD_branch_filter_HI       32
#define ATTR_CFG_FLD_load_filter_CFG        config  /* PMSFCR_EL1.LD */
#define ATTR_CFG_FLD_load_filter_LO         33
#define ATTR_CFG_FLD_load_filter_HI         33
#define ATTR_CFG_FLD_store_filter_CFG       config  /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO        34
#define ATTR_CFG_FLD_store_filter_HI        34

#define ATTR_CFG_FLD_event_filter_CFG       config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO        0
#define ATTR_CFG_FLD_event_filter_HI        63

#define ATTR_CFG_FLD_min_latency_CFG        config2 /* PMSLATFR_EL1.MINLAT */
#define ATTR_CFG_FLD_min_latency_LO         0
#define ATTR_CFG_FLD_min_latency_HI         11

#define ATTR_CFG_FLD_inv_event_filter_CFG   config3 /* PMSNEVFR_EL1 */
#define ATTR_CFG_FLD_inv_event_filter_LO    0
#define ATTR_CFG_FLD_inv_event_filter_HI    63
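
/*
 * These fields are exported through the PMU's "format" directory in sysfs,
 * so the perf tool can set them by name. A hedged example (the instance
 * name arm_spe_0 and the workload are placeholders):
 *
 *      perf record -e arm_spe_0/ts_enable=1,load_filter=1,min_latency=64/ \
 *              -- <workload>
 *
 * which ends up setting the corresponding bits in attr->config and
 * attr->config2 as laid out above.
 */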

/* Why does everything I do descend into this? */
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)                              \
        (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi

#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)                               \
        __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)

#define GEN_PMU_FORMAT_ATTR(name)                                       \
        PMU_FORMAT_ATTR(name,                                           \
                        _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
                                             ATTR_CFG_FLD_##name##_LO,  \
                                             ATTR_CFG_FLD_##name##_HI))

#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)                            \
        ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))

#define ATTR_CFG_GET_FLD(attr, name)                                    \
        _ATTR_CFG_GET_FLD(attr,                                         \
                          ATTR_CFG_FLD_##name##_CFG,                    \
                          ATTR_CFG_FLD_##name##_LO,                     \
                          ATTR_CFG_FLD_##name##_HI)
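
/*
 * To make the indirection concrete: GEN_PMU_FORMAT_ATTR(jitter) expands to
 * a format attribute whose sysfs file contains "config:16", while
 * ATTR_CFG_GET_FLD(attr, min_latency) reduces to
 * ((attr->config2 >> 0) & GENMASK(11, 0)), i.e. the low 12 bits of config2.
 */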

GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);

static struct attribute *arm_spe_pmu_formats_attr[] = {
        &format_attr_ts_enable.attr,
        &format_attr_pa_enable.attr,
        &format_attr_pct_enable.attr,
        &format_attr_jitter.attr,
        &format_attr_branch_filter.attr,
        &format_attr_load_filter.attr,
        &format_attr_store_filter.attr,
        &format_attr_event_filter.attr,
        &format_attr_inv_event_filter.attr,
        &format_attr_min_latency.attr,
        NULL,
};

static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
                                                  struct attribute *attr,
                                                  int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

        if (attr == &format_attr_inv_event_filter.attr &&
            !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
                return 0;

        return attr->mode;
}

static const struct attribute_group arm_spe_pmu_format_group = {
        .name = "format",
        .is_visible = arm_spe_pmu_format_attr_is_visible,
        .attrs = arm_spe_pmu_formats_attr,
};

static ssize_t cpumask_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *arm_spe_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group arm_spe_pmu_group = {
        .attrs = arm_spe_pmu_attrs,
};

static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
        &arm_spe_pmu_group,
        &arm_spe_pmu_cap_group,
        &arm_spe_pmu_format_group,
        NULL,
};

/* Convert between user ABI and register values */
static u64 arm_spe_event_to_pmscr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        u64 reg = 0;

        reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
        reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
        reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));

        if (!attr->exclude_user)
                reg |= PMSCR_EL1_E0SPE;

        if (!attr->exclude_kernel)
                reg |= PMSCR_EL1_E1SPE;

        if (get_spe_event_has_cx(event))
                reg |= PMSCR_EL1_CX;

        return reg;
}

static void arm_spe_event_sanitise_period(struct perf_event *event)
{
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        u64 period = event->hw.sample_period;
        u64 max_period = PMSIRR_EL1_INTERVAL_MASK;

        if (period < spe_pmu->min_period)
                period = spe_pmu->min_period;
        else if (period > max_period)
                period = max_period;
        else
                period &= max_period;

        event->hw.sample_period = period;
}
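
/*
 * As a worked example (assuming the usual layout where the interval
 * occupies PMSIRR_EL1[31:8], so the low byte of the mask is zero): with a
 * minimum period of 256, a requested sample_period of 100 is raised to
 * 256, while 1000 is masked down to 768 so that it lines up with the
 * interval field.
 */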

static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        u64 reg = 0;

        arm_spe_event_sanitise_period(event);

        reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
        reg |= event->hw.sample_period;

        return reg;
}

static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        u64 reg = 0;

        reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
        reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
        reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));

        if (reg)
                reg |= PMSFCR_EL1_FT;

        if (ATTR_CFG_GET_FLD(attr, event_filter))
                reg |= PMSFCR_EL1_FE;

        if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
                reg |= PMSFCR_EL1_FnE;

        if (ATTR_CFG_GET_FLD(attr, min_latency))
                reg |= PMSFCR_EL1_FL;

        return reg;
}

static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        return ATTR_CFG_GET_FLD(attr, event_filter);
}

static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        return ATTR_CFG_GET_FLD(attr, inv_event_filter);
}

static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}

static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        u64 head = PERF_IDX2OFF(handle->head, buf);

        memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
        if (!buf->snapshot)
                perf_aux_output_skip(handle, len);
}

static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
        u64 head = PERF_IDX2OFF(handle->head, buf);
        u64 limit = buf->nr_pages * PAGE_SIZE;

        /*
         * The trace format isn't parseable in reverse, so clamp
         * the limit to half of the buffer size in snapshot mode
         * so that the worst case is half a buffer of records, as
         * opposed to a single record.
         */
        if (head < limit >> 1)
                limit >>= 1;
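
        /*
         * For instance (assuming 4 KiB pages), with a 16-page (64 KiB)
         * snapshot buffer and head at 8 KiB, the limit is clamped to
         * 32 KiB; had head already passed the halfway mark, records
         * would be allowed to run to the end of the buffer instead.
         */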

        /*
         * If we're within max_record_sz of the limit, we must
         * pad, move the head index and recompute the limit.
         */
        if (limit - head < spe_pmu->max_record_sz) {
                arm_spe_pmu_pad_buf(handle, limit - head);
                handle->head = PERF_IDX2OFF(limit, buf);
                limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
        }

        return limit;
}

static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        const u64 bufsize = buf->nr_pages * PAGE_SIZE;
        u64 limit = bufsize;
        u64 head, tail, wakeup;

        /*
         * The head can be misaligned for two reasons:
         *
         * 1. The hardware left PMBPTR pointing to the first byte after
         *    a record when generating a buffer management event.
         *
         * 2. We used perf_aux_output_skip to consume handle->size bytes
         *    and CIRC_SPACE was used to compute the size, which always
         *    leaves one entry free.
         *
         * Deal with this by padding to the next alignment boundary and
         * moving the head index. If we run out of buffer space, we'll
         * reduce handle->size to zero and end up reporting truncation.
         */
        head = PERF_IDX2OFF(handle->head, buf);
        if (!IS_ALIGNED(head, spe_pmu->align)) {
                unsigned long delta = roundup(head, spe_pmu->align) - head;

                delta = min(delta, handle->size);
                arm_spe_pmu_pad_buf(handle, delta);
                head = PERF_IDX2OFF(handle->head, buf);
        }

        /* If we've run out of free space, then nothing more to do */
        if (!handle->size)
                goto no_space;

        /* Compute the tail and wakeup indices now that we've aligned head */
        tail = PERF_IDX2OFF(handle->head + handle->size, buf);
        wakeup = PERF_IDX2OFF(handle->wakeup, buf);

        /*
         * Avoid clobbering unconsumed data. We know we have space, so
         * if we see head == tail we know that the buffer is empty. If
         * head > tail, then there's nothing to clobber prior to
         * wrapping.
         */
        if (head < tail)
                limit = round_down(tail, PAGE_SIZE);

        /*
         * Wakeup may be arbitrarily far into the future. If it's not in
         * the current generation, either we'll wrap before hitting it,
         * or it's in the past and has been handled already.
         *
         * If there's a wakeup before we wrap, arrange to be woken up by
         * the page boundary following it. Keep the tail boundary if
         * that's lower.
         */
        if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
                limit = min(limit, round_up(wakeup, PAGE_SIZE));

        if (limit > head)
                return limit;

        arm_spe_pmu_pad_buf(handle, handle->size);
no_space:
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, 0);
        return 0;
}

static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
        u64 limit = __arm_spe_pmu_next_off(handle);
        u64 head = PERF_IDX2OFF(handle->head, buf);

        /*
         * If the head has come too close to the end of the buffer,
         * then pad to the end and recompute the limit.
         */
        if (limit && (limit - head < spe_pmu->max_record_sz)) {
                arm_spe_pmu_pad_buf(handle, limit - head);
                limit = __arm_spe_pmu_next_off(handle);
        }

        return limit;
}

static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
                                          struct perf_event *event)
{
        u64 base, limit;
        struct arm_spe_pmu_buf *buf;

        /* Start a new aux session */
        buf = perf_aux_output_begin(handle, event);
        if (!buf) {
                event->hw.state |= PERF_HES_STOPPED;
                /*
                 * We still need to clear the limit pointer, since the
                 * profiler might only be disabled by virtue of a fault.
                 */
                limit = 0;
                goto out_write_limit;
        }

        limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
                              : arm_spe_pmu_next_off(handle);
        if (limit)
                limit |= PMBLIMITR_EL1_E;

        limit += (u64)buf->base;
        base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
        write_sysreg_s(base, SYS_PMBPTR_EL1);

out_write_limit:
        write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
}

static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        u64 offset, size;

        offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
        size = offset - PERF_IDX2OFF(handle->head, buf);

        if (buf->snapshot)
                handle->head = offset;

        perf_aux_output_end(handle, size);
}

static void arm_spe_pmu_disable_and_drain_local(void)
{
        /* Disable profiling at EL0 and EL1 */
        write_sysreg_s(0, SYS_PMSCR_EL1);
        isb();

        /* Drain any buffered data */
        psb_csync();
        dsb(nsh);

        /* Disable the profiling buffer */
        write_sysreg_s(0, SYS_PMBLIMITR_EL1);
        isb();
}

/* IRQ handling */
static enum arm_spe_pmu_buf_fault_action
arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
{
        const char *err_str;
        u64 pmbsr;
        enum arm_spe_pmu_buf_fault_action ret;

        /*
         * Ensure new profiling data is visible to the CPU and any external
         * aborts have been resolved.
         */
        psb_csync();
        dsb(nsh);

        /* Ensure hardware updates to PMBPTR_EL1 are visible */
        isb();

        /* Service required? */
        pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
        if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
                return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;

        /*
         * If we've lost data, disable profiling and also set the PARTIAL
         * flag to indicate that the last record is corrupted.
         */
        if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
                                             PERF_AUX_FLAG_PARTIAL);

        /* Report collisions to userspace so that it can up the period */
        if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
                perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);

        /* We only expect buffer management events */
        switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
        case PMBSR_EL1_EC_BUF:
                /* Handled below */
                break;
        case PMBSR_EL1_EC_FAULT_S1:
        case PMBSR_EL1_EC_FAULT_S2:
                err_str = "Unexpected buffer fault";
                goto out_err;
        default:
                err_str = "Unknown error code";
                goto out_err;
        }

        /* Buffer management event */
        switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
        case PMBSR_EL1_BUF_BSC_FULL:
                ret = SPE_PMU_BUF_FAULT_ACT_OK;
                goto out_stop;
        default:
                err_str = "Unknown buffer status code";
        }

out_err:
        pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
                           err_str, smp_processor_id(), pmbsr,
                           read_sysreg_s(SYS_PMBPTR_EL1),
                           read_sysreg_s(SYS_PMBLIMITR_EL1));
        ret = SPE_PMU_BUF_FAULT_ACT_FATAL;

out_stop:
        arm_spe_perf_aux_output_end(handle);
        return ret;
}

static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
{
        struct perf_output_handle *handle = dev;
        struct perf_event *event = handle->event;
        enum arm_spe_pmu_buf_fault_action act;

        if (!perf_get_aux(handle))
                return IRQ_NONE;

        act = arm_spe_pmu_buf_get_fault_act(handle);
        if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
                return IRQ_NONE;

        /*
         * Ensure perf callbacks have completed, which may disable the
         * profiling buffer in response to a TRUNCATION flag.
         */
        irq_work_run();

        switch (act) {
        case SPE_PMU_BUF_FAULT_ACT_FATAL:
                /*
                 * If a fatal exception occurred then leaving the profiling
                 * buffer enabled is a recipe waiting to happen. Since
                 * fatal faults don't always imply truncation, make sure
                 * that the profiling buffer is disabled explicitly before
                 * clearing the syndrome register.
                 */
                arm_spe_pmu_disable_and_drain_local();
                break;
        case SPE_PMU_BUF_FAULT_ACT_OK:
                /*
                 * We handled the fault (the buffer was full), so resume
                 * profiling as long as we didn't detect truncation.
                 * PMBPTR might be misaligned, but we'll burn that bridge
                 * when we get to it.
                 */
                if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
                        arm_spe_perf_aux_output_begin(handle, event);
                        isb();
                }
                break;
        case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
                /* We've seen you before, but GCC has the memory of a sieve. */
                break;
        }

        /* The buffer pointers are now sane, so resume profiling. */
        write_sysreg_s(0, SYS_PMBSR_EL1);
        return IRQ_HANDLED;
}

static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
        switch (pmsver) {
        case ID_AA64DFR0_EL1_PMSVer_IMP:
                return PMSEVFR_EL1_RES0_IMP;
        case ID_AA64DFR0_EL1_PMSVer_V1P1:
                return PMSEVFR_EL1_RES0_V1P1;
        case ID_AA64DFR0_EL1_PMSVer_V1P2:
        /* Return the highest version we support in the default case */
        default:
                return PMSEVFR_EL1_RES0_V1P2;
        }
}

/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
        u64 reg;
        struct perf_event_attr *attr = &event->attr;
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);

        /* This is, of course, deeply driver-specific */
        if (attr->type != event->pmu->type)
                return -ENOENT;

        if (event->cpu >= 0 &&
            !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
                return -ENOENT;

        if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
                return -EOPNOTSUPP;

        if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
                return -EOPNOTSUPP;

        if (attr->exclude_idle)
                return -EOPNOTSUPP;

        /*
         * Feedback-directed frequency throttling doesn't work when we
         * have a buffer of samples. We'd need to manually count the
         * samples in the buffer when it fills up and adjust the event
         * count to reflect that. Instead, just force the user to specify
         * a sample period.
         */
        if (attr->freq)
                return -EINVAL;

        reg = arm_spe_event_to_pmsfcr(event);
        if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
                return -EOPNOTSUPP;

        if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
                return -EOPNOTSUPP;

        if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
                return -EOPNOTSUPP;

        if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
                return -EOPNOTSUPP;

        set_spe_event_has_cx(event);
        reg = arm_spe_event_to_pmscr(event);
        if (!perfmon_capable() &&
            (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
                return -EACCES;

        return 0;
}

static void arm_spe_pmu_start(struct perf_event *event, int flags)
{
        u64 reg;
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

        hwc->state = 0;
        arm_spe_perf_aux_output_begin(handle, event);
        if (hwc->state)
                return;

        reg = arm_spe_event_to_pmsfcr(event);
        write_sysreg_s(reg, SYS_PMSFCR_EL1);

        reg = arm_spe_event_to_pmsevfr(event);
        write_sysreg_s(reg, SYS_PMSEVFR_EL1);

        if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
                reg = arm_spe_event_to_pmsnevfr(event);
                write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
        }

        reg = arm_spe_event_to_pmslatfr(event);
        write_sysreg_s(reg, SYS_PMSLATFR_EL1);

        if (flags & PERF_EF_RELOAD) {
                reg = arm_spe_event_to_pmsirr(event);
                write_sysreg_s(reg, SYS_PMSIRR_EL1);
                isb();
                reg = local64_read(&hwc->period_left);
                write_sysreg_s(reg, SYS_PMSICR_EL1);
        }

        reg = arm_spe_event_to_pmscr(event);
        isb();
        write_sysreg_s(reg, SYS_PMSCR_EL1);
}

static void arm_spe_pmu_stop(struct perf_event *event, int flags)
{
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

        /* If we're already stopped, then nothing to do */
        if (hwc->state & PERF_HES_STOPPED)
                return;

        /* Stop all trace generation */
        arm_spe_pmu_disable_and_drain_local();

        if (flags & PERF_EF_UPDATE) {
                /*
                 * If there's a fault pending then ensure we contain it
                 * to this buffer, since we might be on the context-switch
                 * path.
                 */
                if (perf_get_aux(handle)) {
                        enum arm_spe_pmu_buf_fault_action act;

                        act = arm_spe_pmu_buf_get_fault_act(handle);
                        if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
                                arm_spe_perf_aux_output_end(handle);
                        else
                                write_sysreg_s(0, SYS_PMBSR_EL1);
                }

                /*
                 * This may also contain ECOUNT, but nobody else should
                 * be looking at period_left, since we forbid frequency
                 * based sampling.
                 */
                local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
                hwc->state |= PERF_HES_UPTODATE;
        }

        hwc->state |= PERF_HES_STOPPED;
}

static int arm_spe_pmu_add(struct perf_event *event, int flags)
{
        int ret = 0;
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;

        if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
                return -ENOENT;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START) {
                arm_spe_pmu_start(event, PERF_EF_RELOAD);
                if (hwc->state & PERF_HES_STOPPED)
                        ret = -EINVAL;
        }

        return ret;
}

static void arm_spe_pmu_del(struct perf_event *event, int flags)
{
        arm_spe_pmu_stop(event, PERF_EF_UPDATE);
}

static void arm_spe_pmu_read(struct perf_event *event)
{
}

static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
                                   int nr_pages, bool snapshot)
{
        int i, cpu = event->cpu;
        struct page **pglist;
        struct arm_spe_pmu_buf *buf;

        /* We need at least two pages for this to work. */
        if (nr_pages < 2)
                return NULL;

        /*
         * We require an even number of pages for snapshot mode, so that
         * we can effectively treat the buffer as consisting of two equal
         * parts and give userspace a fighting chance of getting some
         * useful data out of it.
         */
        if (snapshot && (nr_pages & 1))
                return NULL;

        if (cpu == -1)
                cpu = raw_smp_processor_id();

        buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
        if (!buf)
                return NULL;

        pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
        if (!pglist)
                goto out_free_buf;

        for (i = 0; i < nr_pages; ++i)
                pglist[i] = virt_to_page(pages[i]);

        buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
        if (!buf->base)
                goto out_free_pglist;

        buf->nr_pages = nr_pages;
        buf->snapshot = snapshot;

        kfree(pglist);
        return buf;

out_free_pglist:
        kfree(pglist);
out_free_buf:
        kfree(buf);
        return NULL;
}

static void arm_spe_pmu_free_aux(void *aux)
{
        struct arm_spe_pmu_buf *buf = aux;

        vunmap(buf->base);
        kfree(buf);
}

/* Initialisation and teardown functions */
static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
{
        static atomic_t pmu_idx = ATOMIC_INIT(-1);

        int idx;
        char *name;
        struct device *dev = &spe_pmu->pdev->dev;

        spe_pmu->pmu = (struct pmu) {
                .module = THIS_MODULE,
                .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
                .attr_groups = arm_spe_pmu_attr_groups,
                /*
                 * We hitch a ride on the software context here, so that
                 * we can support per-task profiling (which is not possible
                 * with the invalid context as it doesn't get sched callbacks).
                 * This requires that userspace either uses a dummy event for
                 * perf_event_open, since the aux buffer is not setup until
                 * a subsequent mmap, or creates the profiling event in a
                 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
                 * once the buffer has been created.
                 */
                .task_ctx_nr = perf_sw_context,
                .event_init = arm_spe_pmu_event_init,
                .add = arm_spe_pmu_add,
                .del = arm_spe_pmu_del,
                .start = arm_spe_pmu_start,
                .stop = arm_spe_pmu_stop,
                .read = arm_spe_pmu_read,
                .setup_aux = arm_spe_pmu_setup_aux,
                .free_aux = arm_spe_pmu_free_aux,
        };
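
        /*
         * To illustrate the constraint described above for task_ctx_nr: a
         * typical (hedged, simplified) userspace flow is to create the
         * profiling event disabled, map the AUX area, and only then enable
         * it, e.g.:
         *
         *      attr.disabled = 1;
         *      fd = perf_event_open(&attr, pid, cpu, -1, 0);
         *      mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
         *      // ... map the AUX area via aux_offset/aux_size ...
         *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
         */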

        idx = atomic_inc_return(&pmu_idx);
        name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
        if (!name) {
                dev_err(dev, "failed to allocate name for pmu %d\n", idx);
                return -ENOMEM;
        }

        return perf_pmu_register(&spe_pmu->pmu, name, -1);
}

static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
{
        perf_pmu_unregister(&spe_pmu->pmu);
}

static void __arm_spe_pmu_dev_probe(void *info)
{
        int fld;
        u64 reg;
        struct arm_spe_pmu *spe_pmu = info;
        struct device *dev = &spe_pmu->pdev->dev;

        fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
                                                   ID_AA64DFR0_EL1_PMSVer_SHIFT);
        if (!fld) {
                dev_err(dev,
                        "unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
                        fld, smp_processor_id());
                return;
        }
        spe_pmu->pmsver = (u16)fld;

        /* Read PMBIDR first to determine whether or not we have access */
        reg = read_sysreg_s(SYS_PMBIDR_EL1);
        if (FIELD_GET(PMBIDR_EL1_P, reg)) {
                dev_err(dev,
                        "profiling buffer owned by higher exception level\n");
                return;
        }

        /* Minimum alignment. If it's out-of-range, then fail the probe */
        fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
        spe_pmu->align = 1 << fld;
        if (spe_pmu->align > SZ_2K) {
                dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
                        fld, smp_processor_id());
                return;
        }

        /* It's now safe to read PMSIDR and figure out what we've got */
        reg = read_sysreg_s(SYS_PMSIDR_EL1);
        if (FIELD_GET(PMSIDR_EL1_FE, reg))
                spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;

        if (FIELD_GET(PMSIDR_EL1_FnE, reg))
                spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;

        if (FIELD_GET(PMSIDR_EL1_FT, reg))
                spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;

        if (FIELD_GET(PMSIDR_EL1_FL, reg))
                spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;

        if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
                spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;

        if (FIELD_GET(PMSIDR_EL1_LDS, reg))
                spe_pmu->features |= SPE_PMU_FEAT_LDS;

        if (FIELD_GET(PMSIDR_EL1_ERND, reg))
                spe_pmu->features |= SPE_PMU_FEAT_ERND;

        /* This field has a spaced out encoding, so just use a look-up */
        fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
        switch (fld) {
        case PMSIDR_EL1_INTERVAL_256:
                spe_pmu->min_period = 256;
                break;
        case PMSIDR_EL1_INTERVAL_512:
                spe_pmu->min_period = 512;
                break;
        case PMSIDR_EL1_INTERVAL_768:
                spe_pmu->min_period = 768;
                break;
        case PMSIDR_EL1_INTERVAL_1024:
                spe_pmu->min_period = 1024;
                break;
        case PMSIDR_EL1_INTERVAL_1536:
                spe_pmu->min_period = 1536;
                break;
        case PMSIDR_EL1_INTERVAL_2048:
                spe_pmu->min_period = 2048;
                break;
        case PMSIDR_EL1_INTERVAL_3072:
                spe_pmu->min_period = 3072;
                break;
        default:
                dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
                         fld);
                fallthrough;
        case PMSIDR_EL1_INTERVAL_4096:
                spe_pmu->min_period = 4096;
        }

        /* Maximum record size. If it's out-of-range, then fail the probe */
        fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
        spe_pmu->max_record_sz = 1 << fld;
        if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
                dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
                        fld, smp_processor_id());
                return;
        }

        fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
        switch (fld) {
        default:
                dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
                         fld);
                fallthrough;
        case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
                spe_pmu->counter_sz = 12;
                break;
        case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
                spe_pmu->counter_sz = 16;
        }

        dev_info(dev,
                 "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
                 spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
                 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);

        spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
}

static void __arm_spe_pmu_reset_local(void)
{
        /*
         * This is probably overkill, as we have no idea where we're
         * draining any buffered data to...
         */
        arm_spe_pmu_disable_and_drain_local();

        /* Reset the buffer base pointer */
        write_sysreg_s(0, SYS_PMBPTR_EL1);
        isb();

        /* Clear any pending management interrupts */
        write_sysreg_s(0, SYS_PMBSR_EL1);
        isb();
}

static void __arm_spe_pmu_setup_one(void *info)
{
        struct arm_spe_pmu *spe_pmu = info;

        __arm_spe_pmu_reset_local();
        enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
}

static void __arm_spe_pmu_stop_one(void *info)
{
        struct arm_spe_pmu *spe_pmu = info;

        disable_percpu_irq(spe_pmu->irq);
        __arm_spe_pmu_reset_local();
}

static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
        struct arm_spe_pmu *spe_pmu;

        spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
        if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
                return 0;

        __arm_spe_pmu_setup_one(spe_pmu);
        return 0;
}

static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
        struct arm_spe_pmu *spe_pmu;

        spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
        if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
                return 0;

        __arm_spe_pmu_stop_one(spe_pmu);
        return 0;
}

static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
{
        int ret;
        cpumask_t *mask = &spe_pmu->supported_cpus;

        /* Make sure we probe the hardware on a relevant CPU */
        ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
        if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
                return -ENXIO;

        /* Request our PPIs (note that the IRQ is still disabled) */
        ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
                                 spe_pmu->handle);
        if (ret)
                return ret;

        /*
         * Register our hotplug notifier now so we don't miss any events.
         * This will enable the IRQ for any supported CPUs that are already
         * up.
         */
        ret = cpuhp_state_add_instance(arm_spe_pmu_online,
                                       &spe_pmu->hotplug_node);
        if (ret)
                free_percpu_irq(spe_pmu->irq, spe_pmu->handle);

        return ret;
}

static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
{
        cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
        free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
}

/* Driver and device probing */
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
        struct platform_device *pdev = spe_pmu->pdev;
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return -ENXIO;

        if (!irq_is_percpu(irq)) {
                dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
                return -EINVAL;
        }

        if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
                dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
                return -EINVAL;
        }

        spe_pmu->irq = irq;
        return 0;
}

static const struct of_device_id arm_spe_pmu_of_match[] = {
        { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
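
/*
 * A minimal sketch of a devicetree node that would match the table above
 * (the interrupt specifier is illustrative only; the SPE interrupt must be
 * a PPI):
 *
 *      spe-pmu {
 *              compatible = "arm,statistical-profiling-extension-v1";
 *              interrupts = <GIC_PPI 5 IRQ_TYPE_LEVEL_HIGH>;
 *      };
 */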

static const struct platform_device_id arm_spe_match[] = {
        { ARMV8_SPE_PDEV_NAME, 0 },
        { }
};
MODULE_DEVICE_TABLE(platform, arm_spe_match);

static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
        int ret;
        struct arm_spe_pmu *spe_pmu;
        struct device *dev = &pdev->dev;

        /*
         * If kernelspace is unmapped when running at EL0, then the SPE
         * buffer will fault and prematurely terminate the AUX session.
         */
        if (arm64_kernel_unmapped_at_el0()) {
                dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
                return -EPERM;
        }

        spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
        if (!spe_pmu)
                return -ENOMEM;

        spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
        if (!spe_pmu->handle)
                return -ENOMEM;

        spe_pmu->pdev = pdev;
        platform_set_drvdata(pdev, spe_pmu);

        ret = arm_spe_pmu_irq_probe(spe_pmu);
        if (ret)
                goto out_free_handle;

        ret = arm_spe_pmu_dev_init(spe_pmu);
        if (ret)
                goto out_free_handle;

        ret = arm_spe_pmu_perf_init(spe_pmu);
        if (ret)
                goto out_teardown_dev;

        return 0;

out_teardown_dev:
        arm_spe_pmu_dev_teardown(spe_pmu);
out_free_handle:
        free_percpu(spe_pmu->handle);
        return ret;
}

static int arm_spe_pmu_device_remove(struct platform_device *pdev)
{
        struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);

        arm_spe_pmu_perf_destroy(spe_pmu);
        arm_spe_pmu_dev_teardown(spe_pmu);
        free_percpu(spe_pmu->handle);
        return 0;
}

static struct platform_driver arm_spe_pmu_driver = {
        .id_table = arm_spe_match,
        .driver = {
                .name = DRVNAME,
                .of_match_table = of_match_ptr(arm_spe_pmu_of_match),
                .suppress_bind_attrs = true,
        },
        .probe = arm_spe_pmu_device_probe,
        .remove = arm_spe_pmu_device_remove,
};

static int __init arm_spe_pmu_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
                                      arm_spe_pmu_cpu_startup,
                                      arm_spe_pmu_cpu_teardown);
        if (ret < 0)
                return ret;
        arm_spe_pmu_online = ret;

        ret = platform_driver_register(&arm_spe_pmu_driver);
        if (ret)
                cpuhp_remove_multi_state(arm_spe_pmu_online);

        return ret;
}

static void __exit arm_spe_pmu_exit(void)
{
        platform_driver_unregister(&arm_spe_pmu_driver);
        cpuhp_remove_multi_state(arm_spe_pmu_online);
}

module_init(arm_spe_pmu_init);
module_exit(arm_spe_pmu_exit);

MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");