1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Intel(R) Processor Trace PMU driver for perf |
4 | * Copyright (c) 2013-2014, Intel Corporation. |
5 | * |
6 | * Intel PT is specified in the Intel Architecture Instruction Set Extensions |
7 | * Programming Reference: |
8 | * http://software.intel.com/en-us/intel-isa-extensions |
9 | */ |
10 | |
11 | #undef DEBUG |
12 | |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | |
15 | #include <linux/types.h> |
16 | #include <linux/bits.h> |
17 | #include <linux/limits.h> |
18 | #include <linux/slab.h> |
19 | #include <linux/device.h> |
20 | |
21 | #include <asm/perf_event.h> |
22 | #include <asm/insn.h> |
23 | #include <asm/io.h> |
24 | #include <asm/intel_pt.h> |
25 | #include <asm/intel-family.h> |
26 | |
27 | #include "../perf_event.h" |
28 | #include "pt.h" |
29 | |
30 | static DEFINE_PER_CPU(struct pt, pt_ctx); |
31 | |
32 | static struct pt_pmu pt_pmu; |
33 | |
34 | /* |
35 | * Capabilities of Intel PT hardware, such as number of address bits or |
36 | * supported output schemes, are cached and exported to userspace as "caps" |
37 | * attribute group of pt pmu device |
38 | * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store |
39 | * relevant bits together with intel_pt traces. |
40 | * |
41 | * These are necessary for both trace decoding (payloads_lip, contains address |
42 | * width encoded in IP-related packets), and event configuration (bitmasks with |
43 | * permitted values for certain bit fields). |
44 | */ |
45 | #define PT_CAP(_n, _l, _r, _m) \ |
46 | [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l, \ |
47 | .reg = _r, .mask = _m } |
48 | |
49 | static struct pt_cap_desc { |
50 | const char *name; |
51 | u32 leaf; |
52 | u8 reg; |
53 | u32 mask; |
54 | } pt_caps[] = { |
55 | PT_CAP(max_subleaf, 0, CPUID_EAX, 0xffffffff), |
56 | PT_CAP(cr3_filtering, 0, CPUID_EBX, BIT(0)), |
57 | PT_CAP(psb_cyc, 0, CPUID_EBX, BIT(1)), |
58 | PT_CAP(ip_filtering, 0, CPUID_EBX, BIT(2)), |
59 | PT_CAP(mtc, 0, CPUID_EBX, BIT(3)), |
60 | PT_CAP(ptwrite, 0, CPUID_EBX, BIT(4)), |
61 | PT_CAP(power_event_trace, 0, CPUID_EBX, BIT(5)), |
62 | PT_CAP(event_trace, 0, CPUID_EBX, BIT(7)), |
63 | PT_CAP(tnt_disable, 0, CPUID_EBX, BIT(8)), |
64 | PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)), |
65 | PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)), |
66 | PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)), |
67 | PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)), |
68 | PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)), |
69 | PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7), |
70 | PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000), |
71 | PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff), |
72 | PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000), |
73 | }; |
74 | |
75 | u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability) |
76 | { |
77 | struct pt_cap_desc *cd = &pt_caps[capability]; |
78 | u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; |
79 | unsigned int shift = __ffs(cd->mask); |
80 | |
81 | return (c & cd->mask) >> shift; |
82 | } |
83 | EXPORT_SYMBOL_GPL(intel_pt_validate_cap); |
84 | |
85 | u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) |
86 | { |
87 | return intel_pt_validate_cap(pt_pmu.caps, cap); |
88 | } |
89 | EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap); |
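/*
 * Worked example of the lookup above: PT_CAP_mtc_periods is declared as
 * leaf 1, CPUID_EAX, mask 0xffff0000, so intel_pt_validate_cap() reads
 * caps[1 * PT_CPUID_REGS_NUM + CPUID_EAX], masks off bits 31:16 and
 * shifts right by __ffs(0xffff0000) == 16, yielding the bitmask of
 * supported MTC periods.
 */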
90 | |
91 | static ssize_t pt_cap_show(struct device *cdev, |
92 | struct device_attribute *attr, |
93 | char *buf) |
94 | { |
95 | struct dev_ext_attribute *ea = |
96 | container_of(attr, struct dev_ext_attribute, attr); |
97 | enum pt_capabilities cap = (long)ea->var; |
98 | |
return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
100 | } |
101 | |
102 | static struct attribute_group pt_cap_group __ro_after_init = { |
103 | .name = "caps" , |
104 | }; |
105 | |
PMU_FORMAT_ATTR(pt, "config:0");
PMU_FORMAT_ATTR(cyc, "config:1");
PMU_FORMAT_ATTR(pwr_evt, "config:4");
PMU_FORMAT_ATTR(fup_on_ptw, "config:5");
PMU_FORMAT_ATTR(mtc, "config:9");
PMU_FORMAT_ATTR(tsc, "config:10");
PMU_FORMAT_ATTR(noretcomp, "config:11");
PMU_FORMAT_ATTR(ptw, "config:12");
PMU_FORMAT_ATTR(branch, "config:13");
PMU_FORMAT_ATTR(event, "config:31");
PMU_FORMAT_ATTR(notnt, "config:55");
PMU_FORMAT_ATTR(mtc_period, "config:14-17");
PMU_FORMAT_ATTR(cyc_thresh, "config:19-22");
PMU_FORMAT_ATTR(psb_period, "config:24-27");
120 | |
121 | static struct attribute *pt_formats_attr[] = { |
122 | &format_attr_pt.attr, |
123 | &format_attr_cyc.attr, |
124 | &format_attr_pwr_evt.attr, |
125 | &format_attr_event.attr, |
126 | &format_attr_notnt.attr, |
127 | &format_attr_fup_on_ptw.attr, |
128 | &format_attr_mtc.attr, |
129 | &format_attr_tsc.attr, |
130 | &format_attr_noretcomp.attr, |
131 | &format_attr_ptw.attr, |
132 | &format_attr_branch.attr, |
133 | &format_attr_mtc_period.attr, |
134 | &format_attr_cyc_thresh.attr, |
135 | &format_attr_psb_period.attr, |
136 | NULL, |
137 | }; |
138 | |
139 | static struct attribute_group pt_format_group = { |
140 | .name = "format" , |
141 | .attrs = pt_formats_attr, |
142 | }; |
143 | |
144 | static ssize_t |
145 | pt_timing_attr_show(struct device *dev, struct device_attribute *attr, |
146 | char *page) |
147 | { |
148 | struct perf_pmu_events_attr *pmu_attr = |
149 | container_of(attr, struct perf_pmu_events_attr, attr); |
150 | |
151 | switch (pmu_attr->id) { |
152 | case 0: |
return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
case 1:
return sprintf(page, "%u:%u\n",
156 | pt_pmu.tsc_art_num, |
157 | pt_pmu.tsc_art_den); |
158 | default: |
159 | break; |
160 | } |
161 | |
162 | return -EINVAL; |
163 | } |
164 | |
165 | PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0, |
166 | pt_timing_attr_show); |
167 | PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1, |
168 | pt_timing_attr_show); |
169 | |
170 | static struct attribute *pt_timing_attr[] = { |
171 | &timing_attr_max_nonturbo_ratio.attr.attr, |
172 | &timing_attr_tsc_art_ratio.attr.attr, |
173 | NULL, |
174 | }; |
175 | |
176 | static struct attribute_group pt_timing_group = { |
177 | .attrs = pt_timing_attr, |
178 | }; |
179 | |
180 | static const struct attribute_group *pt_attr_groups[] = { |
181 | &pt_cap_group, |
182 | &pt_format_group, |
183 | &pt_timing_group, |
184 | NULL, |
185 | }; |
186 | |
187 | static int __init pt_pmu_hw_init(void) |
188 | { |
189 | struct dev_ext_attribute *de_attrs; |
190 | struct attribute **attrs; |
191 | size_t size; |
192 | u64 reg; |
193 | int ret; |
194 | long i; |
195 | |
196 | rdmsrl(MSR_PLATFORM_INFO, reg); |
197 | pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8; |
198 | |
199 | /* |
200 | * if available, read in TSC to core crystal clock ratio, |
201 | * otherwise, zero for numerator stands for "not enumerated" |
202 | * as per SDM |
203 | */ |
204 | if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) { |
205 | u32 eax, ebx, ecx, edx; |
206 | |
cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);
208 | |
209 | pt_pmu.tsc_art_num = ebx; |
210 | pt_pmu.tsc_art_den = eax; |
211 | } |
212 | |
213 | /* model-specific quirks */ |
214 | switch (boot_cpu_data.x86_model) { |
215 | case INTEL_FAM6_BROADWELL: |
216 | case INTEL_FAM6_BROADWELL_D: |
217 | case INTEL_FAM6_BROADWELL_G: |
218 | case INTEL_FAM6_BROADWELL_X: |
219 | /* not setting BRANCH_EN will #GP, erratum BDM106 */ |
220 | pt_pmu.branch_en_always_on = true; |
221 | break; |
222 | default: |
223 | break; |
224 | } |
225 | |
226 | if (boot_cpu_has(X86_FEATURE_VMX)) { |
227 | /* |
228 | * Intel SDM, 36.5 "Tracing post-VMXON" says that |
229 | * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace |
230 | * post-VMXON. |
231 | */ |
232 | rdmsrl(MSR_IA32_VMX_MISC, reg); |
233 | if (reg & BIT(14)) |
234 | pt_pmu.vmx = true; |
235 | } |
236 | |
237 | for (i = 0; i < PT_CPUID_LEAVES; i++) { |
cpuid_count(20, i,
&pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
&pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
&pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
&pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
243 | } |
244 | |
245 | ret = -ENOMEM; |
246 | size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1); |
247 | attrs = kzalloc(size, GFP_KERNEL); |
248 | if (!attrs) |
249 | goto fail; |
250 | |
251 | size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1); |
252 | de_attrs = kzalloc(size, GFP_KERNEL); |
253 | if (!de_attrs) |
254 | goto fail; |
255 | |
256 | for (i = 0; i < ARRAY_SIZE(pt_caps); i++) { |
257 | struct dev_ext_attribute *de_attr = de_attrs + i; |
258 | |
259 | de_attr->attr.attr.name = pt_caps[i].name; |
260 | |
261 | sysfs_attr_init(&de_attr->attr.attr); |
262 | |
263 | de_attr->attr.attr.mode = S_IRUGO; |
264 | de_attr->attr.show = pt_cap_show; |
265 | de_attr->var = (void *)i; |
266 | |
267 | attrs[i] = &de_attr->attr.attr; |
268 | } |
269 | |
270 | pt_cap_group.attrs = attrs; |
271 | |
272 | return 0; |
273 | |
274 | fail: |
kfree(attrs);
276 | |
277 | return ret; |
278 | } |
279 | |
280 | #define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \ |
281 | RTIT_CTL_CYC_THRESH | \ |
282 | RTIT_CTL_PSB_FREQ) |
283 | |
284 | #define RTIT_CTL_MTC (RTIT_CTL_MTC_EN | \ |
285 | RTIT_CTL_MTC_RANGE) |
286 | |
287 | #define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \ |
288 | RTIT_CTL_FUP_ON_PTW) |
289 | |
290 | /* |
291 | * Bit 0 (TraceEn) in the attr.config is meaningless as the |
292 | * corresponding bit in the RTIT_CTL can only be controlled |
293 | * by the driver; therefore, repurpose it to mean: pass |
294 | * through the bit that was previously assumed to be always |
295 | * on for PT, thereby allowing the user to *not* set it if |
296 | * they so wish. See also pt_event_valid() and pt_config(). |
297 | */ |
298 | #define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN |
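/*
 * Illustrative consequence: an event with attr.config bit 0 set and
 * RTIT_CTL_BRANCH_EN clear asks for PT features such as PTWRITE or power
 * event tracing without branch (COFI) packets; pt_config() then honours
 * the user's choice instead of forcing BRANCH_EN on.
 */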
299 | |
300 | #define PT_CONFIG_MASK (RTIT_CTL_TRACEEN | \ |
301 | RTIT_CTL_TSC_EN | \ |
302 | RTIT_CTL_DISRETC | \ |
303 | RTIT_CTL_BRANCH_EN | \ |
304 | RTIT_CTL_CYC_PSB | \ |
305 | RTIT_CTL_MTC | \ |
306 | RTIT_CTL_PWR_EVT_EN | \ |
307 | RTIT_CTL_EVENT_EN | \ |
308 | RTIT_CTL_NOTNT | \ |
309 | RTIT_CTL_FUP_ON_PTW | \ |
310 | RTIT_CTL_PTW_EN) |
311 | |
312 | static bool pt_event_valid(struct perf_event *event) |
313 | { |
314 | u64 config = event->attr.config; |
315 | u64 allowed, requested; |
316 | |
317 | if ((config & PT_CONFIG_MASK) != config) |
318 | return false; |
319 | |
320 | if (config & RTIT_CTL_CYC_PSB) { |
321 | if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc)) |
322 | return false; |
323 | |
324 | allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods); |
325 | requested = (config & RTIT_CTL_PSB_FREQ) >> |
326 | RTIT_CTL_PSB_FREQ_OFFSET; |
327 | if (requested && (!(allowed & BIT(requested)))) |
328 | return false; |
329 | |
330 | allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds); |
331 | requested = (config & RTIT_CTL_CYC_THRESH) >> |
332 | RTIT_CTL_CYC_THRESH_OFFSET; |
333 | if (requested && (!(allowed & BIT(requested)))) |
334 | return false; |
335 | } |
336 | |
337 | if (config & RTIT_CTL_MTC) { |
338 | /* |
339 | * In the unlikely case that CPUID lists valid mtc periods, |
340 | * but not the mtc capability, drop out here. |
341 | * |
342 | * Spec says that setting mtc period bits while mtc bit in |
343 | * CPUID is 0 will #GP, so better safe than sorry. |
344 | */ |
345 | if (!intel_pt_validate_hw_cap(PT_CAP_mtc)) |
346 | return false; |
347 | |
348 | allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods); |
349 | if (!allowed) |
350 | return false; |
351 | |
352 | requested = (config & RTIT_CTL_MTC_RANGE) >> |
353 | RTIT_CTL_MTC_RANGE_OFFSET; |
354 | |
355 | if (!(allowed & BIT(requested))) |
356 | return false; |
357 | } |
358 | |
359 | if (config & RTIT_CTL_PWR_EVT_EN && |
360 | !intel_pt_validate_hw_cap(PT_CAP_power_event_trace)) |
361 | return false; |
362 | |
363 | if (config & RTIT_CTL_EVENT_EN && |
364 | !intel_pt_validate_hw_cap(PT_CAP_event_trace)) |
365 | return false; |
366 | |
367 | if (config & RTIT_CTL_NOTNT && |
368 | !intel_pt_validate_hw_cap(PT_CAP_tnt_disable)) |
369 | return false; |
370 | |
371 | if (config & RTIT_CTL_PTW) { |
372 | if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite)) |
373 | return false; |
374 | |
375 | /* FUPonPTW without PTW doesn't make sense */ |
376 | if ((config & RTIT_CTL_FUP_ON_PTW) && |
377 | !(config & RTIT_CTL_PTW_EN)) |
378 | return false; |
379 | } |
380 | |
381 | /* |
382 | * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config |
383 | * clears the assumption that BranchEn must always be enabled, |
384 | * as was the case with the first implementation of PT. |
385 | * If this bit is not set, the legacy behavior is preserved |
386 | * for compatibility with the older userspace. |
387 | * |
388 | * Re-using bit 0 for this purpose is fine because it is never |
389 | * directly set by the user; previous attempts at setting it in |
390 | * the attr.config resulted in -EINVAL. |
391 | */ |
392 | if (config & RTIT_CTL_PASSTHROUGH) { |
393 | /* |
394 | * Disallow not setting BRANCH_EN where BRANCH_EN is |
395 | * always required. |
396 | */ |
397 | if (pt_pmu.branch_en_always_on && |
398 | !(config & RTIT_CTL_BRANCH_EN)) |
399 | return false; |
400 | } else { |
401 | /* |
402 | * Disallow BRANCH_EN without the PASSTHROUGH. |
403 | */ |
404 | if (config & RTIT_CTL_BRANCH_EN) |
405 | return false; |
406 | } |
407 | |
408 | return true; |
409 | } |
410 | |
411 | /* |
412 | * PT configuration helpers |
413 | * These all are cpu affine and operate on a local PT |
414 | */ |
415 | |
416 | static void pt_config_start(struct perf_event *event) |
417 | { |
418 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
419 | u64 ctl = event->hw.config; |
420 | |
421 | ctl |= RTIT_CTL_TRACEEN; |
422 | if (READ_ONCE(pt->vmx_on)) |
perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
else
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
426 | |
427 | WRITE_ONCE(event->hw.config, ctl); |
428 | } |
429 | |
430 | /* Address ranges and their corresponding msr configuration registers */ |
431 | static const struct pt_address_range { |
432 | unsigned long msr_a; |
433 | unsigned long msr_b; |
434 | unsigned int reg_off; |
435 | } pt_address_ranges[] = { |
436 | { |
437 | .msr_a = MSR_IA32_RTIT_ADDR0_A, |
438 | .msr_b = MSR_IA32_RTIT_ADDR0_B, |
439 | .reg_off = RTIT_CTL_ADDR0_OFFSET, |
440 | }, |
441 | { |
442 | .msr_a = MSR_IA32_RTIT_ADDR1_A, |
443 | .msr_b = MSR_IA32_RTIT_ADDR1_B, |
444 | .reg_off = RTIT_CTL_ADDR1_OFFSET, |
445 | }, |
446 | { |
447 | .msr_a = MSR_IA32_RTIT_ADDR2_A, |
448 | .msr_b = MSR_IA32_RTIT_ADDR2_B, |
449 | .reg_off = RTIT_CTL_ADDR2_OFFSET, |
450 | }, |
451 | { |
452 | .msr_a = MSR_IA32_RTIT_ADDR3_A, |
453 | .msr_b = MSR_IA32_RTIT_ADDR3_B, |
454 | .reg_off = RTIT_CTL_ADDR3_OFFSET, |
455 | } |
456 | }; |
457 | |
458 | static u64 pt_config_filters(struct perf_event *event) |
459 | { |
460 | struct pt_filters *filters = event->hw.addr_filters; |
461 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
462 | unsigned int range = 0; |
463 | u64 rtit_ctl = 0; |
464 | |
465 | if (!filters) |
466 | return 0; |
467 | |
468 | perf_event_addr_filters_sync(event); |
469 | |
470 | for (range = 0; range < filters->nr_filters; range++) { |
471 | struct pt_filter *filter = &filters->filter[range]; |
472 | |
473 | /* |
474 | * Note, if the range has zero start/end addresses due |
475 | * to its dynamic object not being loaded yet, we just |
476 | * go ahead and program zeroed range, which will simply |
477 | * produce no data. Note^2: if executable code at 0x0 |
478 | * is a concern, we can set up an "invalid" configuration |
479 | * such as msr_b < msr_a. |
480 | */ |
481 | |
482 | /* avoid redundant msr writes */ |
483 | if (pt->filters.filter[range].msr_a != filter->msr_a) { |
wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
485 | pt->filters.filter[range].msr_a = filter->msr_a; |
486 | } |
487 | |
488 | if (pt->filters.filter[range].msr_b != filter->msr_b) { |
wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
490 | pt->filters.filter[range].msr_b = filter->msr_b; |
491 | } |
492 | |
493 | rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off; |
494 | } |
495 | |
496 | return rtit_ctl; |
497 | } |
498 | |
499 | static void pt_config(struct perf_event *event) |
500 | { |
501 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
struct pt_buffer *buf = perf_get_aux(&pt->handle);
503 | u64 reg; |
504 | |
505 | /* First round: clear STATUS, in particular the PSB byte counter. */ |
506 | if (!event->hw.config) { |
507 | perf_event_itrace_started(event); |
wrmsrl(MSR_IA32_RTIT_STATUS, 0);
509 | } |
510 | |
511 | reg = pt_config_filters(event); |
512 | reg |= RTIT_CTL_TRACEEN; |
513 | if (!buf->single) |
514 | reg |= RTIT_CTL_TOPA; |
515 | |
516 | /* |
517 | * Previously, we had BRANCH_EN on by default, but now that PT has |
518 | * grown features outside of branch tracing, it is useful to allow |
519 | * the user to disable it. Setting bit 0 in the event's attr.config |
520 | * allows BRANCH_EN to pass through instead of being always on. See |
521 | * also the comment in pt_event_valid(). |
522 | */ |
523 | if (event->attr.config & BIT(0)) { |
524 | reg |= event->attr.config & RTIT_CTL_BRANCH_EN; |
525 | } else { |
526 | reg |= RTIT_CTL_BRANCH_EN; |
527 | } |
528 | |
529 | if (!event->attr.exclude_kernel) |
530 | reg |= RTIT_CTL_OS; |
531 | if (!event->attr.exclude_user) |
532 | reg |= RTIT_CTL_USR; |
533 | |
534 | reg |= (event->attr.config & PT_CONFIG_MASK); |
535 | |
536 | event->hw.config = reg; |
537 | pt_config_start(event); |
538 | } |
539 | |
540 | static void pt_config_stop(struct perf_event *event) |
541 | { |
542 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
543 | u64 ctl = READ_ONCE(event->hw.config); |
544 | |
545 | /* may be already stopped by a PMI */ |
546 | if (!(ctl & RTIT_CTL_TRACEEN)) |
547 | return; |
548 | |
549 | ctl &= ~RTIT_CTL_TRACEEN; |
550 | if (!READ_ONCE(pt->vmx_on)) |
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
552 | |
553 | WRITE_ONCE(event->hw.config, ctl); |
554 | |
555 | /* |
556 | * A wrmsr that disables trace generation serializes other PT |
557 | * registers and causes all data packets to be written to memory, |
558 | * but a fence is required for the data to become globally visible. |
559 | * |
560 | * The below WMB, separating data store and aux_head store matches |
561 | * the consumer's RMB that separates aux_head load and data load. |
562 | */ |
563 | wmb(); |
564 | } |
565 | |
566 | /** |
567 | * struct topa - ToPA metadata |
568 | * @list: linkage to struct pt_buffer's list of tables |
569 | * @offset: offset of the first entry in this table in the buffer |
570 | * @size: total size of all entries in this table |
571 | * @last: index of the last initialized entry in this table |
572 | * @z_count: how many times the first entry repeats |
573 | */ |
574 | struct topa { |
575 | struct list_head list; |
576 | u64 offset; |
577 | size_t size; |
578 | int last; |
579 | unsigned int z_count; |
580 | }; |
581 | |
582 | /* |
583 | * Keep ToPA table-related metadata on the same page as the actual table, |
* taking up a few words at the end of the page
585 | */ |
586 | |
587 | #define TENTS_PER_PAGE \ |
588 | ((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry)) |
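/*
 * Rough sanity check, assuming 4KiB pages and 8-byte ToPA entries: this
 * works out to roughly 500 entries per table, with the trailing struct
 * topa metadata occupying the tail of the same page.
 */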
589 | |
590 | /** |
591 | * struct topa_page - page-sized ToPA table with metadata at the top |
592 | * @table: actual ToPA table entries, as understood by PT hardware |
593 | * @topa: metadata |
594 | */ |
595 | struct topa_page { |
596 | struct topa_entry table[TENTS_PER_PAGE]; |
597 | struct topa topa; |
598 | }; |
599 | |
600 | static inline struct topa_page *topa_to_page(struct topa *topa) |
601 | { |
602 | return container_of(topa, struct topa_page, topa); |
603 | } |
604 | |
605 | static inline struct topa_page *topa_entry_to_page(struct topa_entry *te) |
606 | { |
607 | return (struct topa_page *)((unsigned long)te & PAGE_MASK); |
608 | } |
609 | |
610 | static inline phys_addr_t topa_pfn(struct topa *topa) |
611 | { |
612 | return PFN_DOWN(virt_to_phys(topa_to_page(topa))); |
613 | } |
614 | |
615 | /* make -1 stand for the last table entry */ |
616 | #define TOPA_ENTRY(t, i) \ |
617 | ((i) == -1 \ |
618 | ? &topa_to_page(t)->table[(t)->last] \ |
619 | : &topa_to_page(t)->table[(i)]) |
620 | #define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size)) |
621 | #define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size) |
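/*
 * pt_config_buffer() below programs the OUTPUT_BASE and OUTPUT_MASK MSRs.
 * In the OUTPUT_MASK value it builds, bits 6:0 are all set, bits 31:7 hold
 * either the single-range size mask or the current ToPA table index
 * (buf->cur_idx), and bits 63:32 hold the offset within the current output
 * region; pt_read_offset() decodes the same layout.
 */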
622 | |
623 | static void pt_config_buffer(struct pt_buffer *buf) |
624 | { |
625 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
626 | u64 reg, mask; |
627 | void *base; |
628 | |
629 | if (buf->single) { |
630 | base = buf->data_pages[0]; |
631 | mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7; |
632 | } else { |
base = topa_to_page(buf->cur)->table;
634 | mask = (u64)buf->cur_idx; |
635 | } |
636 | |
reg = virt_to_phys(base);
if (pt->output_base != reg) {
pt->output_base = reg;
wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
641 | } |
642 | |
643 | reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32); |
644 | if (pt->output_mask != reg) { |
645 | pt->output_mask = reg; |
wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
647 | } |
648 | } |
649 | |
650 | /** |
651 | * topa_alloc() - allocate page-sized ToPA table |
652 | * @cpu: CPU on which to allocate. |
653 | * @gfp: Allocation flags. |
654 | * |
655 | * Return: On success, return the pointer to ToPA table page. |
656 | */ |
657 | static struct topa *topa_alloc(int cpu, gfp_t gfp) |
658 | { |
659 | int node = cpu_to_node(cpu); |
660 | struct topa_page *tp; |
661 | struct page *p; |
662 | |
p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
664 | if (!p) |
665 | return NULL; |
666 | |
667 | tp = page_address(p); |
668 | tp->topa.last = 0; |
669 | |
670 | /* |
* In case of single-entry ToPA, always put the self-referencing END
672 | * link as the 2nd entry in the table |
673 | */ |
674 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) { |
675 | TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT; |
676 | TOPA_ENTRY(&tp->topa, 1)->end = 1; |
677 | } |
678 | |
679 | return &tp->topa; |
680 | } |
681 | |
682 | /** |
683 | * topa_free() - free a page-sized ToPA table |
684 | * @topa: Table to deallocate. |
685 | */ |
686 | static void topa_free(struct topa *topa) |
687 | { |
688 | free_page((unsigned long)topa); |
689 | } |
690 | |
691 | /** |
692 | * topa_insert_table() - insert a ToPA table into a buffer |
693 | * @buf: PT buffer that's being extended. |
694 | * @topa: New topa table to be inserted. |
695 | * |
696 | * If it's the first table in this buffer, set up buffer's pointers |
* accordingly; otherwise, add an END=1 link entry pointing to @topa in the
* current "last" table and adjust the last table pointer to @topa.
699 | */ |
700 | static void topa_insert_table(struct pt_buffer *buf, struct topa *topa) |
701 | { |
702 | struct topa *last = buf->last; |
703 | |
704 | list_add_tail(new: &topa->list, head: &buf->tables); |
705 | |
706 | if (!buf->first) { |
707 | buf->first = buf->last = buf->cur = topa; |
708 | return; |
709 | } |
710 | |
711 | topa->offset = last->offset + last->size; |
712 | buf->last = topa; |
713 | |
714 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) |
715 | return; |
716 | |
717 | BUG_ON(last->last != TENTS_PER_PAGE - 1); |
718 | |
719 | TOPA_ENTRY(last, -1)->base = topa_pfn(topa); |
720 | TOPA_ENTRY(last, -1)->end = 1; |
721 | } |
722 | |
723 | /** |
724 | * topa_table_full() - check if a ToPA table is filled up |
725 | * @topa: ToPA table. |
726 | */ |
727 | static bool topa_table_full(struct topa *topa) |
728 | { |
729 | /* single-entry ToPA is a special case */ |
730 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) |
731 | return !!topa->last; |
732 | |
733 | return topa->last == TENTS_PER_PAGE - 1; |
734 | } |
735 | |
736 | /** |
737 | * topa_insert_pages() - create a list of ToPA tables |
738 | * @buf: PT buffer being initialized. |
739 | * @cpu: CPU on which to allocate. |
740 | * @gfp: Allocation flags. |
741 | * |
742 | * This initializes a list of ToPA tables with entries from |
743 | * the data_pages provided by rb_alloc_aux(). |
744 | * |
745 | * Return: 0 on success or error code. |
746 | */ |
747 | static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp) |
748 | { |
749 | struct topa *topa = buf->last; |
750 | int order = 0; |
751 | struct page *p; |
752 | |
753 | p = virt_to_page(buf->data_pages[buf->nr_pages]); |
if (PagePrivate(p))
755 | order = page_private(p); |
756 | |
757 | if (topa_table_full(topa)) { |
758 | topa = topa_alloc(cpu, gfp); |
759 | if (!topa) |
760 | return -ENOMEM; |
761 | |
762 | topa_insert_table(buf, topa); |
763 | } |
764 | |
765 | if (topa->z_count == topa->last - 1) { |
766 | if (order == TOPA_ENTRY(topa, topa->last - 1)->size) |
767 | topa->z_count++; |
768 | } |
769 | |
770 | TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT; |
771 | TOPA_ENTRY(topa, -1)->size = order; |
772 | if (!buf->snapshot && |
773 | !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) { |
774 | TOPA_ENTRY(topa, -1)->intr = 1; |
775 | TOPA_ENTRY(topa, -1)->stop = 1; |
776 | } |
777 | |
778 | topa->last++; |
topa->size += sizes(order);
780 | |
781 | buf->nr_pages += 1ul << order; |
782 | |
783 | return 0; |
784 | } |
785 | |
786 | /** |
787 | * pt_topa_dump() - print ToPA tables and their entries |
788 | * @buf: PT buffer. |
789 | */ |
790 | static void pt_topa_dump(struct pt_buffer *buf) |
791 | { |
792 | struct topa *topa; |
793 | |
794 | list_for_each_entry(topa, &buf->tables, list) { |
795 | struct topa_page *tp = topa_to_page(topa); |
796 | int i; |
797 | |
798 | pr_debug("# table @%p, off %llx size %zx\n" , tp->table, |
799 | topa->offset, topa->size); |
800 | for (i = 0; i < TENTS_PER_PAGE; i++) { |
801 | pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n" , |
802 | &tp->table[i], |
803 | (unsigned long)tp->table[i].base << TOPA_SHIFT, |
804 | sizes(tp->table[i].size), |
805 | tp->table[i].end ? 'E' : ' ', |
806 | tp->table[i].intr ? 'I' : ' ', |
807 | tp->table[i].stop ? 'S' : ' ', |
808 | *(u64 *)&tp->table[i]); |
809 | if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) && |
810 | tp->table[i].stop) || |
811 | tp->table[i].end) |
812 | break; |
813 | if (!i && topa->z_count) |
814 | i += topa->z_count; |
815 | } |
816 | } |
817 | } |
818 | |
819 | /** |
820 | * pt_buffer_advance() - advance to the next output region |
821 | * @buf: PT buffer. |
822 | * |
823 | * Advance the current pointers in the buffer to the next ToPA entry. |
824 | */ |
825 | static void pt_buffer_advance(struct pt_buffer *buf) |
826 | { |
827 | buf->output_off = 0; |
828 | buf->cur_idx++; |
829 | |
830 | if (buf->cur_idx == buf->cur->last) { |
831 | if (buf->cur == buf->last) |
832 | buf->cur = buf->first; |
833 | else |
834 | buf->cur = list_entry(buf->cur->list.next, struct topa, |
835 | list); |
836 | buf->cur_idx = 0; |
837 | } |
838 | } |
839 | |
840 | /** |
841 | * pt_update_head() - calculate current offsets and sizes |
842 | * @pt: Per-cpu pt context. |
843 | * |
844 | * Update buffer's current write pointer position and data size. |
845 | */ |
846 | static void pt_update_head(struct pt *pt) |
847 | { |
struct pt_buffer *buf = perf_get_aux(&pt->handle);
849 | u64 topa_idx, base, old; |
850 | |
851 | if (buf->single) { |
852 | local_set(&buf->data_size, buf->output_off); |
853 | return; |
854 | } |
855 | |
856 | /* offset of the first region in this table from the beginning of buf */ |
857 | base = buf->cur->offset + buf->output_off; |
858 | |
859 | /* offset of the current output region within this table */ |
860 | for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++) |
861 | base += TOPA_ENTRY_SIZE(buf->cur, topa_idx); |
862 | |
863 | if (buf->snapshot) { |
864 | local_set(&buf->data_size, base); |
865 | } else { |
866 | old = (local64_xchg(&buf->head, base) & |
867 | ((buf->nr_pages << PAGE_SHIFT) - 1)); |
868 | if (base < old) |
869 | base += buf->nr_pages << PAGE_SHIFT; |
870 | |
local_add(base - old, &buf->data_size);
872 | } |
873 | } |
874 | |
875 | /** |
876 | * pt_buffer_region() - obtain current output region's address |
877 | * @buf: PT buffer. |
878 | */ |
879 | static void *pt_buffer_region(struct pt_buffer *buf) |
880 | { |
881 | return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT); |
882 | } |
883 | |
884 | /** |
885 | * pt_buffer_region_size() - obtain current output region's size |
886 | * @buf: PT buffer. |
887 | */ |
888 | static size_t pt_buffer_region_size(struct pt_buffer *buf) |
889 | { |
890 | return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx); |
891 | } |
892 | |
893 | /** |
894 | * pt_handle_status() - take care of possible status conditions |
895 | * @pt: Per-cpu pt context. |
896 | */ |
897 | static void pt_handle_status(struct pt *pt) |
898 | { |
struct pt_buffer *buf = perf_get_aux(&pt->handle);
900 | int advance = 0; |
901 | u64 status; |
902 | |
903 | rdmsrl(MSR_IA32_RTIT_STATUS, status); |
904 | |
905 | if (status & RTIT_STATUS_ERROR) { |
906 | pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n" ); |
907 | pt_topa_dump(buf); |
908 | status &= ~RTIT_STATUS_ERROR; |
909 | } |
910 | |
911 | if (status & RTIT_STATUS_STOPPED) { |
912 | status &= ~RTIT_STATUS_STOPPED; |
913 | |
914 | /* |
915 | * On systems that only do single-entry ToPA, hitting STOP |
916 | * means we are already losing data; need to let the decoder |
917 | * know. |
918 | */ |
919 | if (!buf->single && |
920 | (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || |
921 | buf->output_off == pt_buffer_region_size(buf))) { |
perf_aux_output_flag(&pt->handle,
923 | PERF_AUX_FLAG_TRUNCATED); |
924 | advance++; |
925 | } |
926 | } |
927 | |
928 | /* |
929 | * Also on single-entry ToPA implementations, interrupt will come |
930 | * before the output reaches its output region's boundary. |
931 | */ |
932 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) && |
933 | !buf->snapshot && |
934 | pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) { |
935 | void *head = pt_buffer_region(buf); |
936 | |
937 | /* everything within this margin needs to be zeroed out */ |
938 | memset(head + buf->output_off, 0, |
939 | pt_buffer_region_size(buf) - |
940 | buf->output_off); |
941 | advance++; |
942 | } |
943 | |
944 | if (advance) |
945 | pt_buffer_advance(buf); |
946 | |
wrmsrl(MSR_IA32_RTIT_STATUS, status);
948 | } |
949 | |
950 | /** |
951 | * pt_read_offset() - translate registers into buffer pointers |
952 | * @buf: PT buffer. |
953 | * |
954 | * Set buffer's output pointers from MSR values. |
955 | */ |
956 | static void pt_read_offset(struct pt_buffer *buf) |
957 | { |
958 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
959 | struct topa_page *tp; |
960 | |
961 | if (!buf->single) { |
962 | rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); |
tp = phys_to_virt(pt->output_base);
964 | buf->cur = &tp->topa; |
965 | } |
966 | |
967 | rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); |
968 | /* offset within current output region */ |
969 | buf->output_off = pt->output_mask >> 32; |
970 | /* index of current output region within this table */ |
971 | if (!buf->single) |
972 | buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7; |
973 | } |
974 | |
975 | static struct topa_entry * |
976 | pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg) |
977 | { |
978 | struct topa_page *tp; |
979 | struct topa *topa; |
980 | unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0; |
981 | |
982 | /* |
983 | * Indicates a bug in the caller. |
984 | */ |
985 | if (WARN_ON_ONCE(pg >= buf->nr_pages)) |
986 | return NULL; |
987 | |
988 | /* |
989 | * First, find the ToPA table where @pg fits. With high |
990 | * order allocations, there shouldn't be many of these. |
991 | */ |
992 | list_for_each_entry(topa, &buf->tables, list) { |
993 | if (topa->offset + topa->size > pg << PAGE_SHIFT) |
994 | goto found; |
995 | } |
996 | |
997 | /* |
998 | * Hitting this means we have a problem in the ToPA |
999 | * allocation code. |
1000 | */ |
1001 | WARN_ON_ONCE(1); |
1002 | |
1003 | return NULL; |
1004 | |
1005 | found: |
1006 | /* |
1007 | * Indicates a problem in the ToPA allocation code. |
1008 | */ |
1009 | if (WARN_ON_ONCE(topa->last == -1)) |
1010 | return NULL; |
1011 | |
1012 | tp = topa_to_page(topa); |
1013 | cur_pg = PFN_DOWN(topa->offset); |
1014 | if (topa->z_count) { |
1015 | z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1); |
1016 | start_idx = topa->z_count + 1; |
1017 | } |
1018 | |
1019 | /* |
1020 | * Multiple entries at the beginning of the table have the same size, |
1021 | * ideally all of them; if @pg falls there, the search is done. |
1022 | */ |
1023 | if (pg >= cur_pg && pg < cur_pg + z_pg) { |
1024 | idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0); |
1025 | return &tp->table[idx]; |
1026 | } |
1027 | |
1028 | /* |
1029 | * Otherwise, slow path: iterate through the remaining entries. |
1030 | */ |
1031 | for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) { |
1032 | if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg) |
1033 | return &tp->table[idx]; |
1034 | |
1035 | cur_pg += TOPA_ENTRY_PAGES(topa, idx); |
1036 | } |
1037 | |
1038 | /* |
1039 | * Means we couldn't find a ToPA entry in the table that does match. |
1040 | */ |
1041 | WARN_ON_ONCE(1); |
1042 | |
1043 | return NULL; |
1044 | } |
1045 | |
1046 | static struct topa_entry * |
1047 | pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te) |
1048 | { |
1049 | unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1); |
1050 | struct topa_page *tp; |
1051 | struct topa *topa; |
1052 | |
1053 | tp = (struct topa_page *)table; |
1054 | if (tp->table != te) |
1055 | return --te; |
1056 | |
1057 | topa = &tp->topa; |
1058 | if (topa == buf->first) |
1059 | topa = buf->last; |
1060 | else |
1061 | topa = list_prev_entry(topa, list); |
1062 | |
1063 | tp = topa_to_page(topa); |
1064 | |
1065 | return &tp->table[topa->last - 1]; |
1066 | } |
1067 | |
1068 | /** |
1069 | * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer |
1070 | * @buf: PT buffer. |
1071 | * @handle: Current output handle. |
1072 | * |
1073 | * Place INT and STOP marks to prevent overwriting old data that the consumer |
1074 | * hasn't yet collected and waking up the consumer after a certain fraction of |
1075 | * the buffer has filled up. Only needed and sensible for non-snapshot counters. |
1076 | * |
1077 | * This obviously relies on buf::head to figure out buffer markers, so it has |
1078 | * to be called after pt_buffer_reset_offsets() and before the hardware tracing |
1079 | * is enabled. |
1080 | */ |
1081 | static int pt_buffer_reset_markers(struct pt_buffer *buf, |
1082 | struct perf_output_handle *handle) |
1083 | |
1084 | { |
1085 | unsigned long head = local64_read(&buf->head); |
1086 | unsigned long idx, npages, wakeup; |
1087 | |
1088 | if (buf->single) |
1089 | return 0; |
1090 | |
1091 | /* can't stop in the middle of an output region */ |
1092 | if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) { |
1093 | perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); |
1094 | return -EINVAL; |
1095 | } |
1096 | |
1097 | |
1098 | /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */ |
1099 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) |
1100 | return 0; |
1101 | |
1102 | /* clear STOP and INT from current entry */ |
1103 | if (buf->stop_te) { |
1104 | buf->stop_te->stop = 0; |
1105 | buf->stop_te->intr = 0; |
1106 | } |
1107 | |
1108 | if (buf->intr_te) |
1109 | buf->intr_te->intr = 0; |
1110 | |
1111 | /* how many pages till the STOP marker */ |
1112 | npages = handle->size >> PAGE_SHIFT; |
1113 | |
1114 | /* if it's on a page boundary, fill up one more page */ |
1115 | if (!offset_in_page(head + handle->size + 1)) |
1116 | npages++; |
1117 | |
1118 | idx = (head >> PAGE_SHIFT) + npages; |
1119 | idx &= buf->nr_pages - 1; |
1120 | |
1121 | if (idx != buf->stop_pos) { |
1122 | buf->stop_pos = idx; |
buf->stop_te = pt_topa_entry_for_page(buf, idx);
buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
1125 | } |
1126 | |
1127 | wakeup = handle->wakeup >> PAGE_SHIFT; |
1128 | |
1129 | /* in the worst case, wake up the consumer one page before hard stop */ |
1130 | idx = (head >> PAGE_SHIFT) + npages - 1; |
1131 | if (idx > wakeup) |
1132 | idx = wakeup; |
1133 | |
1134 | idx &= buf->nr_pages - 1; |
1135 | if (idx != buf->intr_pos) { |
1136 | buf->intr_pos = idx; |
buf->intr_te = pt_topa_entry_for_page(buf, idx);
buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
1139 | } |
1140 | |
1141 | buf->stop_te->stop = 1; |
1142 | buf->stop_te->intr = 1; |
1143 | buf->intr_te->intr = 1; |
1144 | |
1145 | return 0; |
1146 | } |
1147 | |
1148 | /** |
1149 | * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head |
1150 | * @buf: PT buffer. |
1151 | * @head: Write pointer (aux_head) from AUX buffer. |
1152 | * |
1153 | * Find the ToPA table and entry corresponding to given @head and set buffer's |
1154 | * "current" pointers accordingly. This is done after we have obtained the |
1155 | * current aux_head position from a successful call to perf_aux_output_begin() |
1156 | * to make sure the hardware is writing to the right place. |
1157 | * |
1158 | * This function modifies buf::{cur,cur_idx,output_off} that will be programmed |
1159 | * into PT msrs when the tracing is enabled and buf::head and buf::data_size, |
1160 | * which are used to determine INT and STOP markers' locations by a subsequent |
1161 | * call to pt_buffer_reset_markers(). |
1162 | */ |
1163 | static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head) |
1164 | { |
1165 | struct topa_page *cur_tp; |
1166 | struct topa_entry *te; |
1167 | int pg; |
1168 | |
1169 | if (buf->snapshot) |
1170 | head &= (buf->nr_pages << PAGE_SHIFT) - 1; |
1171 | |
1172 | if (!buf->single) { |
1173 | pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); |
1174 | te = pt_topa_entry_for_page(buf, pg); |
1175 | |
1176 | cur_tp = topa_entry_to_page(te); |
1177 | buf->cur = &cur_tp->topa; |
1178 | buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0); |
1179 | buf->output_off = head & (pt_buffer_region_size(buf) - 1); |
1180 | } else { |
1181 | buf->output_off = head; |
1182 | } |
1183 | |
1184 | local64_set(&buf->head, head); |
1185 | local_set(&buf->data_size, 0); |
1186 | } |
1187 | |
1188 | /** |
1189 | * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer |
1190 | * @buf: PT buffer. |
1191 | */ |
1192 | static void pt_buffer_fini_topa(struct pt_buffer *buf) |
1193 | { |
1194 | struct topa *topa, *iter; |
1195 | |
1196 | if (buf->single) |
1197 | return; |
1198 | |
1199 | list_for_each_entry_safe(topa, iter, &buf->tables, list) { |
1200 | /* |
1201 | * right now, this is in free_aux() path only, so |
1202 | * no need to unlink this table from the list |
1203 | */ |
1204 | topa_free(topa); |
1205 | } |
1206 | } |
1207 | |
1208 | /** |
1209 | * pt_buffer_init_topa() - initialize ToPA table for pt buffer |
1210 | * @buf: PT buffer. |
1211 | * @cpu: CPU on which to allocate. |
1212 | * @nr_pages: No. of pages to allocate. |
1213 | * @gfp: Allocation flags. |
1214 | * |
1215 | * Return: 0 on success or error code. |
1216 | */ |
1217 | static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu, |
1218 | unsigned long nr_pages, gfp_t gfp) |
1219 | { |
1220 | struct topa *topa; |
1221 | int err; |
1222 | |
1223 | topa = topa_alloc(cpu, gfp); |
1224 | if (!topa) |
1225 | return -ENOMEM; |
1226 | |
1227 | topa_insert_table(buf, topa); |
1228 | |
1229 | while (buf->nr_pages < nr_pages) { |
1230 | err = topa_insert_pages(buf, cpu, gfp); |
1231 | if (err) { |
1232 | pt_buffer_fini_topa(buf); |
1233 | return -ENOMEM; |
1234 | } |
1235 | } |
1236 | |
1237 | /* link last table to the first one, unless we're double buffering */ |
1238 | if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) { |
TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
1240 | TOPA_ENTRY(buf->last, -1)->end = 1; |
1241 | } |
1242 | |
1243 | pt_topa_dump(buf); |
1244 | return 0; |
1245 | } |
1246 | |
1247 | static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages) |
1248 | { |
1249 | struct page *p = virt_to_page(buf->data_pages[0]); |
1250 | int ret = -ENOTSUPP, order = 0; |
1251 | |
1252 | /* |
1253 | * We can use single range output mode |
1254 | * + in snapshot mode, where we don't need interrupts; |
1255 | * + if the hardware supports it; |
1256 | * + if the entire buffer is one contiguous allocation. |
1257 | */ |
1258 | if (!buf->snapshot) |
1259 | goto out; |
1260 | |
1261 | if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output)) |
1262 | goto out; |
1263 | |
if (PagePrivate(p))
1265 | order = page_private(p); |
1266 | |
1267 | if (1 << order != nr_pages) |
1268 | goto out; |
1269 | |
1270 | /* |
1271 | * Some processors cannot always support single range for more than |
1272 | * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might |
1273 | * also be affected, so for now rather than trying to keep track of |
1274 | * which ones, just disable it for all. |
1275 | */ |
1276 | if (nr_pages > 1) |
1277 | goto out; |
1278 | |
1279 | buf->single = true; |
1280 | buf->nr_pages = nr_pages; |
1281 | ret = 0; |
1282 | out: |
1283 | return ret; |
1284 | } |
1285 | |
1286 | /** |
1287 | * pt_buffer_setup_aux() - set up topa tables for a PT buffer |
1288 | * @event: Performance event |
1289 | * @pages: Array of pointers to buffer pages passed from perf core. |
1290 | * @nr_pages: Number of pages in the buffer. |
1291 | * @snapshot: If this is a snapshot/overwrite counter. |
1292 | * |
1293 | * This is a pmu::setup_aux callback that sets up ToPA tables and all the |
1294 | * bookkeeping for an AUX buffer. |
1295 | * |
1296 | * Return: Our private PT buffer structure. |
1297 | */ |
1298 | static void * |
1299 | pt_buffer_setup_aux(struct perf_event *event, void **pages, |
1300 | int nr_pages, bool snapshot) |
1301 | { |
1302 | struct pt_buffer *buf; |
1303 | int node, ret, cpu = event->cpu; |
1304 | |
1305 | if (!nr_pages) |
1306 | return NULL; |
1307 | |
1308 | /* |
1309 | * Only support AUX sampling in snapshot mode, where we don't |
1310 | * generate NMIs. |
1311 | */ |
1312 | if (event->attr.aux_sample_size && !snapshot) |
1313 | return NULL; |
1314 | |
1315 | if (cpu == -1) |
1316 | cpu = raw_smp_processor_id(); |
1317 | node = cpu_to_node(cpu); |
1318 | |
buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
1320 | if (!buf) |
1321 | return NULL; |
1322 | |
1323 | buf->snapshot = snapshot; |
1324 | buf->data_pages = pages; |
1325 | buf->stop_pos = -1; |
1326 | buf->intr_pos = -1; |
1327 | |
INIT_LIST_HEAD(&buf->tables);
1329 | |
1330 | ret = pt_buffer_try_single(buf, nr_pages); |
1331 | if (!ret) |
1332 | return buf; |
1333 | |
1334 | ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL); |
1335 | if (ret) { |
kfree(buf);
1337 | return NULL; |
1338 | } |
1339 | |
1340 | return buf; |
1341 | } |
1342 | |
1343 | /** |
1344 | * pt_buffer_free_aux() - perf AUX deallocation path callback |
1345 | * @data: PT buffer. |
1346 | */ |
1347 | static void pt_buffer_free_aux(void *data) |
1348 | { |
1349 | struct pt_buffer *buf = data; |
1350 | |
1351 | pt_buffer_fini_topa(buf); |
kfree(buf);
1353 | } |
1354 | |
1355 | static int pt_addr_filters_init(struct perf_event *event) |
1356 | { |
1357 | struct pt_filters *filters; |
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
1359 | |
1360 | if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) |
1361 | return 0; |
1362 | |
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
1364 | if (!filters) |
1365 | return -ENOMEM; |
1366 | |
1367 | if (event->parent) |
1368 | memcpy(filters, event->parent->hw.addr_filters, |
1369 | sizeof(*filters)); |
1370 | |
1371 | event->hw.addr_filters = filters; |
1372 | |
1373 | return 0; |
1374 | } |
1375 | |
1376 | static void pt_addr_filters_fini(struct perf_event *event) |
1377 | { |
kfree(event->hw.addr_filters);
1379 | event->hw.addr_filters = NULL; |
1380 | } |
1381 | |
1382 | #ifdef CONFIG_X86_64 |
1383 | /* Clamp to a canonical address greater-than-or-equal-to the address given */ |
1384 | static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits) |
1385 | { |
1386 | return __is_canonical_address(vaddr, vaddr_bits) ? |
1387 | vaddr : |
1388 | -BIT_ULL(vaddr_bits - 1); |
1389 | } |
1390 | |
1391 | /* Clamp to a canonical address less-than-or-equal-to the address given */ |
1392 | static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits) |
1393 | { |
1394 | return __is_canonical_address(vaddr, vaddr_bits) ? |
1395 | vaddr : |
1396 | BIT_ULL(vaddr_bits - 1) - 1; |
1397 | } |
1398 | #else |
1399 | #define clamp_to_ge_canonical_addr(x, y) (x) |
1400 | #define clamp_to_le_canonical_addr(x, y) (x) |
1401 | #endif |
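/*
 * Example, assuming 48-bit virtual addresses: a non-canonical start address
 * is clamped up to 0xffff800000000000 and a non-canonical end address down
 * to 0x00007fffffffffff, so the programmed range still covers all canonical
 * (i.e. executable) addresses that the original filter covered.
 */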
1402 | |
1403 | static int pt_event_addr_filters_validate(struct list_head *filters) |
1404 | { |
1405 | struct perf_addr_filter *filter; |
1406 | int range = 0; |
1407 | |
1408 | list_for_each_entry(filter, filters, entry) { |
1409 | /* |
1410 | * PT doesn't support single address triggers and |
1411 | * 'start' filters. |
1412 | */ |
1413 | if (!filter->size || |
1414 | filter->action == PERF_ADDR_FILTER_ACTION_START) |
1415 | return -EOPNOTSUPP; |
1416 | |
1417 | if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) |
1418 | return -EOPNOTSUPP; |
1419 | } |
1420 | |
1421 | return 0; |
1422 | } |
1423 | |
1424 | static void pt_event_addr_filters_sync(struct perf_event *event) |
1425 | { |
1426 | struct perf_addr_filters_head *head = perf_event_addr_filters(event); |
1427 | unsigned long msr_a, msr_b; |
1428 | struct perf_addr_filter_range *fr = event->addr_filter_ranges; |
1429 | struct pt_filters *filters = event->hw.addr_filters; |
1430 | struct perf_addr_filter *filter; |
1431 | int range = 0; |
1432 | |
1433 | if (!filters) |
1434 | return; |
1435 | |
1436 | list_for_each_entry(filter, &head->list, entry) { |
1437 | if (filter->path.dentry && !fr[range].start) { |
1438 | msr_a = msr_b = 0; |
1439 | } else { |
1440 | unsigned long n = fr[range].size - 1; |
1441 | unsigned long a = fr[range].start; |
1442 | unsigned long b; |
1443 | |
1444 | if (a > ULONG_MAX - n) |
1445 | b = ULONG_MAX; |
1446 | else |
1447 | b = a + n; |
1448 | /* |
1449 | * Apply the offset. 64-bit addresses written to the |
1450 | * MSRs must be canonical, but the range can encompass |
1451 | * non-canonical addresses. Since software cannot |
1452 | * execute at non-canonical addresses, adjusting to |
1453 | * canonical addresses does not affect the result of the |
1454 | * address filter. |
1455 | */ |
msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
1458 | if (msr_b < msr_a) |
1459 | msr_a = msr_b = 0; |
1460 | } |
1461 | |
1462 | filters->filter[range].msr_a = msr_a; |
1463 | filters->filter[range].msr_b = msr_b; |
1464 | if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER) |
1465 | filters->filter[range].config = 1; |
1466 | else |
1467 | filters->filter[range].config = 2; |
1468 | range++; |
1469 | } |
1470 | |
1471 | filters->nr_filters = range; |
1472 | } |
1473 | |
1474 | /** |
1475 | * intel_pt_interrupt() - PT PMI handler |
1476 | */ |
1477 | void intel_pt_interrupt(void) |
1478 | { |
1479 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
1480 | struct pt_buffer *buf; |
1481 | struct perf_event *event = pt->handle.event; |
1482 | |
1483 | /* |
1484 | * There may be a dangling PT bit in the interrupt status register |
1485 | * after PT has been disabled by pt_event_stop(). Make sure we don't |
1486 | * do anything (particularly, re-enable) for this event here. |
1487 | */ |
1488 | if (!READ_ONCE(pt->handle_nmi)) |
1489 | return; |
1490 | |
1491 | if (!event) |
1492 | return; |
1493 | |
1494 | pt_config_stop(event); |
1495 | |
buf = perf_get_aux(&pt->handle);
1497 | if (!buf) |
1498 | return; |
1499 | |
1500 | pt_read_offset(buf); |
1501 | |
1502 | pt_handle_status(pt); |
1503 | |
1504 | pt_update_head(pt); |
1505 | |
perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
1507 | |
1508 | if (!event->hw.state) { |
1509 | int ret; |
1510 | |
buf = perf_aux_output_begin(&pt->handle, event);
1512 | if (!buf) { |
1513 | event->hw.state = PERF_HES_STOPPED; |
1514 | return; |
1515 | } |
1516 | |
pt_buffer_reset_offsets(buf, pt->handle.head);
/* snapshot counters don't use PMI, so it's safe */
ret = pt_buffer_reset_markers(buf, &pt->handle);
if (ret) {
perf_aux_output_end(&pt->handle, 0);
1522 | return; |
1523 | } |
1524 | |
1525 | pt_config_buffer(buf); |
1526 | pt_config_start(event); |
1527 | } |
1528 | } |
1529 | |
1530 | void intel_pt_handle_vmx(int on) |
1531 | { |
1532 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
1533 | struct perf_event *event; |
1534 | unsigned long flags; |
1535 | |
1536 | /* PT plays nice with VMX, do nothing */ |
1537 | if (pt_pmu.vmx) |
1538 | return; |
1539 | |
1540 | /* |
1541 | * VMXON will clear RTIT_CTL.TraceEn; we need to make |
1542 | * sure to not try to set it while VMX is on. Disable |
1543 | * interrupts to avoid racing with pmu callbacks; |
1544 | * concurrent PMI should be handled fine. |
1545 | */ |
1546 | local_irq_save(flags); |
1547 | WRITE_ONCE(pt->vmx_on, on); |
1548 | |
1549 | /* |
1550 | * If an AUX transaction is in progress, it will contain |
1551 | * gap(s), so flag it PARTIAL to inform the user. |
1552 | */ |
1553 | event = pt->handle.event; |
1554 | if (event) |
perf_aux_output_flag(&pt->handle,
1556 | PERF_AUX_FLAG_PARTIAL); |
1557 | |
1558 | /* Turn PTs back on */ |
1559 | if (!on && event) |
wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);
1561 | |
1562 | local_irq_restore(flags); |
1563 | } |
1564 | EXPORT_SYMBOL_GPL(intel_pt_handle_vmx); |
1565 | |
1566 | /* |
1567 | * PMU callbacks |
1568 | */ |
1569 | |
1570 | static void pt_event_start(struct perf_event *event, int mode) |
1571 | { |
1572 | struct hw_perf_event *hwc = &event->hw; |
1573 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
1574 | struct pt_buffer *buf; |
1575 | |
buf = perf_aux_output_begin(&pt->handle, event);
1577 | if (!buf) |
1578 | goto fail_stop; |
1579 | |
pt_buffer_reset_offsets(buf, pt->handle.head);
if (!buf->snapshot) {
if (pt_buffer_reset_markers(buf, &pt->handle))
1583 | goto fail_end_stop; |
1584 | } |
1585 | |
1586 | WRITE_ONCE(pt->handle_nmi, 1); |
1587 | hwc->state = 0; |
1588 | |
1589 | pt_config_buffer(buf); |
1590 | pt_config(event); |
1591 | |
1592 | return; |
1593 | |
1594 | fail_end_stop: |
perf_aux_output_end(&pt->handle, 0);
1596 | fail_stop: |
1597 | hwc->state = PERF_HES_STOPPED; |
1598 | } |
1599 | |
1600 | static void pt_event_stop(struct perf_event *event, int mode) |
1601 | { |
1602 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
1603 | |
1604 | /* |
1605 | * Protect against the PMI racing with disabling wrmsr, |
1606 | * see comment in intel_pt_interrupt(). |
1607 | */ |
1608 | WRITE_ONCE(pt->handle_nmi, 0); |
1609 | |
1610 | pt_config_stop(event); |
1611 | |
1612 | if (event->hw.state == PERF_HES_STOPPED) |
1613 | return; |
1614 | |
1615 | event->hw.state = PERF_HES_STOPPED; |
1616 | |
1617 | if (mode & PERF_EF_UPDATE) { |
struct pt_buffer *buf = perf_get_aux(&pt->handle);
1619 | |
1620 | if (!buf) |
1621 | return; |
1622 | |
1623 | if (WARN_ON_ONCE(pt->handle.event != event)) |
1624 | return; |
1625 | |
1626 | pt_read_offset(buf); |
1627 | |
1628 | pt_handle_status(pt); |
1629 | |
1630 | pt_update_head(pt); |
1631 | |
1632 | if (buf->snapshot) |
1633 | pt->handle.head = |
1634 | local_xchg(&buf->data_size, |
1635 | buf->nr_pages << PAGE_SHIFT); |
perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
1637 | } |
1638 | } |
1639 | |
1640 | static long pt_event_snapshot_aux(struct perf_event *event, |
1641 | struct perf_output_handle *handle, |
1642 | unsigned long size) |
1643 | { |
1644 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
struct pt_buffer *buf = perf_get_aux(&pt->handle);
1646 | unsigned long from = 0, to; |
1647 | long ret; |
1648 | |
1649 | if (WARN_ON_ONCE(!buf)) |
1650 | return 0; |
1651 | |
1652 | /* |
1653 | * Sampling is only allowed on snapshot events; |
1654 | * see pt_buffer_setup_aux(). |
1655 | */ |
1656 | if (WARN_ON_ONCE(!buf->snapshot)) |
1657 | return 0; |
1658 | |
1659 | /* |
1660 | * Here, handle_nmi tells us if the tracing is on |
1661 | */ |
1662 | if (READ_ONCE(pt->handle_nmi)) |
1663 | pt_config_stop(event); |
1664 | |
1665 | pt_read_offset(buf); |
1666 | pt_update_head(pt); |
1667 | |
1668 | to = local_read(&buf->data_size); |
1669 | if (to < size) |
1670 | from = buf->nr_pages << PAGE_SHIFT; |
1671 | from += to - size; |
1672 | |
ret = perf_output_copy_aux(&pt->handle, handle, from, to);
1674 | |
1675 | /* |
1676 | * If the tracing was on when we turned up, restart it. |
1677 | * Compiler barrier not needed as we couldn't have been |
1678 | * preempted by anything that touches pt->handle_nmi. |
1679 | */ |
1680 | if (pt->handle_nmi) |
1681 | pt_config_start(event); |
1682 | |
1683 | return ret; |
1684 | } |
1685 | |
1686 | static void pt_event_del(struct perf_event *event, int mode) |
1687 | { |
1688 | pt_event_stop(event, PERF_EF_UPDATE); |
1689 | } |
1690 | |
1691 | static int pt_event_add(struct perf_event *event, int mode) |
1692 | { |
1693 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
1694 | struct hw_perf_event *hwc = &event->hw; |
1695 | int ret = -EBUSY; |
1696 | |
1697 | if (pt->handle.event) |
1698 | goto fail; |
1699 | |
1700 | if (mode & PERF_EF_START) { |
pt_event_start(event, 0);
1702 | ret = -EINVAL; |
1703 | if (hwc->state == PERF_HES_STOPPED) |
1704 | goto fail; |
1705 | } else { |
1706 | hwc->state = PERF_HES_STOPPED; |
1707 | } |
1708 | |
1709 | ret = 0; |
1710 | fail: |
1711 | |
1712 | return ret; |
1713 | } |
1714 | |
1715 | static void pt_event_read(struct perf_event *event) |
1716 | { |
1717 | } |
1718 | |
1719 | static void pt_event_destroy(struct perf_event *event) |
1720 | { |
1721 | pt_addr_filters_fini(event); |
x86_del_exclusive(x86_lbr_exclusive_pt);
1723 | } |
1724 | |
1725 | static int pt_event_init(struct perf_event *event) |
1726 | { |
1727 | if (event->attr.type != pt_pmu.pmu.type) |
1728 | return -ENOENT; |
1729 | |
1730 | if (!pt_event_valid(event)) |
1731 | return -EINVAL; |
1732 | |
if (x86_add_exclusive(x86_lbr_exclusive_pt))
1734 | return -EBUSY; |
1735 | |
1736 | if (pt_addr_filters_init(event)) { |
x86_del_exclusive(x86_lbr_exclusive_pt);
1738 | return -ENOMEM; |
1739 | } |
1740 | |
1741 | event->destroy = pt_event_destroy; |
1742 | |
1743 | return 0; |
1744 | } |
1745 | |
1746 | void cpu_emergency_stop_pt(void) |
1747 | { |
1748 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
1749 | |
1750 | if (pt->handle.event) |
pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
1752 | } |
1753 | |
1754 | int is_intel_pt_event(struct perf_event *event) |
1755 | { |
1756 | return event->pmu == &pt_pmu.pmu; |
1757 | } |
1758 | |
1759 | static __init int pt_init(void) |
1760 | { |
1761 | int ret, cpu, prior_warn = 0; |
1762 | |
1763 | BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE); |
1764 | |
1765 | if (!boot_cpu_has(X86_FEATURE_INTEL_PT)) |
1766 | return -ENODEV; |
1767 | |
1768 | cpus_read_lock(); |
1769 | for_each_online_cpu(cpu) { |
1770 | u64 ctl; |
1771 | |
ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
1773 | if (!ret && (ctl & RTIT_CTL_TRACEEN)) |
1774 | prior_warn++; |
1775 | } |
1776 | cpus_read_unlock(); |
1777 | |
1778 | if (prior_warn) { |
x86_add_exclusive(x86_lbr_exclusive_pt);
pr_warn("PT is enabled at boot time, doing nothing\n");
1781 | |
1782 | return -EBUSY; |
1783 | } |
1784 | |
1785 | ret = pt_pmu_hw_init(); |
1786 | if (ret) |
1787 | return ret; |
1788 | |
1789 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) { |
1790 | pr_warn("ToPA output is not supported on this CPU\n" ); |
1791 | return -ENODEV; |
1792 | } |
1793 | |
1794 | if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) |
1795 | pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG; |
1796 | |
1797 | pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE; |
1798 | pt_pmu.pmu.attr_groups = pt_attr_groups; |
1799 | pt_pmu.pmu.task_ctx_nr = perf_sw_context; |
1800 | pt_pmu.pmu.event_init = pt_event_init; |
1801 | pt_pmu.pmu.add = pt_event_add; |
1802 | pt_pmu.pmu.del = pt_event_del; |
1803 | pt_pmu.pmu.start = pt_event_start; |
1804 | pt_pmu.pmu.stop = pt_event_stop; |
1805 | pt_pmu.pmu.snapshot_aux = pt_event_snapshot_aux; |
1806 | pt_pmu.pmu.read = pt_event_read; |
1807 | pt_pmu.pmu.setup_aux = pt_buffer_setup_aux; |
1808 | pt_pmu.pmu.free_aux = pt_buffer_free_aux; |
1809 | pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync; |
1810 | pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate; |
1811 | pt_pmu.pmu.nr_addr_filters = |
1812 | intel_pt_validate_hw_cap(PT_CAP_num_address_ranges); |
1813 | |
ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
1815 | |
1816 | return ret; |
1817 | } |
1818 | arch_initcall(pt_init); |
1819 | |