// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

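/*
 * Bits of the intel_pt event config checked below. These mirror the bit
 * positions of the corresponding controls in the IA32_RTIT_CTL MSR
 * (e.g. BranchEn is bit 13).
 */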
#define INTEL_PT_CFG_PASS_THRU	BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN	BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN	BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN	BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS	BIT_ULL(55)

struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool sync_switch_not_supported;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	bool cap_event_trace;
	bool have_guest_sideband;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	u64 first_timestamp;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_cycles;
	u64 cycles_sample_type;
	u64 cycles_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 psb_id;

	bool single_pebs;
	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 evt_sample_type;
	u64 evt_id;

	u64 iflag_chg_sample_type;
	u64 iflag_chg_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	int max_loops;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;

	u64 dflt_tsc_offset;
	struct rb_root vmcs_info;
};

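/*
 * Per-queue state machine used with "sync_switch", i.e. when synchronizing
 * decoding with context switch events (tracked in ptq->switch_state).
 */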
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

/* applicable_counters is 64-bits */
#define INTEL_PT_MAX_PEBS 64

struct intel_pt_pebs_event {
	struct evsel *evsel;
	u64 id;
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	bool sample_ipc;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	struct machine *guest_machine;
	struct thread *guest_thread;
	struct thread *unknown_guest_thread;
	pid_t guest_machine_pid;
	pid_t guest_pid;
	pid_t guest_tid;
	int vcpu;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_cy_insn_cnt;
	u64 last_cy_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
	struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							u64 vmcs,
							u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);

		if (v->vmcs == vmcs)
			return v;

		if (vmcs < v->vmcs)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	v = zalloc(sizeof(*v));
	if (v) {
		v->vmcs = vmcs;
		v->tsc_offset = dflt_tsc_offset;
		v->reliable = dflt_tsc_offset;

		rb_link_node(&v->rb_node, parent, p);
		rb_insert_color(&v->rb_node, rb_root);
	}

	return v;
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
	struct intel_pt_queue *ptq = data;
	struct intel_pt *pt = ptq->pt;

	if (!vmcs && !pt->dflt_tsc_offset)
		return NULL;

	return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}

static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
	struct intel_pt_vmcs_info *v;
	struct rb_node *n;

	n = rb_first(&pt->vmcs_info);
	while (n) {
		v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
		n = rb_next(n);
		rb_erase(&v->rb_node, &pt->vmcs_info);
		free(v);
	}
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive,
				      pt->synth_opts.vm_time_correlation);
	if (!start)
		return -EINVAL;
	/*
	 * In the case of vm_time_correlation, the overlap might contain TSC
	 * packets that will not be fixed, and that will then no longer work
	 * for overlap detection. Avoid that by zeroing out the overlap.
	 */
	if (pt->synth_opts.vm_time_correlation)
		memset(b->data, 0, start - b->data);
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}

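/*
 * Map in the buffer's data if necessary, fix up any overlap with the
 * previous buffer (possible in snapshot or sampling mode), and describe the
 * result to the decoder via struct intel_pt_buffer.
 */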
static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}

/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}

/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}

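/*
 * Per-dso cache of decoded instruction ranges, keyed by file offset: an
 * entry records the instruction and byte counts from the offset up to the
 * next branch, letting intel_pt_walk_next_insn() skip ahead on a hit.
 */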
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	bool				emulated_ptwrite;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

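/*
 * Number of hash bits for the auxtrace cache: roughly log2 of the dso data
 * size divided by the configured divisor, with lower and upper bounds.
 */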
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}

static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
	/* Assumes 64-bit kernel */
	return ip & (1ULL << 63);
}

static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
	if (nr) {
		return intel_pt_guest_kernel_ip(ip) ?
		       PERF_RECORD_MISC_GUEST_KERNEL :
		       PERF_RECORD_MISC_GUEST_USER;
	}

	return ip >= ptq->pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
	/* No support for non-zero CS base */
	if (from_ip)
		return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
	return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}

static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;

	if (ptq->guest_machine && pid == ptq->guest_machine->pid)
		return 0;

	ptq->guest_machine = NULL;
	thread__zput(ptq->unknown_guest_thread);

	if (symbol_conf.guest_code) {
		thread__zput(ptq->guest_thread);
		ptq->guest_thread = machines__findnew_guest_code(machines, pid);
	}

	machine = machines__find_guest(machines, pid);
	if (!machine)
		return -1;

	ptq->unknown_guest_thread = machine__idle_thread(machine);
	if (!ptq->unknown_guest_thread)
		return -1;

	ptq->guest_machine = machine;

	return 0;
}

static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn)
{
	return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL;
}

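/*
 * An emulated ptwrite is signalled by a 16-byte forward jump (see
 * intel_pt_jmp_16()) over a UD2 instruction (0x0f 0x0b) followed by the
 * magic string; intel_pt_emulated_ptwrite() checks for that signature at
 * the file offset after the jump instruction.
 */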
#define PTWRITE_MAGIC		"\x0f\x0bperf,ptwrite  "
#define PTWRITE_MAGIC_LEN	16

static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset)
{
	unsigned char buf[PTWRITE_MAGIC_LEN];
	ssize_t len;

	len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN);
	if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) {
		intel_pt_log("Emulated ptwrite signature found\n");
		return true;
	}
	intel_pt_log("Emulated ptwrite signature not found\n");
	return false;
}

static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64, ret = 0;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;
	bool nr;

	addr_location__init(&al);
	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	nr = ptq->state->to_nr;
	cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);

	if (nr) {
		if (ptq->pt->have_guest_sideband) {
			if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
				intel_pt_log("ERROR: guest sideband but no guest machine\n");
				ret = -EINVAL;
				goto out_ret;
			}
		} else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
			   intel_pt_get_guest(ptq)) {
			intel_pt_log("ERROR: no guest machine\n");
			ret = -EINVAL;
			goto out_ret;
		}
		machine = ptq->guest_machine;
		thread = ptq->guest_thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) {
				intel_pt_log("ERROR: no guest thread\n");
				ret = -EINVAL;
				goto out_ret;
			}
			thread = ptq->unknown_guest_thread;
		}
	} else {
		thread = ptq->thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_KERNEL) {
				intel_pt_log("ERROR: no thread\n");
				ret = -EINVAL;
				goto out_ret;
			}
			thread = ptq->pt->unknown_thread;
		}
	}

	while (1) {
		struct dso *dso;

		if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
			if (al.map)
				intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip);
			else
				intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip);
			addr_location__exit(&al);
			ret = -EINVAL;
			goto out_ret;
		}
		dso = map__dso(al.map);

		if (dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) {
			ret = -ENOENT;
			goto out_ret;
		}

		offset = map__map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn, INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				ret = 0;
				goto out_ret;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0) {
				intel_pt_log("ERROR: failed to read at offset %#" PRIx64 " ",
					     offset);
				if (intel_pt_enable_logging)
					dso__fprintf(dso, intel_pt_log_fp());
				ret = -EINVAL;
				goto out_ret;
			}

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) {
				ret = -EINVAL;
				goto out_ret;
			}

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) {
				bool eptw;
				u64 offs;

				if (!intel_pt_jmp_16(intel_pt_insn))
					goto out;
				/* Check for emulated ptwrite */
				offs = offset + intel_pt_insn->length;
				eptw = intel_pt_emulated_ptwrite(dso, machine, offs);
				intel_pt_insn->emulated_ptwrite = eptw;
				goto out;
			}

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip) {
				intel_pt_insn->length = 0;
				goto out_no_cache;
			}

			if (*ip >= map__end(al.map))
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset);
		if (e)
			goto out_ret;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

out_ret:
	addr_location__exit(&al);
	return ret;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	addr_location__exit(&al);
	return 0;
}

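/*
 * Decide whether a TIP.PGD packet IP lies outside the trace: true if it
 * hit a trace-stop region, or if address filters exist but it hit none of
 * them.
 */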
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#" PRIx64 " offset %#" PRIx64 " in %s hit filter: %s offset %#" PRIx64 " size %#" PRIx64 "\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#" PRIx64 " offset %#" PRIx64 " in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;
	int res;

	if (ptq->state->to_nr) {
		if (intel_pt_guest_kernel_ip(ip))
			return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
		/* No support for decoding guest user space */
		return -EINVAL;
	} else if (ip >= ptq->pt->kernel_start) {
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
	}

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
		return -EINVAL;

	offset = map__map_ip(al.map, ip);

	res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
	addr_location__exit(&al);
	return res;
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & INTEL_PT_CFG_PASS_THRU) &&
		    !(config & INTEL_PT_CFG_BRANCH_EN))
			return false;
	}
	return true;
}

static bool intel_pt_disabled_tnt(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    config & INTEL_PT_CFG_TNT_DIS)
			return true;
	}
	return false;
}

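/*
 * The MTC period is a bit-field within the event config; derive the shift
 * from the lowest set bit of the mtc_freq_bits mask and extract the value
 * from the first matching event.
 */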
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_have_mtc(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->mtc_bit))
			return true;
	}
	return false;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ctl(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return config;
	}
	return 0;
}

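/*
 * Inverse of the TSC-to-perf-time conversion, i.e. approximately
 * ticks = (ns << time_shift) / time_mult, split into quotient and
 * remainder to avoid overflowing the shift.
 */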
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}

static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
	size_t sz = sizeof(struct ip_callchain);

	/* Add 1 to callchain_sz for callchain context */
	sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
	return zalloc(sz);
}

static int intel_pt_callchain_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
			evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
	}

	pt->chain = intel_pt_alloc_chain(pt);
	if (!pt->chain)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_callchain(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__sample_late(thread, sample->cpu, pt->chain,
				  pt->synth_opts.callchain_sz + 1, sample->ip,
				  pt->kernel_start);

	sample->callchain = pt->chain;
}

static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
	size_t sz = sizeof(struct branch_stack);

	sz += entry_cnt * sizeof(struct branch_entry);
	return zalloc(sz);
}

static int intel_pt_br_stack_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
			evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
	}

	pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
	if (!pt->br_stack)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_br_stack(struct intel_pt *pt,
				  struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
				     pt->br_stack_sz, sample->ip,
				     pt->kernel_start);

	sample->branch_stack = pt->br_stack;
	thread__put(thread);
}

/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		ptq->chain = intel_pt_alloc_chain(pt);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
		unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);

		ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
		if (!ptq->last_branch)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.lookahead = intel_pt_lookahead;
	params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.ctl = intel_pt_ctl(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
	params.quick = pt->synth_opts.quick;
	params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
	params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
	params.first_timestamp = pt->first_timestamp;
	params.max_loops = pt->max_loops;

	/* Cannot walk code without TNT, so force 'quick' mode */
	if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick)
		params.quick = 1;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions || pt->synth_opts.cycles) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	thread__zput(ptq->guest_thread);
	thread__zput(ptq->unknown_guest_thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
	unsigned int i;

	pt->first_timestamp = timestamp;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && ptq->decoder)
			intel_pt_set_first_timestamp(ptq->decoder, timestamp);
	}
}

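/*
 * With guest sideband, ptq->pid identifies the guest machine. Look up the
 * guest thread currently running on the vCPU that ptq->thread represents.
 */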
static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t machine_pid = ptq->pid;
	pid_t tid;
	int vcpu;

	if (machine_pid <= 0)
		return 0; /* Not a guest machine */

	machine = machines__find(machines, machine_pid);
	if (!machine)
		return 0; /* Not a guest machine */

	if (ptq->guest_machine != machine) {
		ptq->guest_machine = NULL;
		thread__zput(ptq->guest_thread);
		thread__zput(ptq->unknown_guest_thread);

		ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
		if (!ptq->unknown_guest_thread)
			return -1;
		ptq->guest_machine = machine;
	}

	vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
	if (vcpu < 0)
		return -1;

	tid = machine__get_current_tid(machine, vcpu);

	if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
		thread__zput(ptq->guest_thread);

	if (!ptq->guest_thread) {
		ptq->guest_thread = machine__find_thread(machine, -1, tid);
		if (!ptq->guest_thread)
			return -1;
	}

	ptq->guest_machine_pid = machine_pid;
	ptq->guest_pid = thread__pid(ptq->guest_thread);
	ptq->guest_tid = tid;
	ptq->vcpu = vcpu;

	return 0;
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		if (ptq->tid == -1)
			ptq->pid = -1;
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = thread__pid(ptq->thread);
		if (queue->cpu == -1)
			ptq->cpu = thread__cpu(ptq->thread);
	}

	if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
		ptq->guest_machine_pid = 0;
		ptq->guest_pid = -1;
		ptq->guest_tid = -1;
		ptq->vcpu = -1;
	}
}

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	ptq->insn_len = 0;
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (!ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_TRACE_END;
		else if (ptq->state->from_nr && !ptq->state->to_nr)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_VMEXIT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;

	if (pt->cap_event_trace) {
		if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
			if (!ptq->state->from_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
			if (ptq->state->from_iflag != ptq->state->to_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
		} else if (!ptq->state->to_iflag) {
			ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
		}
	}
}

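/*
 * Select the first time range. A zero start time means tracing is
 * initially within the range, so the next timestamp of interest is the
 * range end.
 */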
1554 | static void intel_pt_setup_time_range(struct intel_pt *pt, |
1555 | struct intel_pt_queue *ptq) |
1556 | { |
1557 | if (!pt->range_cnt) |
1558 | return; |
1559 | |
1560 | ptq->sel_timestamp = pt->time_ranges[0].start; |
1561 | ptq->sel_idx = 0; |
1562 | |
1563 | if (ptq->sel_timestamp) { |
1564 | ptq->sel_start = true; |
1565 | } else { |
1566 | ptq->sel_timestamp = pt->time_ranges[0].end; |
1567 | ptq->sel_start = false; |
1568 | } |
1569 | } |
1570 | |
1571 | static int intel_pt_setup_queue(struct intel_pt *pt, |
1572 | struct auxtrace_queue *queue, |
1573 | unsigned int queue_nr) |
1574 | { |
1575 | struct intel_pt_queue *ptq = queue->priv; |
1576 | |
1577 | if (list_empty(head: &queue->head)) |
1578 | return 0; |
1579 | |
1580 | if (!ptq) { |
1581 | ptq = intel_pt_alloc_queue(pt, queue_nr); |
1582 | if (!ptq) |
1583 | return -ENOMEM; |
1584 | queue->priv = ptq; |
1585 | |
1586 | if (queue->cpu != -1) |
1587 | ptq->cpu = queue->cpu; |
1588 | ptq->tid = queue->tid; |
1589 | |
1590 | ptq->cbr_seen = UINT_MAX; |
1591 | |
1592 | if (pt->sampling_mode && !pt->snapshot_mode && |
1593 | pt->timeless_decoding) |
1594 | ptq->step_through_buffers = true; |
1595 | |
1596 | ptq->sync_switch = pt->sync_switch; |
1597 | |
1598 | intel_pt_setup_time_range(pt, ptq); |
1599 | } |
1600 | |
1601 | if (!ptq->on_heap && |
1602 | (!ptq->sync_switch || |
1603 | ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { |
1604 | const struct intel_pt_state *state; |
1605 | int ret; |
1606 | |
1607 | if (pt->timeless_decoding) |
1608 | return 0; |
1609 | |
1610 | intel_pt_log("queue %u getting timestamp\n" , queue_nr); |
1611 | intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n" , |
1612 | queue_nr, ptq->cpu, ptq->pid, ptq->tid); |
1613 | |
1614 | if (ptq->sel_start && ptq->sel_timestamp) { |
1615 | ret = intel_pt_fast_forward(decoder: ptq->decoder, |
1616 | timestamp: ptq->sel_timestamp); |
1617 | if (ret) |
1618 | return ret; |
1619 | } |
1620 | |
1621 | while (1) { |
1622 | state = intel_pt_decode(decoder: ptq->decoder); |
1623 | if (state->err) { |
1624 | if (state->err == INTEL_PT_ERR_NODATA) { |
1625 | intel_pt_log("queue %u has no timestamp\n" , |
1626 | queue_nr); |
1627 | return 0; |
1628 | } |
1629 | continue; |
1630 | } |
1631 | if (state->timestamp) |
1632 | break; |
1633 | } |
1634 | |
1635 | ptq->timestamp = state->timestamp; |
1636 | intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n" , |
1637 | queue_nr, ptq->timestamp); |
1638 | ptq->state = state; |
1639 | ptq->have_sample = true; |
1640 | if (ptq->sel_start && ptq->sel_timestamp && |
1641 | ptq->timestamp < ptq->sel_timestamp) |
1642 | ptq->have_sample = false; |
1643 | intel_pt_sample_flags(ptq); |
1644 | ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp); |
1645 | if (ret) |
1646 | return ret; |
1647 | ptq->on_heap = true; |
1648 | } |
1649 | |
1650 | return 0; |
1651 | } |
1652 | |
1653 | static int intel_pt_setup_queues(struct intel_pt *pt) |
1654 | { |
1655 | unsigned int i; |
1656 | int ret; |
1657 | |
1658 | for (i = 0; i < pt->queues.nr_queues; i++) { |
1659 | ret = intel_pt_setup_queue(pt, queue: &pt->queues.queue_array[i], queue_nr: i); |
1660 | if (ret) |
1661 | return ret; |
1662 | } |
1663 | return 0; |
1664 | } |
1665 | |
1666 | static inline bool intel_pt_skip_event(struct intel_pt *pt) |
1667 | { |
1668 | return pt->synth_opts.initial_skip && |
1669 | pt->num_events++ < pt->synth_opts.initial_skip; |
1670 | } |
1671 | |
1672 | /* |
1673 | * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen. |
1674 | * Also ensure CBR is first non-skipped event by allowing for 4 more samples |
1675 | * from this decoder state. |
1676 | */ |
1677 | static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt) |
1678 | { |
1679 | return pt->synth_opts.initial_skip && |
1680 | pt->num_events + 4 < pt->synth_opts.initial_skip; |
1681 | } |
1682 | |
1683 | static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq, |
1684 | union perf_event *event, |
1685 | struct perf_sample *sample) |
1686 | { |
1687 | event->sample.header.type = PERF_RECORD_SAMPLE; |
1688 | event->sample.header.size = sizeof(struct perf_event_header); |
1689 | |
1690 | sample->pid = ptq->pid; |
1691 | sample->tid = ptq->tid; |
1692 | |
1693 | if (ptq->pt->have_guest_sideband) { |
1694 | if ((ptq->state->from_ip && ptq->state->from_nr) || |
1695 | (ptq->state->to_ip && ptq->state->to_nr)) { |
1696 | sample->pid = ptq->guest_pid; |
1697 | sample->tid = ptq->guest_tid; |
1698 | sample->machine_pid = ptq->guest_machine_pid; |
1699 | sample->vcpu = ptq->vcpu; |
1700 | } |
1701 | } |
1702 | |
1703 | sample->cpu = ptq->cpu; |
1704 | sample->insn_len = ptq->insn_len; |
1705 | memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); |
1706 | } |
1707 | |
1708 | static void intel_pt_prep_b_sample(struct intel_pt *pt, |
1709 | struct intel_pt_queue *ptq, |
1710 | union perf_event *event, |
1711 | struct perf_sample *sample) |
1712 | { |
1713 | intel_pt_prep_a_sample(ptq, event, sample); |
1714 | |
1715 | if (!pt->timeless_decoding) |
1716 | sample->time = tsc_to_perf_time(cyc: ptq->timestamp, tc: &pt->tc); |
1717 | |
1718 | sample->ip = ptq->state->from_ip; |
1719 | sample->addr = ptq->state->to_ip; |
1720 | sample->cpumode = intel_pt_cpumode(ptq, from_ip: sample->ip, to_ip: sample->addr); |
1721 | sample->period = 1; |
1722 | sample->flags = ptq->flags; |
1723 | |
1724 | event->sample.header.misc = sample->cpumode; |
1725 | } |
1726 | |
1727 | static int intel_pt_inject_event(union perf_event *event, |
1728 | struct perf_sample *sample, u64 type) |
1729 | { |
1730 | event->header.size = perf_event__sample_event_size(sample, type, 0); |
1731 | return perf_event__synthesize_sample(event, type, 0, sample); |
1732 | } |
1733 | |
1734 | static inline int intel_pt_opt_inject(struct intel_pt *pt, |
1735 | union perf_event *event, |
1736 | struct perf_sample *sample, u64 type) |
1737 | { |
1738 | if (!pt->synth_opts.inject) |
1739 | return 0; |
1740 | |
1741 | return intel_pt_inject_event(event, sample, type); |
1742 | } |
1743 | |
1744 | static int intel_pt_deliver_synth_event(struct intel_pt *pt, |
1745 | union perf_event *event, |
1746 | struct perf_sample *sample, u64 type) |
1747 | { |
1748 | int ret; |
1749 | |
1750 | ret = intel_pt_opt_inject(pt, event, sample, type); |
1751 | if (ret) |
1752 | return ret; |
1753 | |
1754 | ret = perf_session__deliver_synth_event(session: pt->session, event, sample); |
1755 | if (ret) |
1756 | pr_err("Intel PT: failed to deliver event, error %d\n" , ret); |
1757 | |
1758 | return ret; |
1759 | } |
1760 | |
1761 | static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) |
1762 | { |
1763 | struct intel_pt *pt = ptq->pt; |
1764 | union perf_event *event = ptq->event_buf; |
1765 | struct perf_sample sample = { .ip = 0, }; |
1766 | struct dummy_branch_stack { |
1767 | u64 nr; |
1768 | u64 hw_idx; |
1769 | struct branch_entry entries; |
1770 | } dummy_bs; |
1771 | |
1772 | if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) |
1773 | return 0; |
1774 | |
1775 | if (intel_pt_skip_event(pt)) |
1776 | return 0; |
1777 | |
1778 | intel_pt_prep_b_sample(pt, ptq, event, sample: &sample); |
1779 | |
1780 | sample.id = ptq->pt->branches_id; |
1781 | sample.stream_id = ptq->pt->branches_id; |
1782 | |
1783 | /* |
1784 | * perf report cannot handle events without a branch stack when using |
1785 | * SORT_MODE__BRANCH so make a dummy one. |
1786 | */ |
1787 | if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) { |
1788 | dummy_bs = (struct dummy_branch_stack){ |
1789 | .nr = 1, |
1790 | .hw_idx = -1ULL, |
1791 | .entries = { |
1792 | .from = sample.ip, |
1793 | .to = sample.addr, |
1794 | }, |
1795 | }; |
1796 | sample.branch_stack = (struct branch_stack *)&dummy_bs; |
1797 | } |
1798 | |
1799 | if (ptq->sample_ipc) |
1800 | sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt; |
1801 | if (sample.cyc_cnt) { |
1802 | sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt; |
1803 | ptq->last_br_insn_cnt = ptq->ipc_insn_cnt; |
1804 | ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt; |
1805 | } |
1806 | |
1807 | return intel_pt_deliver_synth_event(pt, event, sample: &sample, |
1808 | type: pt->branches_sample_type); |
1809 | } |
1810 | |
1811 | static void intel_pt_prep_sample(struct intel_pt *pt, |
1812 | struct intel_pt_queue *ptq, |
1813 | union perf_event *event, |
1814 | struct perf_sample *sample) |
1815 | { |
1816 | intel_pt_prep_b_sample(pt, ptq, event, sample); |
1817 | |
1818 | if (pt->synth_opts.callchain) { |
1819 | thread_stack__sample(thread: ptq->thread, cpu: ptq->cpu, chain: ptq->chain, |
1820 | sz: pt->synth_opts.callchain_sz + 1, |
1821 | ip: sample->ip, kernel_start: pt->kernel_start); |
1822 | sample->callchain = ptq->chain; |
1823 | } |
1824 | |
1825 | if (pt->synth_opts.last_branch) { |
		thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
					pt->br_stack_sz);
1828 | sample->branch_stack = ptq->last_branch; |
1829 | } |
1830 | } |
1831 | |
1832 | static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) |
1833 | { |
1834 | struct intel_pt *pt = ptq->pt; |
1835 | union perf_event *event = ptq->event_buf; |
1836 | struct perf_sample sample = { .ip = 0, }; |
1837 | |
1838 | if (intel_pt_skip_event(pt)) |
1839 | return 0; |
1840 | |
	intel_pt_prep_sample(pt, ptq, event, &sample);
1842 | |
1843 | sample.id = ptq->pt->instructions_id; |
1844 | sample.stream_id = ptq->pt->instructions_id; |
1845 | if (pt->synth_opts.quick) |
1846 | sample.period = 1; |
1847 | else |
1848 | sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; |
1849 | |
1850 | if (ptq->sample_ipc) |
1851 | sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt; |
1852 | if (sample.cyc_cnt) { |
1853 | sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt; |
1854 | ptq->last_in_insn_cnt = ptq->ipc_insn_cnt; |
1855 | ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt; |
1856 | } |
1857 | |
1858 | ptq->last_insn_cnt = ptq->state->tot_insn_cnt; |
1859 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->instructions_sample_type);
1862 | } |
1863 | |
1864 | static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq) |
1865 | { |
1866 | struct intel_pt *pt = ptq->pt; |
1867 | union perf_event *event = ptq->event_buf; |
1868 | struct perf_sample sample = { .ip = 0, }; |
1869 | u64 period = 0; |
1870 | |
1871 | if (ptq->sample_ipc) |
1872 | period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt; |
1873 | |
1874 | if (!period || intel_pt_skip_event(pt)) |
1875 | return 0; |
1876 | |
	intel_pt_prep_sample(pt, ptq, event, &sample);
1878 | |
1879 | sample.id = ptq->pt->cycles_id; |
1880 | sample.stream_id = ptq->pt->cycles_id; |
1881 | sample.period = period; |
1882 | |
1883 | sample.cyc_cnt = period; |
1884 | sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt; |
1885 | ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt; |
1886 | ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt; |
1887 | |
	return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
1889 | } |
1890 | |
1891 | static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) |
1892 | { |
1893 | struct intel_pt *pt = ptq->pt; |
1894 | union perf_event *event = ptq->event_buf; |
1895 | struct perf_sample sample = { .ip = 0, }; |
1896 | |
1897 | if (intel_pt_skip_event(pt)) |
1898 | return 0; |
1899 | |
	intel_pt_prep_sample(pt, ptq, event, &sample);
1901 | |
1902 | sample.id = ptq->pt->transactions_id; |
1903 | sample.stream_id = ptq->pt->transactions_id; |
1904 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->transactions_sample_type);
1907 | } |
1908 | |
1909 | static void intel_pt_prep_p_sample(struct intel_pt *pt, |
1910 | struct intel_pt_queue *ptq, |
1911 | union perf_event *event, |
1912 | struct perf_sample *sample) |
1913 | { |
1914 | intel_pt_prep_sample(pt, ptq, event, sample); |
1915 | |
1916 | /* |
1917 | * Zero IP is used to mean "trace start" but that is not the case for |
1918 | * power or PTWRITE events with no IP, so clear the flags. |
1919 | */ |
1920 | if (!sample->ip) |
1921 | sample->flags = 0; |
1922 | } |
1923 | |
1924 | static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq) |
1925 | { |
1926 | struct intel_pt *pt = ptq->pt; |
1927 | union perf_event *event = ptq->event_buf; |
1928 | struct perf_sample sample = { .ip = 0, }; |
1929 | struct perf_synth_intel_ptwrite raw; |
1930 | |
1931 | if (intel_pt_skip_event(pt)) |
1932 | return 0; |
1933 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1935 | |
1936 | sample.id = ptq->pt->ptwrites_id; |
1937 | sample.stream_id = ptq->pt->ptwrites_id; |
1938 | |
1939 | raw.flags = 0; |
1940 | raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); |
1941 | raw.payload = cpu_to_le64(ptq->state->ptw_payload); |
1942 | |
1943 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
1945 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->ptwrites_sample_type);
1948 | } |
1949 | |
1950 | static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq) |
1951 | { |
1952 | struct intel_pt *pt = ptq->pt; |
1953 | union perf_event *event = ptq->event_buf; |
1954 | struct perf_sample sample = { .ip = 0, }; |
1955 | struct perf_synth_intel_cbr raw; |
1956 | u32 flags; |
1957 | |
1958 | if (intel_pt_skip_cbr_event(pt)) |
1959 | return 0; |
1960 | |
1961 | ptq->cbr_seen = ptq->state->cbr; |
1962 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1964 | |
1965 | sample.id = ptq->pt->cbr_id; |
1966 | sample.stream_id = ptq->pt->cbr_id; |
1967 | |
1968 | flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16); |
1969 | raw.flags = cpu_to_le32(flags); |
1970 | raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz); |
1971 | raw.reserved3 = 0; |
1972 | |
1973 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
1975 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
1978 | } |
1979 | |
1980 | static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq) |
1981 | { |
1982 | struct intel_pt *pt = ptq->pt; |
1983 | union perf_event *event = ptq->event_buf; |
1984 | struct perf_sample sample = { .ip = 0, }; |
1985 | struct perf_synth_intel_psb raw; |
1986 | |
1987 | if (intel_pt_skip_event(pt)) |
1988 | return 0; |
1989 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1991 | |
1992 | sample.id = ptq->pt->psb_id; |
1993 | sample.stream_id = ptq->pt->psb_id; |
1994 | sample.flags = 0; |
1995 | |
1996 | raw.reserved = 0; |
1997 | raw.offset = ptq->state->psb_offset; |
1998 | |
1999 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
2001 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
2004 | } |
2005 | |
2006 | static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq) |
2007 | { |
2008 | struct intel_pt *pt = ptq->pt; |
2009 | union perf_event *event = ptq->event_buf; |
2010 | struct perf_sample sample = { .ip = 0, }; |
2011 | struct perf_synth_intel_mwait raw; |
2012 | |
2013 | if (intel_pt_skip_event(pt)) |
2014 | return 0; |
2015 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2017 | |
2018 | sample.id = ptq->pt->mwait_id; |
2019 | sample.stream_id = ptq->pt->mwait_id; |
2020 | |
2021 | raw.reserved = 0; |
2022 | raw.payload = cpu_to_le64(ptq->state->mwait_payload); |
2023 | |
2024 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
2026 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
2029 | } |
2030 | |
2031 | static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq) |
2032 | { |
2033 | struct intel_pt *pt = ptq->pt; |
2034 | union perf_event *event = ptq->event_buf; |
2035 | struct perf_sample sample = { .ip = 0, }; |
2036 | struct perf_synth_intel_pwre raw; |
2037 | |
2038 | if (intel_pt_skip_event(pt)) |
2039 | return 0; |
2040 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2042 | |
2043 | sample.id = ptq->pt->pwre_id; |
2044 | sample.stream_id = ptq->pt->pwre_id; |
2045 | |
2046 | raw.reserved = 0; |
2047 | raw.payload = cpu_to_le64(ptq->state->pwre_payload); |
2048 | |
2049 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
2051 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
2054 | } |
2055 | |
2056 | static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq) |
2057 | { |
2058 | struct intel_pt *pt = ptq->pt; |
2059 | union perf_event *event = ptq->event_buf; |
2060 | struct perf_sample sample = { .ip = 0, }; |
2061 | struct perf_synth_intel_exstop raw; |
2062 | |
2063 | if (intel_pt_skip_event(pt)) |
2064 | return 0; |
2065 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2067 | |
2068 | sample.id = ptq->pt->exstop_id; |
2069 | sample.stream_id = ptq->pt->exstop_id; |
2070 | |
2071 | raw.flags = 0; |
2072 | raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); |
2073 | |
2074 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
2076 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
2079 | } |
2080 | |
2081 | static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq) |
2082 | { |
2083 | struct intel_pt *pt = ptq->pt; |
2084 | union perf_event *event = ptq->event_buf; |
2085 | struct perf_sample sample = { .ip = 0, }; |
2086 | struct perf_synth_intel_pwrx raw; |
2087 | |
2088 | if (intel_pt_skip_event(pt)) |
2089 | return 0; |
2090 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2092 | |
2093 | sample.id = ptq->pt->pwrx_id; |
2094 | sample.stream_id = ptq->pt->pwrx_id; |
2095 | |
2096 | raw.reserved = 0; |
2097 | raw.payload = cpu_to_le64(ptq->state->pwrx_payload); |
2098 | |
2099 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
2101 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
2104 | } |
2105 | |
2106 | /* |
 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer to
 * intel_pt_add_gp_regs().
2109 | */ |
2110 | static const int pebs_gp_regs[] = { |
2111 | [PERF_REG_X86_FLAGS] = 1, |
2112 | [PERF_REG_X86_IP] = 2, |
2113 | [PERF_REG_X86_AX] = 3, |
2114 | [PERF_REG_X86_CX] = 4, |
2115 | [PERF_REG_X86_DX] = 5, |
2116 | [PERF_REG_X86_BX] = 6, |
2117 | [PERF_REG_X86_SP] = 7, |
2118 | [PERF_REG_X86_BP] = 8, |
2119 | [PERF_REG_X86_SI] = 9, |
2120 | [PERF_REG_X86_DI] = 10, |
2121 | [PERF_REG_X86_R8] = 11, |
2122 | [PERF_REG_X86_R9] = 12, |
2123 | [PERF_REG_X86_R10] = 13, |
2124 | [PERF_REG_X86_R11] = 14, |
2125 | [PERF_REG_X86_R12] = 15, |
2126 | [PERF_REG_X86_R13] = 16, |
2127 | [PERF_REG_X86_R14] = 17, |
2128 | [PERF_REG_X86_R15] = 18, |
2129 | }; |
2130 | |
2131 | static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos, |
2132 | const struct intel_pt_blk_items *items, |
2133 | u64 regs_mask) |
2134 | { |
2135 | const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS]; |
2136 | u32 mask = items->mask[INTEL_PT_GP_REGS_POS]; |
2137 | u32 bit; |
2138 | int i; |
2139 | |
2140 | for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) { |
2141 | /* Get the PEBS gp_regs array index */ |
2142 | int n = pebs_gp_regs[i] - 1; |
2143 | |
2144 | if (n < 0) |
2145 | continue; |
2146 | /* |
2147 | * Add only registers that were requested (i.e. 'regs_mask') and |
2148 | * that were provided (i.e. 'mask'), and update the resulting |
2149 | * mask (i.e. 'intr_regs->mask') accordingly. |
2150 | */ |
2151 | if (mask & 1 << n && regs_mask & bit) { |
2152 | intr_regs->mask |= bit; |
2153 | *pos++ = gp_regs[n]; |
2154 | } |
2155 | } |
2156 | |
2157 | return pos; |
2158 | } |
2159 | |
2160 | #ifndef PERF_REG_X86_XMM0 |
2161 | #define PERF_REG_X86_XMM0 32 |
2162 | #endif |
2163 | |
2164 | static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos, |
2165 | const struct intel_pt_blk_items *items, |
2166 | u64 regs_mask) |
2167 | { |
2168 | u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0); |
2169 | const u64 *xmm = items->xmm; |
2170 | |
2171 | /* |
2172 | * If there are any XMM registers, then there should be all of them. |
2173 | * Nevertheless, follow the logic to add only registers that were |
2174 | * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'), |
2175 | * and update the resulting mask (i.e. 'intr_regs->mask') accordingly. |
2176 | */ |
2177 | intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0; |
2178 | |
2179 | for (; mask; mask >>= 1, xmm++) { |
2180 | if (mask & 1) |
2181 | *pos++ = *xmm; |
2182 | } |
2183 | } |
2184 | |
2185 | #define LBR_INFO_MISPRED (1ULL << 63) |
2186 | #define LBR_INFO_IN_TX (1ULL << 62) |
2187 | #define LBR_INFO_ABORT (1ULL << 61) |
2188 | #define LBR_INFO_CYCLES 0xffff |
2189 | |
/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
2191 | static u64 intel_pt_lbr_flags(u64 info) |
2192 | { |
2193 | union { |
2194 | struct branch_flags flags; |
2195 | u64 result; |
2196 | } u; |
2197 | |
2198 | u.result = 0; |
2199 | u.flags.mispred = !!(info & LBR_INFO_MISPRED); |
2200 | u.flags.predicted = !(info & LBR_INFO_MISPRED); |
2201 | u.flags.in_tx = !!(info & LBR_INFO_IN_TX); |
2202 | u.flags.abort = !!(info & LBR_INFO_ABORT); |
2203 | u.flags.cycles = info & LBR_INFO_CYCLES; |
2204 | |
2205 | return u.result; |
2206 | } |
2207 | |
2208 | static void intel_pt_add_lbrs(struct branch_stack *br_stack, |
2209 | const struct intel_pt_blk_items *items) |
2210 | { |
2211 | u64 *to; |
2212 | int i; |
2213 | |
2214 | br_stack->nr = 0; |
2215 | |
2216 | to = &br_stack->entries[0].from; |
2217 | |
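	/*
	 * Each LBR is represented by 3 consecutive block item values:
	 * from, to and info. Copy an entry only when all three are
	 * present, i.e. (mask & 7) == 7.
	 */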
2218 | for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) { |
2219 | u32 mask = items->mask[i]; |
2220 | const u64 *from = items->val[i]; |
2221 | |
2222 | for (; mask; mask >>= 3, from += 3) { |
2223 | if ((mask & 7) == 7) { |
2224 | *to++ = from[0]; |
2225 | *to++ = from[1]; |
				*to++ = intel_pt_lbr_flags(from[2]);
2227 | br_stack->nr += 1; |
2228 | } |
2229 | } |
2230 | } |
2231 | } |
2232 | |
2233 | static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id) |
2234 | { |
2235 | const struct intel_pt_blk_items *items = &ptq->state->items; |
2236 | struct perf_sample sample = { .ip = 0, }; |
2237 | union perf_event *event = ptq->event_buf; |
2238 | struct intel_pt *pt = ptq->pt; |
2239 | u64 sample_type = evsel->core.attr.sample_type; |
2240 | u8 cpumode; |
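	/* One u64 value for each possible bit of the register mask */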
2241 | u64 regs[8 * sizeof(sample.intr_regs.mask)]; |
2242 | |
2243 | if (intel_pt_skip_event(pt)) |
2244 | return 0; |
2245 | |
	intel_pt_prep_a_sample(ptq, event, &sample);
2247 | |
2248 | sample.id = id; |
2249 | sample.stream_id = id; |
2250 | |
2251 | if (!evsel->core.attr.freq) |
2252 | sample.period = evsel->core.attr.sample_period; |
2253 | |
2254 | /* No support for non-zero CS base */ |
2255 | if (items->has_ip) |
2256 | sample.ip = items->ip; |
2257 | else if (items->has_rip) |
2258 | sample.ip = items->rip; |
2259 | else |
2260 | sample.ip = ptq->state->from_ip; |
2261 | |
	cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2263 | |
2264 | event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP; |
2265 | |
2266 | sample.cpumode = cpumode; |
2267 | |
2268 | if (sample_type & PERF_SAMPLE_TIME) { |
2269 | u64 timestamp = 0; |
2270 | |
2271 | if (items->has_timestamp) |
2272 | timestamp = items->timestamp; |
2273 | else if (!pt->timeless_decoding) |
2274 | timestamp = ptq->timestamp; |
2275 | if (timestamp) |
			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2277 | } |
2278 | |
2279 | if (sample_type & PERF_SAMPLE_CALLCHAIN && |
2280 | pt->synth_opts.callchain) { |
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip,
				     pt->kernel_start);
2284 | sample.callchain = ptq->chain; |
2285 | } |
2286 | |
2287 | if (sample_type & PERF_SAMPLE_REGS_INTR && |
2288 | (items->mask[INTEL_PT_GP_REGS_POS] || |
2289 | items->mask[INTEL_PT_XMM_POS])) { |
2290 | u64 regs_mask = evsel->core.attr.sample_regs_intr; |
2291 | u64 *pos; |
2292 | |
2293 | sample.intr_regs.abi = items->is_32_bit ? |
2294 | PERF_SAMPLE_REGS_ABI_32 : |
2295 | PERF_SAMPLE_REGS_ABI_64; |
2296 | sample.intr_regs.regs = regs; |
2297 | |
		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2299 | |
		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2301 | } |
2302 | |
2303 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { |
2304 | if (items->mask[INTEL_PT_LBR_0_POS] || |
2305 | items->mask[INTEL_PT_LBR_1_POS] || |
2306 | items->mask[INTEL_PT_LBR_2_POS]) { |
			intel_pt_add_lbrs(ptq->last_branch, items);
2308 | } else if (pt->synth_opts.last_branch) { |
			thread_stack__br_sample(ptq->thread, ptq->cpu,
						ptq->last_branch,
						pt->br_stack_sz);
2312 | } else { |
2313 | ptq->last_branch->nr = 0; |
2314 | } |
2315 | sample.branch_stack = ptq->last_branch; |
2316 | } |
2317 | |
2318 | if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address) |
2319 | sample.addr = items->mem_access_address; |
2320 | |
2321 | if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { |
2322 | /* |
		 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
2324 | * intel_hsw_weight(). |
2325 | */ |
2326 | if (items->has_mem_access_latency) { |
2327 | u64 weight = items->mem_access_latency >> 32; |
2328 | |
2329 | /* |
			 * Starting from SPR, the mem access latency field
			 * contains both cache latency [47:32] and instruction
			 * latency [15:0]. The cache latency is the same as the
			 * mem access latency on previous platforms.
			 *
			 * In practice, no memory access could last longer than
			 * 4G cycles. Use latency >> 32 to distinguish the
			 * different formats of the mem access latency field.
2338 | */ |
2339 | if (weight > 0) { |
2340 | sample.weight = weight & 0xffff; |
2341 | sample.ins_lat = items->mem_access_latency & 0xffff; |
2342 | } else |
2343 | sample.weight = items->mem_access_latency; |
2344 | } |
2345 | if (!sample.weight && items->has_tsx_aux_info) { |
2346 | /* Cycles last block */ |
2347 | sample.weight = (u32)items->tsx_aux_info; |
2348 | } |
2349 | } |
2350 | |
2351 | if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) { |
2352 | u64 ax = items->has_rax ? items->rax : 0; |
		/* Refer to the kernel's intel_hsw_transaction() */
2354 | u64 txn = (u8)(items->tsx_aux_info >> 32); |
2355 | |
2356 | /* For RTM XABORTs also log the abort code from AX */ |
2357 | if (txn & PERF_TXN_TRANSACTION && ax & 1) |
2358 | txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; |
2359 | sample.transaction = txn; |
2360 | } |
2361 | |
	return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2363 | } |
2364 | |
2365 | static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq) |
2366 | { |
2367 | struct intel_pt *pt = ptq->pt; |
2368 | struct evsel *evsel = pt->pebs_evsel; |
2369 | u64 id = evsel->core.id[0]; |
2370 | |
2371 | return intel_pt_do_synth_pebs_sample(ptq, evsel, id); |
2372 | } |
2373 | |
2374 | static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) |
2375 | { |
2376 | const struct intel_pt_blk_items *items = &ptq->state->items; |
2377 | struct intel_pt_pebs_event *pe; |
2378 | struct intel_pt *pt = ptq->pt; |
2379 | int err = -EINVAL; |
2380 | int hw_id; |
2381 | |
2382 | if (!items->has_applicable_counters || !items->applicable_counters) { |
2383 | if (!pt->single_pebs) |
2384 | pr_err("PEBS-via-PT record with no applicable_counters\n" ); |
2385 | return intel_pt_synth_single_pebs_sample(ptq); |
2386 | } |
2387 | |
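	/* Synthesize a sample for each event the PEBS record applies to */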
2388 | for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) { |
2389 | pe = &ptq->pebs[hw_id]; |
2390 | if (!pe->evsel) { |
2391 | if (!pt->single_pebs) |
2392 | pr_err("PEBS-via-PT record with no matching event, hw_id %d\n" , |
2393 | hw_id); |
2394 | return intel_pt_synth_single_pebs_sample(ptq); |
2395 | } |
		err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2397 | if (err) |
2398 | return err; |
2399 | } |
2400 | |
2401 | return err; |
2402 | } |
2403 | |
2404 | static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq) |
2405 | { |
2406 | struct intel_pt *pt = ptq->pt; |
2407 | union perf_event *event = ptq->event_buf; |
2408 | struct perf_sample sample = { .ip = 0, }; |
2409 | struct { |
2410 | struct perf_synth_intel_evt cfe; |
2411 | struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS]; |
2412 | } raw; |
2413 | int i; |
2414 | |
2415 | if (intel_pt_skip_event(pt)) |
2416 | return 0; |
2417 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2419 | |
2420 | sample.id = ptq->pt->evt_id; |
2421 | sample.stream_id = ptq->pt->evt_id; |
2422 | |
2423 | raw.cfe.type = ptq->state->cfe_type; |
2424 | raw.cfe.reserved = 0; |
2425 | raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); |
2426 | raw.cfe.vector = ptq->state->cfe_vector; |
2427 | raw.cfe.evd_cnt = ptq->state->evd_cnt; |
2428 | |
2429 | for (i = 0; i < ptq->state->evd_cnt; i++) { |
2430 | raw.evd[i].et = 0; |
2431 | raw.evd[i].evd_type = ptq->state->evd[i].type; |
2432 | raw.evd[i].payload = ptq->state->evd[i].payload; |
2433 | } |
2434 | |
2435 | sample.raw_size = perf_synth__raw_size(raw) + |
2436 | ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd); |
	sample.raw_data = perf_synth__raw_data(&raw);
2438 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->evt_sample_type);
2441 | } |
2442 | |
2443 | static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq) |
2444 | { |
2445 | struct intel_pt *pt = ptq->pt; |
2446 | union perf_event *event = ptq->event_buf; |
2447 | struct perf_sample sample = { .ip = 0, }; |
2448 | struct perf_synth_intel_iflag_chg raw; |
2449 | |
2450 | if (intel_pt_skip_event(pt)) |
2451 | return 0; |
2452 | |
	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2454 | |
2455 | sample.id = ptq->pt->iflag_chg_id; |
2456 | sample.stream_id = ptq->pt->iflag_chg_id; |
2457 | |
2458 | raw.flags = 0; |
2459 | raw.iflag = ptq->state->to_iflag; |
2460 | |
2461 | if (ptq->state->type & INTEL_PT_BRANCH) { |
2462 | raw.via_branch = 1; |
2463 | raw.branch_ip = ptq->state->to_ip; |
2464 | } else { |
2465 | sample.addr = 0; |
2466 | } |
2467 | sample.flags = ptq->flags; |
2468 | |
2469 | sample.raw_size = perf_synth__raw_size(raw); |
	sample.raw_data = perf_synth__raw_data(&raw);
2471 | |
	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->iflag_chg_sample_type);
2474 | } |
2475 | |
2476 | static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu, |
2477 | pid_t pid, pid_t tid, u64 ip, u64 timestamp, |
2478 | pid_t machine_pid, int vcpu) |
2479 | { |
2480 | bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR; |
2481 | bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT; |
2482 | union perf_event event; |
2483 | char msg[MAX_AUXTRACE_ERROR_MSG]; |
2484 | int err; |
2485 | |
2486 | if (pt->synth_opts.error_minus_flags) { |
2487 | if (code == INTEL_PT_ERR_OVR && |
2488 | pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW) |
2489 | return 0; |
2490 | if (code == INTEL_PT_ERR_LOST && |
2491 | pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST) |
2492 | return 0; |
2493 | } |
2494 | |
2495 | intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG); |
2496 | |
2497 | auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, |
2498 | code, cpu, pid, tid, ip, msg, timestamp, |
2499 | machine_pid, vcpu); |
2500 | |
2501 | if (intel_pt_enable_logging && !log_on_stdout) { |
2502 | FILE *fp = intel_pt_log_fp(); |
2503 | |
2504 | if (fp) |
2505 | perf_event__fprintf_auxtrace_error(&event, fp); |
2506 | } |
2507 | |
2508 | if (code != INTEL_PT_ERR_LOST && dump_log_on_error) |
2509 | intel_pt_log_dump_buf(); |
2510 | |
	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);
2515 | |
2516 | return err; |
2517 | } |
2518 | |
2519 | static int intel_ptq_synth_error(struct intel_pt_queue *ptq, |
2520 | const struct intel_pt_state *state) |
2521 | { |
2522 | struct intel_pt *pt = ptq->pt; |
2523 | u64 tm = ptq->timestamp; |
2524 | pid_t machine_pid = 0; |
2525 | pid_t pid = ptq->pid; |
2526 | pid_t tid = ptq->tid; |
2527 | int vcpu = -1; |
2528 | |
	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2530 | |
2531 | if (pt->have_guest_sideband && state->from_nr) { |
2532 | machine_pid = ptq->guest_machine_pid; |
2533 | vcpu = ptq->vcpu; |
2534 | pid = ptq->guest_pid; |
2535 | tid = ptq->guest_tid; |
2536 | } |
2537 | |
	return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
				    state->from_ip, tm, machine_pid, vcpu);
2540 | } |
2541 | |
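/*
 * Effect a deferred context switch: update the machine's current tid for the
 * queue's CPU and refresh the queue's pid/tid/thread to match.
 */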
2542 | static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq) |
2543 | { |
2544 | struct auxtrace_queue *queue; |
2545 | pid_t tid = ptq->next_tid; |
2546 | int err; |
2547 | |
2548 | if (tid == -1) |
2549 | return 0; |
2550 | |
2551 | intel_pt_log("switch: cpu %d tid %d\n" , ptq->cpu, tid); |
2552 | |
	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2554 | |
2555 | queue = &pt->queues.queue_array[ptq->queue_nr]; |
2556 | intel_pt_set_pid_tid_cpu(pt, queue); |
2557 | |
2558 | ptq->next_tid = -1; |
2559 | |
2560 | return err; |
2561 | } |
2562 | |
2563 | static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip) |
2564 | { |
2565 | struct intel_pt *pt = ptq->pt; |
2566 | |
2567 | return ip == pt->switch_ip && |
2568 | (ptq->flags & PERF_IP_FLAG_BRANCH) && |
2569 | !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC | |
2570 | PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT)); |
2571 | } |
2572 | |
2573 | #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \ |
2574 | INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT) |
2575 | |
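/*
 * Synthesize the samples implied by the current decoder state, and drive the
 * sync_switch state machine when the trace branches to the switch ip.
 */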
2576 | static int intel_pt_sample(struct intel_pt_queue *ptq) |
2577 | { |
2578 | const struct intel_pt_state *state = ptq->state; |
2579 | struct intel_pt *pt = ptq->pt; |
2580 | int err; |
2581 | |
2582 | if (!ptq->have_sample) |
2583 | return 0; |
2584 | |
2585 | ptq->have_sample = false; |
2586 | |
2587 | if (pt->synth_opts.approx_ipc) { |
2588 | ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; |
2589 | ptq->ipc_cyc_cnt = ptq->state->cycles; |
2590 | ptq->sample_ipc = true; |
2591 | } else { |
2592 | ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; |
2593 | ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt; |
2594 | ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC; |
2595 | } |
2596 | |
2597 | /* Ensure guest code maps are set up */ |
2598 | if (symbol_conf.guest_code && (state->from_nr || state->to_nr)) |
2599 | intel_pt_get_guest(ptq); |
2600 | |
2601 | /* |
2602 | * Do PEBS first to allow for the possibility that the PEBS timestamp |
2603 | * precedes the current timestamp. |
2604 | */ |
2605 | if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) { |
2606 | err = intel_pt_synth_pebs_sample(ptq); |
2607 | if (err) |
2608 | return err; |
2609 | } |
2610 | |
2611 | if (pt->synth_opts.intr_events) { |
2612 | if (state->type & INTEL_PT_EVT) { |
2613 | err = intel_pt_synth_events_sample(ptq); |
2614 | if (err) |
2615 | return err; |
2616 | } |
2617 | if (state->type & INTEL_PT_IFLAG_CHG) { |
2618 | err = intel_pt_synth_iflag_chg_sample(ptq); |
2619 | if (err) |
2620 | return err; |
2621 | } |
2622 | } |
2623 | |
2624 | if (pt->sample_pwr_events) { |
2625 | if (state->type & INTEL_PT_PSB_EVT) { |
2626 | err = intel_pt_synth_psb_sample(ptq); |
2627 | if (err) |
2628 | return err; |
2629 | } |
2630 | if (ptq->state->cbr != ptq->cbr_seen) { |
2631 | err = intel_pt_synth_cbr_sample(ptq); |
2632 | if (err) |
2633 | return err; |
2634 | } |
2635 | if (state->type & INTEL_PT_PWR_EVT) { |
2636 | if (state->type & INTEL_PT_MWAIT_OP) { |
2637 | err = intel_pt_synth_mwait_sample(ptq); |
2638 | if (err) |
2639 | return err; |
2640 | } |
2641 | if (state->type & INTEL_PT_PWR_ENTRY) { |
2642 | err = intel_pt_synth_pwre_sample(ptq); |
2643 | if (err) |
2644 | return err; |
2645 | } |
2646 | if (state->type & INTEL_PT_EX_STOP) { |
2647 | err = intel_pt_synth_exstop_sample(ptq); |
2648 | if (err) |
2649 | return err; |
2650 | } |
2651 | if (state->type & INTEL_PT_PWR_EXIT) { |
2652 | err = intel_pt_synth_pwrx_sample(ptq); |
2653 | if (err) |
2654 | return err; |
2655 | } |
2656 | } |
2657 | } |
2658 | |
2659 | if (state->type & INTEL_PT_INSTRUCTION) { |
2660 | if (pt->sample_instructions) { |
2661 | err = intel_pt_synth_instruction_sample(ptq); |
2662 | if (err) |
2663 | return err; |
2664 | } |
2665 | if (pt->sample_cycles) { |
2666 | err = intel_pt_synth_cycle_sample(ptq); |
2667 | if (err) |
2668 | return err; |
2669 | } |
2670 | } |
2671 | |
2672 | if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) { |
2673 | err = intel_pt_synth_transaction_sample(ptq); |
2674 | if (err) |
2675 | return err; |
2676 | } |
2677 | |
2678 | if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) { |
2679 | err = intel_pt_synth_ptwrite_sample(ptq); |
2680 | if (err) |
2681 | return err; |
2682 | } |
2683 | |
2684 | if (!(state->type & INTEL_PT_BRANCH)) |
2685 | return 0; |
2686 | |
2687 | if (pt->use_thread_stack) { |
		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
				    state->from_ip, state->to_ip, ptq->insn_len,
				    state->trace_nr, pt->callstack,
				    pt->br_stack_sz_plus,
				    pt->mispred_all);
2693 | } else { |
		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2695 | } |
2696 | |
2697 | if (pt->sample_branches) { |
2698 | if (state->from_nr != state->to_nr && |
2699 | state->from_ip && state->to_ip) { |
2700 | struct intel_pt_state *st = (struct intel_pt_state *)state; |
2701 | u64 to_ip = st->to_ip; |
2702 | u64 from_ip = st->from_ip; |
2703 | |
2704 | /* |
2705 | * perf cannot handle having different machines for ip |
2706 | * and addr, so create 2 branches. |
2707 | */ |
2708 | st->to_ip = 0; |
2709 | err = intel_pt_synth_branch_sample(ptq); |
2710 | if (err) |
2711 | return err; |
2712 | st->from_ip = 0; |
2713 | st->to_ip = to_ip; |
2714 | err = intel_pt_synth_branch_sample(ptq); |
2715 | st->from_ip = from_ip; |
2716 | } else { |
2717 | err = intel_pt_synth_branch_sample(ptq); |
2718 | } |
2719 | if (err) |
2720 | return err; |
2721 | } |
2722 | |
2723 | if (!ptq->sync_switch) |
2724 | return 0; |
2725 | |
	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2727 | switch (ptq->switch_state) { |
2728 | case INTEL_PT_SS_NOT_TRACING: |
2729 | case INTEL_PT_SS_UNKNOWN: |
2730 | case INTEL_PT_SS_EXPECTING_SWITCH_IP: |
2731 | err = intel_pt_next_tid(pt, ptq); |
2732 | if (err) |
2733 | return err; |
2734 | ptq->switch_state = INTEL_PT_SS_TRACING; |
2735 | break; |
2736 | default: |
2737 | ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT; |
2738 | return 1; |
2739 | } |
2740 | } else if (!state->to_ip) { |
2741 | ptq->switch_state = INTEL_PT_SS_NOT_TRACING; |
2742 | } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) { |
2743 | ptq->switch_state = INTEL_PT_SS_UNKNOWN; |
2744 | } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN && |
2745 | state->to_ip == pt->ptss_ip && |
2746 | (ptq->flags & PERF_IP_FLAG_CALL)) { |
2747 | ptq->switch_state = INTEL_PT_SS_TRACING; |
2748 | } |
2749 | |
2750 | return 0; |
2751 | } |
2752 | |
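/*
 * Find the address of the kernel context switch function (__switch_to) and,
 * via *ptss_ip, the function used to trace sched_switch events (presumably
 * "ptss" stands for "perf trace sched switch").
 */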
2753 | static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) |
2754 | { |
2755 | struct machine *machine = pt->machine; |
2756 | struct map *map; |
2757 | struct symbol *sym, *start; |
2758 | u64 ip, switch_ip = 0; |
2759 | const char *ptss; |
2760 | |
2761 | if (ptss_ip) |
2762 | *ptss_ip = 0; |
2763 | |
2764 | map = machine__kernel_map(machine); |
2765 | if (!map) |
2766 | return 0; |
2767 | |
2768 | if (map__load(map)) |
2769 | return 0; |
2770 | |
	start = dso__first_symbol(map__dso(map));
2772 | |
2773 | for (sym = start; sym; sym = dso__next_symbol(sym)) { |
2774 | if (sym->binding == STB_GLOBAL && |
2775 | !strcmp(sym->name, "__switch_to" )) { |
2776 | ip = map__unmap_ip(map, ip_or_rip: sym->start); |
2777 | if (ip >= map__start(map) && ip < map__end(map)) { |
2778 | switch_ip = ip; |
2779 | break; |
2780 | } |
2781 | } |
2782 | } |
2783 | |
2784 | if (!switch_ip || !ptss_ip) |
2785 | return 0; |
2786 | |
2787 | if (pt->have_sched_switch == 1) |
2788 | ptss = "perf_trace_sched_switch" ; |
2789 | else |
2790 | ptss = "__perf_event_task_sched_out" ; |
2791 | |
2792 | for (sym = start; sym; sym = dso__next_symbol(sym)) { |
2793 | if (!strcmp(sym->name, ptss)) { |
			ip = map__unmap_ip(map, sym->start);
2795 | if (ip >= map__start(map) && ip < map__end(map)) { |
2796 | *ptss_ip = ip; |
2797 | break; |
2798 | } |
2799 | } |
2800 | } |
2801 | |
2802 | return switch_ip; |
2803 | } |
2804 | |
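/*
 * With sync_switch, context switch events are not processed when they are
 * received, but deferred until the decoder reaches the kernel switch ip,
 * keeping the decoded trace and the sideband events in step.
 */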
2805 | static void intel_pt_enable_sync_switch(struct intel_pt *pt) |
2806 | { |
2807 | unsigned int i; |
2808 | |
2809 | if (pt->sync_switch_not_supported) |
2810 | return; |
2811 | |
2812 | pt->sync_switch = true; |
2813 | |
2814 | for (i = 0; i < pt->queues.nr_queues; i++) { |
2815 | struct auxtrace_queue *queue = &pt->queues.queue_array[i]; |
2816 | struct intel_pt_queue *ptq = queue->priv; |
2817 | |
2818 | if (ptq) |
2819 | ptq->sync_switch = true; |
2820 | } |
2821 | } |
2822 | |
2823 | static void intel_pt_disable_sync_switch(struct intel_pt *pt) |
2824 | { |
2825 | unsigned int i; |
2826 | |
2827 | pt->sync_switch = false; |
2828 | |
2829 | for (i = 0; i < pt->queues.nr_queues; i++) { |
2830 | struct auxtrace_queue *queue = &pt->queues.queue_array[i]; |
2831 | struct intel_pt_queue *ptq = queue->priv; |
2832 | |
2833 | if (ptq) { |
2834 | ptq->sync_switch = false; |
2835 | intel_pt_next_tid(pt, ptq); |
2836 | } |
2837 | } |
2838 | } |
2839 | |
2840 | /* |
2841 | * To filter against time ranges, it is only necessary to look at the next start |
2842 | * or end time. |
2843 | */ |
2844 | static bool intel_pt_next_time(struct intel_pt_queue *ptq) |
2845 | { |
2846 | struct intel_pt *pt = ptq->pt; |
2847 | |
2848 | if (ptq->sel_start) { |
2849 | /* Next time is an end time */ |
2850 | ptq->sel_start = false; |
2851 | ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end; |
2852 | return true; |
2853 | } else if (ptq->sel_idx + 1 < pt->range_cnt) { |
2854 | /* Next time is a start time */ |
2855 | ptq->sel_start = true; |
2856 | ptq->sel_idx += 1; |
2857 | ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start; |
2858 | return true; |
2859 | } |
2860 | |
2861 | /* No next time */ |
2862 | return false; |
2863 | } |
2864 | |
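/*
 * Apply the selected time ranges: fast forward the decoder over periods before
 * the next start time. Returns 0 to continue decoding, 1 when there are no
 * more time ranges (stop decoding), or a negative error code.
 */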
2865 | static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp) |
2866 | { |
2867 | int err; |
2868 | |
2869 | while (1) { |
2870 | if (ptq->sel_start) { |
2871 | if (ptq->timestamp >= ptq->sel_timestamp) { |
2872 | /* After start time, so consider next time */ |
2873 | intel_pt_next_time(ptq); |
2874 | if (!ptq->sel_timestamp) { |
2875 | /* No end time */ |
2876 | return 0; |
2877 | } |
2878 | /* Check against end time */ |
2879 | continue; |
2880 | } |
2881 | /* Before start time, so fast forward */ |
2882 | ptq->have_sample = false; |
2883 | if (ptq->sel_timestamp > *ff_timestamp) { |
2884 | if (ptq->sync_switch) { |
					intel_pt_next_tid(ptq->pt, ptq);
2886 | ptq->switch_state = INTEL_PT_SS_UNKNOWN; |
2887 | } |
2888 | *ff_timestamp = ptq->sel_timestamp; |
				err = intel_pt_fast_forward(ptq->decoder,
							    ptq->sel_timestamp);
2891 | if (err) |
2892 | return err; |
2893 | } |
2894 | return 0; |
2895 | } else if (ptq->timestamp > ptq->sel_timestamp) { |
2896 | /* After end time, so consider next time */ |
2897 | if (!intel_pt_next_time(ptq)) { |
2898 | /* No next time range, so stop decoding */ |
2899 | ptq->have_sample = false; |
2900 | ptq->switch_state = INTEL_PT_SS_NOT_TRACING; |
2901 | return 1; |
2902 | } |
2903 | /* Check against next start time */ |
2904 | continue; |
2905 | } else { |
2906 | /* Before end time */ |
2907 | return 0; |
2908 | } |
2909 | } |
2910 | } |
2911 | |
2912 | static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) |
2913 | { |
2914 | const struct intel_pt_state *state = ptq->state; |
2915 | struct intel_pt *pt = ptq->pt; |
2916 | u64 ff_timestamp = 0; |
2917 | int err; |
2918 | |
2919 | if (!pt->kernel_start) { |
		pt->kernel_start = machine__kernel_start(pt->machine);
2921 | if (pt->per_cpu_mmaps && |
2922 | (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) && |
2923 | !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && |
2924 | !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) { |
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2926 | if (pt->switch_ip) { |
2927 | intel_pt_log("switch_ip: %" PRIx64" ptss_ip: %" PRIx64"\n" , |
2928 | pt->switch_ip, pt->ptss_ip); |
2929 | intel_pt_enable_sync_switch(pt); |
2930 | } |
2931 | } |
2932 | } |
2933 | |
2934 | intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n" , |
2935 | ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); |
2936 | while (1) { |
2937 | err = intel_pt_sample(ptq); |
2938 | if (err) |
2939 | return err; |
2940 | |
		state = intel_pt_decode(ptq->decoder);
2942 | if (state->err) { |
2943 | if (state->err == INTEL_PT_ERR_NODATA) |
2944 | return 1; |
2945 | if (ptq->sync_switch && |
2946 | state->from_ip >= pt->kernel_start) { |
2947 | ptq->sync_switch = false; |
2948 | intel_pt_next_tid(pt, ptq); |
2949 | } |
2950 | ptq->timestamp = state->est_timestamp; |
2951 | if (pt->synth_opts.errors) { |
2952 | err = intel_ptq_synth_error(ptq, state); |
2953 | if (err) |
2954 | return err; |
2955 | } |
2956 | continue; |
2957 | } |
2958 | |
2959 | ptq->state = state; |
2960 | ptq->have_sample = true; |
2961 | intel_pt_sample_flags(ptq); |
2962 | |
2963 | /* Use estimated TSC upon return to user space */ |
2964 | if (pt->est_tsc && |
2965 | (state->from_ip >= pt->kernel_start || !state->from_ip) && |
2966 | state->to_ip && state->to_ip < pt->kernel_start) { |
2967 | intel_pt_log("TSC %" PRIx64" est. TSC %" PRIx64"\n" , |
2968 | state->timestamp, state->est_timestamp); |
2969 | ptq->timestamp = state->est_timestamp; |
2970 | /* Use estimated TSC in unknown switch state */ |
2971 | } else if (ptq->sync_switch && |
2972 | ptq->switch_state == INTEL_PT_SS_UNKNOWN && |
2973 | intel_pt_is_switch_ip(ptq, ip: state->to_ip) && |
2974 | ptq->next_tid == -1) { |
2975 | intel_pt_log("TSC %" PRIx64" est. TSC %" PRIx64"\n" , |
2976 | state->timestamp, state->est_timestamp); |
2977 | ptq->timestamp = state->est_timestamp; |
2978 | } else if (state->timestamp > ptq->timestamp) { |
2979 | ptq->timestamp = state->timestamp; |
2980 | } |
2981 | |
2982 | if (ptq->sel_timestamp) { |
			err = intel_pt_time_filter(ptq, &ff_timestamp);
2984 | if (err) |
2985 | return err; |
2986 | } |
2987 | |
2988 | if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) { |
2989 | *timestamp = ptq->timestamp; |
2990 | return 0; |
2991 | } |
2992 | } |
2993 | return 0; |
2994 | } |
2995 | |
2996 | static inline int intel_pt_update_queues(struct intel_pt *pt) |
2997 | { |
2998 | if (pt->queues.new_data) { |
2999 | pt->queues.new_data = false; |
3000 | return intel_pt_setup_queues(pt); |
3001 | } |
3002 | return 0; |
3003 | } |
3004 | |
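/*
 * Process queued data in timestamp order, up to (but not including) the given
 * timestamp. The heap is keyed on each queue's next timestamp, so the queue
 * with the oldest data is always decoded first.
 */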
3005 | static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp) |
3006 | { |
3007 | unsigned int queue_nr; |
3008 | u64 ts; |
3009 | int ret; |
3010 | |
3011 | while (1) { |
3012 | struct auxtrace_queue *queue; |
3013 | struct intel_pt_queue *ptq; |
3014 | |
3015 | if (!pt->heap.heap_cnt) |
3016 | return 0; |
3017 | |
3018 | if (pt->heap.heap_array[0].ordinal >= timestamp) |
3019 | return 0; |
3020 | |
3021 | queue_nr = pt->heap.heap_array[0].queue_nr; |
3022 | queue = &pt->queues.queue_array[queue_nr]; |
3023 | ptq = queue->priv; |
3024 | |
3025 | intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n" , |
3026 | queue_nr, pt->heap.heap_array[0].ordinal, |
3027 | timestamp); |
3028 | |
3029 | auxtrace_heap__pop(&pt->heap); |
3030 | |
3031 | if (pt->heap.heap_cnt) { |
3032 | ts = pt->heap.heap_array[0].ordinal + 1; |
3033 | if (ts > timestamp) |
3034 | ts = timestamp; |
3035 | } else { |
3036 | ts = timestamp; |
3037 | } |
3038 | |
3039 | intel_pt_set_pid_tid_cpu(pt, queue); |
3040 | |
		ret = intel_pt_run_decoder(ptq, &ts);
3042 | |
3043 | if (ret < 0) { |
3044 | auxtrace_heap__add(&pt->heap, queue_nr, ts); |
3045 | return ret; |
3046 | } |
3047 | |
3048 | if (!ret) { |
3049 | ret = auxtrace_heap__add(&pt->heap, queue_nr, ts); |
3050 | if (ret < 0) |
3051 | return ret; |
3052 | } else { |
3053 | ptq->on_heap = false; |
3054 | } |
3055 | } |
3056 | |
3057 | return 0; |
3058 | } |
3059 | |
3060 | static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid, |
3061 | u64 time_) |
3062 | { |
3063 | struct auxtrace_queues *queues = &pt->queues; |
3064 | unsigned int i; |
3065 | u64 ts = 0; |
3066 | |
3067 | for (i = 0; i < queues->nr_queues; i++) { |
3068 | struct auxtrace_queue *queue = &pt->queues.queue_array[i]; |
3069 | struct intel_pt_queue *ptq = queue->priv; |
3070 | |
3071 | if (ptq && (tid == -1 || ptq->tid == tid)) { |
3072 | ptq->time = time_; |
3073 | intel_pt_set_pid_tid_cpu(pt, queue); |
			intel_pt_run_decoder(ptq, &ts);
3075 | } |
3076 | } |
3077 | return 0; |
3078 | } |
3079 | |
3080 | static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq, |
3081 | struct auxtrace_queue *queue, |
3082 | struct perf_sample *sample) |
3083 | { |
3084 | struct machine *m = ptq->pt->machine; |
3085 | |
3086 | ptq->pid = sample->pid; |
3087 | ptq->tid = sample->tid; |
3088 | ptq->cpu = queue->cpu; |
3089 | |
3090 | intel_pt_log("queue %u cpu %d pid %d tid %d\n" , |
3091 | ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); |
3092 | |
3093 | thread__zput(ptq->thread); |
3094 | |
3095 | if (ptq->tid == -1) |
3096 | return; |
3097 | |
3098 | if (ptq->pid == -1) { |
		ptq->thread = machine__find_thread(m, -1, ptq->tid);
3100 | if (ptq->thread) |
			ptq->pid = thread__pid(ptq->thread);
3102 | return; |
3103 | } |
3104 | |
	ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
3106 | } |
3107 | |
3108 | static int intel_pt_process_timeless_sample(struct intel_pt *pt, |
3109 | struct perf_sample *sample) |
3110 | { |
3111 | struct auxtrace_queue *queue; |
3112 | struct intel_pt_queue *ptq; |
3113 | u64 ts = 0; |
3114 | |
3115 | queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); |
3116 | if (!queue) |
3117 | return -EINVAL; |
3118 | |
3119 | ptq = queue->priv; |
3120 | if (!ptq) |
3121 | return 0; |
3122 | |
3123 | ptq->stop = false; |
3124 | ptq->time = sample->time; |
3125 | intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample); |
	intel_pt_run_decoder(ptq, &ts);
3127 | return 0; |
3128 | } |
3129 | |
3130 | static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) |
3131 | { |
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0, sample->time,
				    sample->machine_pid, sample->vcpu);
3135 | } |
3136 | |
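/*
 * Find the queue for a given CPU. Queue numbers generally correspond to CPU
 * numbers, so check the expected index first.
 */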
3137 | static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu) |
3138 | { |
3139 | unsigned i, j; |
3140 | |
3141 | if (cpu < 0 || !pt->queues.nr_queues) |
3142 | return NULL; |
3143 | |
3144 | if ((unsigned)cpu >= pt->queues.nr_queues) |
3145 | i = pt->queues.nr_queues - 1; |
3146 | else |
3147 | i = cpu; |
3148 | |
3149 | if (pt->queues.queue_array[i].cpu == cpu) |
3150 | return pt->queues.queue_array[i].priv; |
3151 | |
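	/* Not at the expected index, so search backwards, then forwards */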
3152 | for (j = 0; i > 0; j++) { |
3153 | if (pt->queues.queue_array[--i].cpu == cpu) |
3154 | return pt->queues.queue_array[i].priv; |
3155 | } |
3156 | |
3157 | for (; j < pt->queues.nr_queues; j++) { |
3158 | if (pt->queues.queue_array[j].cpu == cpu) |
3159 | return pt->queues.queue_array[j].priv; |
3160 | } |
3161 | |
3162 | return NULL; |
3163 | } |
3164 | |
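/*
 * Handle a context switch when sync_switch is in use: record the incoming tid
 * and defer the switch until the decoder reaches the switch ip. Returns 0 if
 * the switch was deferred, 1 if the caller should update the tid immediately,
 * or a negative error code.
 */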
3165 | static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, |
3166 | u64 timestamp) |
3167 | { |
3168 | struct intel_pt_queue *ptq; |
3169 | int err; |
3170 | |
3171 | if (!pt->sync_switch) |
3172 | return 1; |
3173 | |
3174 | ptq = intel_pt_cpu_to_ptq(pt, cpu); |
3175 | if (!ptq || !ptq->sync_switch) |
3176 | return 1; |
3177 | |
3178 | switch (ptq->switch_state) { |
3179 | case INTEL_PT_SS_NOT_TRACING: |
3180 | break; |
3181 | case INTEL_PT_SS_UNKNOWN: |
3182 | case INTEL_PT_SS_TRACING: |
3183 | ptq->next_tid = tid; |
3184 | ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP; |
3185 | return 0; |
3186 | case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: |
3187 | if (!ptq->on_heap) { |
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
3190 | err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, |
3191 | ptq->timestamp); |
3192 | if (err) |
3193 | return err; |
3194 | ptq->on_heap = true; |
3195 | } |
3196 | ptq->switch_state = INTEL_PT_SS_TRACING; |
3197 | break; |
3198 | case INTEL_PT_SS_EXPECTING_SWITCH_IP: |
3199 | intel_pt_log("ERROR: cpu %d expecting switch ip\n" , cpu); |
3200 | break; |
3201 | default: |
3202 | break; |
3203 | } |
3204 | |
3205 | ptq->next_tid = -1; |
3206 | |
3207 | return 1; |
3208 | } |
3209 | |
3210 | #ifdef HAVE_LIBTRACEEVENT |
3211 | static int intel_pt_process_switch(struct intel_pt *pt, |
3212 | struct perf_sample *sample) |
3213 | { |
3214 | pid_t tid; |
3215 | int cpu, ret; |
3216 | struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id); |
3217 | |
3218 | if (evsel != pt->switch_evsel) |
3219 | return 0; |
3220 | |
3221 | tid = evsel__intval(evsel, sample, "next_pid" ); |
3222 | cpu = sample->cpu; |
3223 | |
3224 | intel_pt_log("sched_switch: cpu %d tid %d time %" PRIu64" tsc %#" PRIx64"\n" , |
3225 | cpu, tid, sample->time, perf_time_to_tsc(sample->time, |
3226 | &pt->tc)); |
3227 | |
3228 | ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); |
3229 | if (ret <= 0) |
3230 | return ret; |
3231 | |
3232 | return machine__set_current_tid(pt->machine, cpu, -1, tid); |
3233 | } |
3234 | #endif /* HAVE_LIBTRACEEVENT */ |
3235 | |
3236 | static int intel_pt_context_switch_in(struct intel_pt *pt, |
3237 | struct perf_sample *sample) |
3238 | { |
3239 | pid_t pid = sample->pid; |
3240 | pid_t tid = sample->tid; |
3241 | int cpu = sample->cpu; |
3242 | |
3243 | if (pt->sync_switch) { |
3244 | struct intel_pt_queue *ptq; |
3245 | |
3246 | ptq = intel_pt_cpu_to_ptq(pt, cpu); |
3247 | if (ptq && ptq->sync_switch) { |
3248 | ptq->next_tid = -1; |
3249 | switch (ptq->switch_state) { |
3250 | case INTEL_PT_SS_NOT_TRACING: |
3251 | case INTEL_PT_SS_UNKNOWN: |
3252 | case INTEL_PT_SS_TRACING: |
3253 | break; |
3254 | case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: |
3255 | case INTEL_PT_SS_EXPECTING_SWITCH_IP: |
3256 | ptq->switch_state = INTEL_PT_SS_TRACING; |
3257 | break; |
3258 | default: |
3259 | break; |
3260 | } |
3261 | } |
3262 | } |
3263 | |
3264 | /* |
3265 | * If the current tid has not been updated yet, ensure it is now that |
3266 | * a "switch in" event has occurred. |
3267 | */ |
	if (machine__get_current_tid(pt->machine, cpu) == tid)
3269 | return 0; |
3270 | |
	return machine__set_current_tid(pt->machine, cpu, pid, tid);
3272 | } |
3273 | |
3274 | static int intel_pt_guest_context_switch(struct intel_pt *pt, |
3275 | union perf_event *event, |
3276 | struct perf_sample *sample) |
3277 | { |
3278 | bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; |
3279 | struct machines *machines = &pt->session->machines; |
	struct machine *machine = machines__find(machines, sample->machine_pid);
3281 | |
3282 | pt->have_guest_sideband = true; |
3283 | |
3284 | /* |
3285 | * sync_switch cannot handle guest machines at present, so just disable |
3286 | * it. |
3287 | */ |
3288 | pt->sync_switch_not_supported = true; |
3289 | if (pt->sync_switch) |
3290 | intel_pt_disable_sync_switch(pt); |
3291 | |
3292 | if (out) |
3293 | return 0; |
3294 | |
3295 | if (!machine) |
3296 | return -EINVAL; |
3297 | |
	return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
3299 | } |
3300 | |
3301 | static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, |
3302 | struct perf_sample *sample) |
3303 | { |
3304 | bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; |
3305 | pid_t pid, tid; |
3306 | int cpu, ret; |
3307 | |
3308 | if (perf_event__is_guest(event)) |
3309 | return intel_pt_guest_context_switch(pt, event, sample); |
3310 | |
3311 | cpu = sample->cpu; |
3312 | |
3313 | if (pt->have_sched_switch == 3) { |
3314 | if (!out) |
3315 | return intel_pt_context_switch_in(pt, sample); |
3316 | if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { |
3317 | pr_err("Expecting CPU-wide context switch event\n" ); |
3318 | return -EINVAL; |
3319 | } |
3320 | pid = event->context_switch.next_prev_pid; |
3321 | tid = event->context_switch.next_prev_tid; |
3322 | } else { |
3323 | if (out) |
3324 | return 0; |
3325 | pid = sample->pid; |
3326 | tid = sample->tid; |
3327 | } |
3328 | |
3329 | if (tid == -1) |
3330 | intel_pt_log("context_switch event has no tid\n" ); |
3331 | |
	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3333 | if (ret <= 0) |
3334 | return ret; |
3335 | |
	return machine__set_current_tid(pt->machine, cpu, pid, tid);
3337 | } |
3338 | |
3339 | static int intel_pt_process_itrace_start(struct intel_pt *pt, |
3340 | union perf_event *event, |
3341 | struct perf_sample *sample) |
3342 | { |
3343 | if (!pt->per_cpu_mmaps) |
3344 | return 0; |
3345 | |
3346 | intel_pt_log("itrace_start: cpu %d pid %d tid %d time %" PRIu64" tsc %#" PRIx64"\n" , |
3347 | sample->cpu, event->itrace_start.pid, |
3348 | event->itrace_start.tid, sample->time, |
3349 | perf_time_to_tsc(sample->time, &pt->tc)); |
3350 | |
	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
3354 | } |
3355 | |
3356 | static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt, |
3357 | union perf_event *event, |
3358 | struct perf_sample *sample) |
3359 | { |
3360 | u64 hw_id = event->aux_output_hw_id.hw_id; |
3361 | struct auxtrace_queue *queue; |
3362 | struct intel_pt_queue *ptq; |
3363 | struct evsel *evsel; |
3364 | |
3365 | queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); |
	evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
	if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
		pr_err("Bad AUX output hardware ID\n");
3369 | return -EINVAL; |
3370 | } |
3371 | |
3372 | ptq = queue->priv; |
3373 | |
3374 | ptq->pebs[hw_id].evsel = evsel; |
3375 | ptq->pebs[hw_id].id = sample->id; |
3376 | |
3377 | return 0; |
3378 | } |
3379 | |
3380 | static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr, |
3381 | struct addr_location *al) |
3382 | { |
	if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
3384 | if (!thread__find_map(thread, cpumode, addr, al)) |
3385 | return -1; |
3386 | } |
3387 | |
3388 | return 0; |
3389 | } |
3390 | |
3391 | /* Invalidate all instruction cache entries that overlap the text poke */ |
3392 | static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event) |
3393 | { |
3394 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
3395 | u64 addr = event->text_poke.addr + event->text_poke.new_len - 1; |
3396 | /* Assume text poke begins in a basic block no more than 4096 bytes */ |
3397 | int cnt = 4096 + event->text_poke.new_len; |
3398 | struct thread *thread = pt->unknown_thread; |
3399 | struct addr_location al; |
3400 | struct machine *machine = pt->machine; |
3401 | struct intel_pt_cache_entry *e; |
3402 | u64 offset; |
3403 | int ret = 0; |
3404 | |
	addr_location__init(&al);
3406 | if (!event->text_poke.new_len) |
3407 | goto out; |
3408 | |
3409 | for (; cnt; cnt--, addr--) { |
3410 | struct dso *dso; |
3411 | |
		if (intel_pt_find_map(thread, cpumode, addr, &al)) {
3413 | if (addr < event->text_poke.addr) |
3414 | goto out; |
3415 | continue; |
3416 | } |
3417 | |
		dso = map__dso(al.map);
3419 | if (!dso || !dso->auxtrace_cache) |
3420 | continue; |
3421 | |
		offset = map__map_ip(al.map, addr);
3423 | |
3424 | e = intel_pt_cache_lookup(dso, machine, offset); |
3425 | if (!e) |
3426 | continue; |
3427 | |
3428 | if (addr + e->byte_cnt + e->length <= event->text_poke.addr) { |
3429 | /* |
3430 | * No overlap. Working backwards there cannot be another |
3431 | * basic block that overlaps the text poke if there is a |
3432 | * branch instruction before the text poke address. |
3433 | */ |
3434 | if (e->branch != INTEL_PT_BR_NO_BRANCH) |
3435 | goto out; |
3436 | } else { |
3437 | intel_pt_cache_invalidate(dso, machine, offset); |
3438 | intel_pt_log("Invalidated instruction cache for %s at %#" PRIx64"\n" , |
3439 | dso->long_name, addr); |
3440 | } |
3441 | } |
3442 | out: |
	addr_location__exit(&al);
3444 | return ret; |
3445 | } |
3446 | |
3447 | static int intel_pt_process_event(struct perf_session *session, |
3448 | union perf_event *event, |
3449 | struct perf_sample *sample, |
3450 | struct perf_tool *tool) |
3451 | { |
3452 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3453 | auxtrace); |
3454 | u64 timestamp; |
3455 | int err = 0; |
3456 | |
3457 | if (dump_trace) |
3458 | return 0; |
3459 | |
3460 | if (!tool->ordered_events) { |
3461 | pr_err("Intel Processor Trace requires ordered events\n" ); |
3462 | return -EINVAL; |
3463 | } |
3464 | |
3465 | if (sample->time && sample->time != (u64)-1) |
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3467 | else |
3468 | timestamp = 0; |
3469 | |
3470 | if (timestamp || pt->timeless_decoding) { |
3471 | err = intel_pt_update_queues(pt); |
3472 | if (err) |
3473 | return err; |
3474 | } |
3475 | |
3476 | if (pt->timeless_decoding) { |
3477 | if (pt->sampling_mode) { |
3478 | if (sample->aux_sample.size) |
3479 | err = intel_pt_process_timeless_sample(pt, |
3480 | sample); |
3481 | } else if (event->header.type == PERF_RECORD_EXIT) { |
3482 | err = intel_pt_process_timeless_queues(pt, |
							       event->fork.tid,
							       sample->time);
3485 | } |
3486 | } else if (timestamp) { |
3487 | if (!pt->first_timestamp) |
3488 | intel_pt_first_timestamp(pt, timestamp); |
3489 | err = intel_pt_process_queues(pt, timestamp); |
3490 | } |
3491 | if (err) |
3492 | return err; |
3493 | |
3494 | if (event->header.type == PERF_RECORD_SAMPLE) { |
3495 | if (pt->synth_opts.add_callchain && !sample->callchain) |
3496 | intel_pt_add_callchain(pt, sample); |
3497 | if (pt->synth_opts.add_last_branch && !sample->branch_stack) |
3498 | intel_pt_add_br_stack(pt, sample); |
3499 | } |
3500 | |
3501 | if (event->header.type == PERF_RECORD_AUX && |
3502 | (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && |
3503 | pt->synth_opts.errors) { |
3504 | err = intel_pt_lost(pt, sample); |
3505 | if (err) |
3506 | return err; |
3507 | } |
3508 | |
3509 | #ifdef HAVE_LIBTRACEEVENT |
3510 | if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) |
3511 | err = intel_pt_process_switch(pt, sample); |
3512 | else |
3513 | #endif |
3514 | if (event->header.type == PERF_RECORD_ITRACE_START) |
3515 | err = intel_pt_process_itrace_start(pt, event, sample); |
3516 | else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) |
3517 | err = intel_pt_process_aux_output_hw_id(pt, event, sample); |
3518 | else if (event->header.type == PERF_RECORD_SWITCH || |
3519 | event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) |
3520 | err = intel_pt_context_switch(pt, event, sample); |
3521 | |
3522 | if (!err && event->header.type == PERF_RECORD_TEXT_POKE) |
3523 | err = intel_pt_text_poke(pt, event); |
3524 | |
	if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
		intel_pt_log("event %u: cpu %d time %" PRIu64 " tsc %#" PRIx64 " ",
			     event->header.type, sample->cpu, sample->time, timestamp);
3528 | intel_pt_log_event(event); |
3529 | } |
3530 | |
3531 | return err; |
3532 | } |
3533 | |
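/*
* Called when all sideband events have been delivered: decode whatever
* trace data remains, up to MAX_TIMESTAMP.
*/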
3534 | static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool) |
3535 | { |
3536 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3537 | auxtrace); |
3538 | int ret; |
3539 | |
3540 | if (dump_trace) |
3541 | return 0; |
3542 | |
3543 | if (!tool->ordered_events) |
3544 | return -EINVAL; |
3545 | |
3546 | ret = intel_pt_update_queues(pt); |
3547 | if (ret < 0) |
3548 | return ret; |
3549 | |
3550 | if (pt->timeless_decoding) |
return intel_pt_process_timeless_queues(pt, -1,
3552 | MAX_TIMESTAMP - 1); |
3553 | |
3554 | return intel_pt_process_queues(pt, MAX_TIMESTAMP); |
3555 | } |
3556 | |
3557 | static void intel_pt_free_events(struct perf_session *session) |
3558 | { |
3559 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3560 | auxtrace); |
3561 | struct auxtrace_queues *queues = &pt->queues; |
3562 | unsigned int i; |
3563 | |
3564 | for (i = 0; i < queues->nr_queues; i++) { |
intel_pt_free_queue(queues->queue_array[i].priv);
3566 | queues->queue_array[i].priv = NULL; |
3567 | } |
3568 | intel_pt_log_disable(); |
3569 | auxtrace_queues__free(queues); |
3570 | } |
3571 | |
3572 | static void intel_pt_free(struct perf_session *session) |
3573 | { |
3574 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3575 | auxtrace); |
3576 | |
3577 | auxtrace_heap__free(&pt->heap); |
3578 | intel_pt_free_events(session); |
3579 | session->auxtrace = NULL; |
3580 | intel_pt_free_vmcs_info(pt); |
thread__put(pt->unknown_thread);
3582 | addr_filters__exit(&pt->filts); |
3583 | zfree(&pt->chain); |
3584 | zfree(&pt->filter); |
3585 | zfree(&pt->time_ranges); |
3586 | zfree(&pt->br_stack); |
3587 | free(pt); |
3588 | } |
3589 | |
3590 | static bool intel_pt_evsel_is_auxtrace(struct perf_session *session, |
3591 | struct evsel *evsel) |
3592 | { |
3593 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3594 | auxtrace); |
3595 | |
3596 | return evsel->core.attr.type == pt->pmu_type; |
3597 | } |
3598 | |
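/*
* PERF_RECORD_AUXTRACE handler: if the trace data was not queued up front
* (e.g. a piped trace), add this buffer to the queues at the current file
* offset.
*/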
3599 | static int intel_pt_process_auxtrace_event(struct perf_session *session, |
3600 | union perf_event *event, |
3601 | struct perf_tool *tool __maybe_unused) |
3602 | { |
3603 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3604 | auxtrace); |
3605 | |
3606 | if (!pt->data_queued) { |
3607 | struct auxtrace_buffer *buffer; |
3608 | off_t data_offset; |
int fd = perf_data__fd(session->data);
3610 | int err; |
3611 | |
if (perf_data__is_pipe(session->data)) {
3613 | data_offset = 0; |
3614 | } else { |
3615 | data_offset = lseek(fd, 0, SEEK_CUR); |
3616 | if (data_offset == -1) |
3617 | return -errno; |
3618 | } |
3619 | |
3620 | err = auxtrace_queues__add_event(&pt->queues, session, event, |
3621 | data_offset, &buffer); |
3622 | if (err) |
3623 | return err; |
3624 | |
/* Dump here now that we have copied a piped trace out of the pipe */
3626 | if (dump_trace) { |
3627 | if (auxtrace_buffer__get_data(buffer, fd)) { |
intel_pt_dump_event(pt, buffer->data,
buffer->size);
3630 | auxtrace_buffer__put_data(buffer); |
3631 | } |
3632 | } |
3633 | } |
3634 | |
3635 | return 0; |
3636 | } |
3637 | |
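/*
* Queue trace data either by file offset (AUX area events) or, for
* aux_sample data, by the TSC derived from the sample time.
*/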
3638 | static int intel_pt_queue_data(struct perf_session *session, |
3639 | struct perf_sample *sample, |
3640 | union perf_event *event, u64 data_offset) |
3641 | { |
3642 | struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, |
3643 | auxtrace); |
3644 | u64 timestamp; |
3645 | |
3646 | if (event) { |
3647 | return auxtrace_queues__add_event(&pt->queues, session, event, |
3648 | data_offset, NULL); |
3649 | } |
3650 | |
3651 | if (sample->time && sample->time != (u64)-1) |
timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3653 | else |
3654 | timestamp = 0; |
3655 | |
3656 | return auxtrace_queues__add_sample(&pt->queues, session, sample, |
3657 | data_offset, timestamp); |
3658 | } |
3659 | |
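/*
* Small wrapper that lets the dummy tool callback below find the session
* when delivering synthesized attribute events.
*/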
3660 | struct intel_pt_synth { |
3661 | struct perf_tool dummy_tool; |
3662 | struct perf_session *session; |
3663 | }; |
3664 | |
3665 | static int intel_pt_event_synth(struct perf_tool *tool, |
3666 | union perf_event *event, |
3667 | struct perf_sample *sample __maybe_unused, |
3668 | struct machine *machine __maybe_unused) |
3669 | { |
3670 | struct intel_pt_synth *intel_pt_synth = |
3671 | container_of(tool, struct intel_pt_synth, dummy_tool); |
3672 | |
return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3674 | NULL); |
3675 | } |
3676 | |
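/*
* Synthesize an attribute event describing one synthesized event type and
* deliver it to the session via the dummy tool.
*/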
3677 | static int intel_pt_synth_event(struct perf_session *session, const char *name, |
3678 | struct perf_event_attr *attr, u64 id) |
3679 | { |
3680 | struct intel_pt_synth intel_pt_synth; |
3681 | int err; |
3682 | |
3683 | pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n" , |
3684 | name, id, (u64)attr->sample_type); |
3685 | |
3686 | memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth)); |
3687 | intel_pt_synth.session = session; |
3688 | |
3689 | err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1, |
3690 | &id, intel_pt_event_synth); |
3691 | if (err) |
3692 | pr_err("%s: failed to synthesize '%s' event type\n" , |
3693 | __func__, name); |
3694 | |
3695 | return err; |
3696 | } |
3697 | |
3698 | static void intel_pt_set_event_name(struct evlist *evlist, u64 id, |
3699 | const char *name) |
3700 | { |
3701 | struct evsel *evsel; |
3702 | |
3703 | evlist__for_each_entry(evlist, evsel) { |
3704 | if (evsel->core.id && evsel->core.id[0] == id) { |
3705 | if (evsel->name) |
3706 | zfree(&evsel->name); |
3707 | evsel->name = strdup(name); |
3708 | break; |
3709 | } |
3710 | } |
3711 | } |
3712 | |
3713 | static struct evsel *intel_pt_evsel(struct intel_pt *pt, |
3714 | struct evlist *evlist) |
3715 | { |
3716 | struct evsel *evsel; |
3717 | |
3718 | evlist__for_each_entry(evlist, evsel) { |
3719 | if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids) |
3720 | return evsel; |
3721 | } |
3722 | |
3723 | return NULL; |
3724 | } |
3725 | |
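/*
* Set up the events to be synthesized from the decoded trace. The
* attribute template is cloned from the Intel PT evsel, and each
* synthesized event type is given its own sample id.
*/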
3726 | static int intel_pt_synth_events(struct intel_pt *pt, |
3727 | struct perf_session *session) |
3728 | { |
3729 | struct evlist *evlist = session->evlist; |
3730 | struct evsel *evsel = intel_pt_evsel(pt, evlist); |
3731 | struct perf_event_attr attr; |
3732 | u64 id; |
3733 | int err; |
3734 | |
3735 | if (!evsel) { |
3736 | pr_debug("There are no selected events with Intel Processor Trace data\n" ); |
3737 | return 0; |
3738 | } |
3739 | |
3740 | memset(&attr, 0, sizeof(struct perf_event_attr)); |
3741 | attr.size = sizeof(struct perf_event_attr); |
3742 | attr.type = PERF_TYPE_HARDWARE; |
3743 | attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; |
3744 | attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | |
3745 | PERF_SAMPLE_PERIOD; |
3746 | if (pt->timeless_decoding) |
3747 | attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; |
3748 | else |
3749 | attr.sample_type |= PERF_SAMPLE_TIME; |
3750 | if (!pt->per_cpu_mmaps) |
3751 | attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; |
3752 | attr.exclude_user = evsel->core.attr.exclude_user; |
3753 | attr.exclude_kernel = evsel->core.attr.exclude_kernel; |
3754 | attr.exclude_hv = evsel->core.attr.exclude_hv; |
3755 | attr.exclude_host = evsel->core.attr.exclude_host; |
3756 | attr.exclude_guest = evsel->core.attr.exclude_guest; |
3757 | attr.sample_id_all = evsel->core.attr.sample_id_all; |
3758 | attr.read_format = evsel->core.attr.read_format; |
3759 | |
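/* Base the synthesized ids well clear of the ids already in use */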
3760 | id = evsel->core.id[0] + 1000000000; |
3761 | if (!id) |
3762 | id = 1; |
3763 | |
3764 | if (pt->synth_opts.branches) { |
3765 | attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; |
3766 | attr.sample_period = 1; |
3767 | attr.sample_type |= PERF_SAMPLE_ADDR; |
err = intel_pt_synth_event(session, "branches", &attr, id);
3769 | if (err) |
3770 | return err; |
3771 | pt->sample_branches = true; |
3772 | pt->branches_sample_type = attr.sample_type; |
3773 | pt->branches_id = id; |
3774 | id += 1; |
3775 | attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR; |
3776 | } |
3777 | |
3778 | if (pt->synth_opts.callchain) |
3779 | attr.sample_type |= PERF_SAMPLE_CALLCHAIN; |
3780 | if (pt->synth_opts.last_branch) { |
3781 | attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; |
3782 | /* |
3783 | * We don't use the hardware index, but the sample generation |
3784 | * code uses the new format branch_stack with this field, |
3785 | * so the event attributes must indicate that it's present. |
3786 | */ |
3787 | attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX; |
3788 | } |
3789 | |
3790 | if (pt->synth_opts.instructions) { |
3791 | attr.config = PERF_COUNT_HW_INSTRUCTIONS; |
3792 | if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) |
3793 | attr.sample_period = |
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3795 | else |
3796 | attr.sample_period = pt->synth_opts.period; |
err = intel_pt_synth_event(session, "instructions", &attr, id);
3798 | if (err) |
3799 | return err; |
3800 | pt->sample_instructions = true; |
3801 | pt->instructions_sample_type = attr.sample_type; |
3802 | pt->instructions_id = id; |
3803 | id += 1; |
3804 | } |
3805 | |
3806 | if (pt->synth_opts.cycles) { |
3807 | attr.config = PERF_COUNT_HW_CPU_CYCLES; |
3808 | if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) |
3809 | attr.sample_period = |
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3811 | else |
3812 | attr.sample_period = pt->synth_opts.period; |
err = intel_pt_synth_event(session, "cycles", &attr, id);
3814 | if (err) |
3815 | return err; |
3816 | pt->sample_cycles = true; |
3817 | pt->cycles_sample_type = attr.sample_type; |
3818 | pt->cycles_id = id; |
3819 | id += 1; |
3820 | } |
3821 | |
3822 | attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD; |
3823 | attr.sample_period = 1; |
3824 | |
3825 | if (pt->synth_opts.transactions) { |
3826 | attr.config = PERF_COUNT_HW_INSTRUCTIONS; |
err = intel_pt_synth_event(session, "transactions", &attr, id);
3828 | if (err) |
3829 | return err; |
3830 | pt->sample_transactions = true; |
3831 | pt->transactions_sample_type = attr.sample_type; |
3832 | pt->transactions_id = id; |
intel_pt_set_event_name(evlist, id, "transactions");
3834 | id += 1; |
3835 | } |
3836 | |
3837 | attr.type = PERF_TYPE_SYNTH; |
3838 | attr.sample_type |= PERF_SAMPLE_RAW; |
3839 | |
3840 | if (pt->synth_opts.ptwrites) { |
3841 | attr.config = PERF_SYNTH_INTEL_PTWRITE; |
err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3843 | if (err) |
3844 | return err; |
3845 | pt->sample_ptwrites = true; |
3846 | pt->ptwrites_sample_type = attr.sample_type; |
3847 | pt->ptwrites_id = id; |
intel_pt_set_event_name(evlist, id, "ptwrite");
3849 | id += 1; |
3850 | } |
3851 | |
3852 | if (pt->synth_opts.pwr_events) { |
3853 | pt->sample_pwr_events = true; |
3854 | pt->pwr_events_sample_type = attr.sample_type; |
3855 | |
3856 | attr.config = PERF_SYNTH_INTEL_CBR; |
err = intel_pt_synth_event(session, "cbr", &attr, id);
3858 | if (err) |
3859 | return err; |
3860 | pt->cbr_id = id; |
intel_pt_set_event_name(evlist, id, "cbr");
3862 | id += 1; |
3863 | |
3864 | attr.config = PERF_SYNTH_INTEL_PSB; |
err = intel_pt_synth_event(session, "psb", &attr, id);
3866 | if (err) |
3867 | return err; |
3868 | pt->psb_id = id; |
intel_pt_set_event_name(evlist, id, "psb");
3870 | id += 1; |
3871 | } |
3872 | |
3873 | if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) { |
3874 | attr.config = PERF_SYNTH_INTEL_MWAIT; |
err = intel_pt_synth_event(session, "mwait", &attr, id);
3876 | if (err) |
3877 | return err; |
3878 | pt->mwait_id = id; |
intel_pt_set_event_name(evlist, id, "mwait");
3880 | id += 1; |
3881 | |
3882 | attr.config = PERF_SYNTH_INTEL_PWRE; |
err = intel_pt_synth_event(session, "pwre", &attr, id);
3884 | if (err) |
3885 | return err; |
3886 | pt->pwre_id = id; |
intel_pt_set_event_name(evlist, id, "pwre");
3888 | id += 1; |
3889 | |
3890 | attr.config = PERF_SYNTH_INTEL_EXSTOP; |
err = intel_pt_synth_event(session, "exstop", &attr, id);
3892 | if (err) |
3893 | return err; |
3894 | pt->exstop_id = id; |
intel_pt_set_event_name(evlist, id, "exstop");
3896 | id += 1; |
3897 | |
3898 | attr.config = PERF_SYNTH_INTEL_PWRX; |
err = intel_pt_synth_event(session, "pwrx", &attr, id);
3900 | if (err) |
3901 | return err; |
3902 | pt->pwrx_id = id; |
intel_pt_set_event_name(evlist, id, "pwrx");
3904 | id += 1; |
3905 | } |
3906 | |
3907 | if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) { |
3908 | attr.config = PERF_SYNTH_INTEL_EVT; |
err = intel_pt_synth_event(session, "evt", &attr, id);
3910 | if (err) |
3911 | return err; |
3912 | pt->evt_sample_type = attr.sample_type; |
3913 | pt->evt_id = id; |
intel_pt_set_event_name(evlist, id, "evt");
3915 | id += 1; |
3916 | } |
3917 | |
3918 | if (pt->synth_opts.intr_events && pt->cap_event_trace) { |
3919 | attr.config = PERF_SYNTH_INTEL_IFLAG_CHG; |
err = intel_pt_synth_event(session, "iflag", &attr, id);
3921 | if (err) |
3922 | return err; |
3923 | pt->iflag_chg_sample_type = attr.sample_type; |
3924 | pt->iflag_chg_id = id; |
intel_pt_set_event_name(evlist, id, "iflag");
3926 | id += 1; |
3927 | } |
3928 | |
3929 | return 0; |
3930 | } |
3931 | |
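/*
* Note evsels that use aux_output (PEBS via Intel PT). single_pebs is
* left true only if there is exactly one such evsel.
*/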
3932 | static void intel_pt_setup_pebs_events(struct intel_pt *pt) |
3933 | { |
3934 | struct evsel *evsel; |
3935 | |
3936 | if (!pt->synth_opts.other_events) |
3937 | return; |
3938 | |
3939 | evlist__for_each_entry(pt->session->evlist, evsel) { |
3940 | if (evsel->core.attr.aux_output && evsel->core.id) { |
3941 | if (pt->single_pebs) { |
3942 | pt->single_pebs = false; |
3943 | return; |
3944 | } |
3945 | pt->single_pebs = true; |
3946 | pt->sample_pebs = true; |
3947 | pt->pebs_evsel = evsel; |
3948 | } |
3949 | } |
3950 | } |
3951 | |
3952 | static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist) |
3953 | { |
3954 | struct evsel *evsel; |
3955 | |
3956 | evlist__for_each_entry_reverse(evlist, evsel) { |
3957 | const char *name = evsel__name(evsel); |
3958 | |
if (!strcmp(name, "sched:sched_switch"))
3960 | return evsel; |
3961 | } |
3962 | |
3963 | return NULL; |
3964 | } |
3965 | |
3966 | static bool intel_pt_find_switch(struct evlist *evlist) |
3967 | { |
3968 | struct evsel *evsel; |
3969 | |
3970 | evlist__for_each_entry(evlist, evsel) { |
3971 | if (evsel->core.attr.context_switch) |
3972 | return true; |
3973 | } |
3974 | |
3975 | return false; |
3976 | } |
3977 | |
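/* Handle the intel-pt.mispred-all and intel-pt.max-loops config variables */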
3978 | static int intel_pt_perf_config(const char *var, const char *value, void *data) |
3979 | { |
3980 | struct intel_pt *pt = data; |
3981 | |
3982 | if (!strcmp(var, "intel-pt.mispred-all" )) |
3983 | pt->mispred_all = perf_config_bool(var, value); |
3984 | |
3985 | if (!strcmp(var, "intel-pt.max-loops" )) |
3986 | perf_config_int(dest: &pt->max_loops, var, value); |
3987 | |
3988 | return 0; |
3989 | } |
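
/*
* The above can be set from the perf config file. An illustrative
* ~/.perfconfig snippet (values are examples, not recommendations):
*
* [intel-pt]
* mispred-all = on
* max-loops = 1000
*/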
3990 | |
3991 | /* Find least TSC which converts to ns or later */ |
3992 | static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt) |
3993 | { |
3994 | u64 tsc, tm; |
3995 | |
tsc = perf_time_to_tsc(ns, &pt->tc);
3997 | |
3998 | while (1) { |
tm = tsc_to_perf_time(tsc, &pt->tc);
4000 | if (tm < ns) |
4001 | break; |
4002 | tsc -= 1; |
4003 | } |
4004 | |
4005 | while (tm < ns) |
tm = tsc_to_perf_time(++tsc, &pt->tc);
4007 | |
4008 | return tsc; |
4009 | } |
4010 | |
4011 | /* Find greatest TSC which converts to ns or earlier */ |
4012 | static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt) |
4013 | { |
4014 | u64 tsc, tm; |
4015 | |
tsc = perf_time_to_tsc(ns, &pt->tc);
4017 | |
4018 | while (1) { |
tm = tsc_to_perf_time(tsc, &pt->tc);
4020 | if (tm > ns) |
4021 | break; |
4022 | tsc += 1; |
4023 | } |
4024 | |
4025 | while (tm > ns) |
tm = tsc_to_perf_time(--tsc, &pt->tc);
4027 | |
4028 | return tsc; |
4029 | } |
4030 | |
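/*
* Convert the requested perf-time ranges into TSC ranges for the decoder,
* using the helpers above so the round trip stays within each interval.
*/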
4031 | static int intel_pt_setup_time_ranges(struct intel_pt *pt, |
4032 | struct itrace_synth_opts *opts) |
4033 | { |
4034 | struct perf_time_interval *p = opts->ptime_range; |
4035 | int n = opts->range_num; |
4036 | int i; |
4037 | |
4038 | if (!n || !p || pt->timeless_decoding) |
4039 | return 0; |
4040 | |
4041 | pt->time_ranges = calloc(n, sizeof(struct range)); |
4042 | if (!pt->time_ranges) |
4043 | return -ENOMEM; |
4044 | |
4045 | pt->range_cnt = n; |
4046 | |
4047 | intel_pt_log("%s: %u range(s)\n" , __func__, n); |
4048 | |
4049 | for (i = 0; i < n; i++) { |
4050 | struct range *r = &pt->time_ranges[i]; |
4051 | u64 ts = p[i].start; |
4052 | u64 te = p[i].end; |
4053 | |
4054 | /* |
4055 | * Take care to ensure the TSC range matches the perf-time range |
4056 | * when converted back to perf-time. |
4057 | */ |
r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
r->end = te ? intel_pt_tsc_end(te, pt) : 0;
4060 | |
4061 | intel_pt_log("range %d: perf time interval: %" PRIu64" to %" PRIu64"\n" , |
4062 | i, ts, te); |
4063 | intel_pt_log("range %d: TSC time interval: %#" PRIx64" to %#" PRIx64"\n" , |
4064 | i, r->start, r->end); |
4065 | } |
4066 | |
4067 | return 0; |
4068 | } |
4069 | |
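/*
* Parse one VM Time Correlation argument, which has the form:
*
* <tsc offset>[:<vmcs>[,<vmcs>]...]
*
* A bare offset sets the default TSC offset; with a ':' the offset is
* recorded against each listed VMCS address, e.g. (hypothetical values)
* "0x123456789 : 0xfeedface0000".
*/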
4070 | static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args) |
4071 | { |
4072 | struct intel_pt_vmcs_info *vmcs_info; |
4073 | u64 tsc_offset, vmcs; |
4074 | char *p = *args; |
4075 | |
4076 | errno = 0; |
4077 | |
4078 | p = skip_spaces(p); |
4079 | if (!*p) |
4080 | return 1; |
4081 | |
4082 | tsc_offset = strtoull(p, &p, 0); |
4083 | if (errno) |
4084 | return -errno; |
4085 | p = skip_spaces(p); |
4086 | if (*p != ':') { |
4087 | pt->dflt_tsc_offset = tsc_offset; |
4088 | *args = p; |
4089 | return 0; |
4090 | } |
4091 | p += 1; |
4092 | while (1) { |
4093 | vmcs = strtoull(p, &p, 0); |
4094 | if (errno) |
4095 | return -errno; |
4096 | if (!vmcs) |
4097 | return -EINVAL; |
vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
4099 | if (!vmcs_info) |
4100 | return -ENOMEM; |
4101 | p = skip_spaces(p); |
4102 | if (*p != ',') |
4103 | break; |
4104 | p += 1; |
4105 | } |
4106 | *args = p; |
4107 | return 0; |
4108 | } |
4109 | |
4110 | static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt) |
4111 | { |
4112 | char *args = pt->synth_opts.vm_tm_corr_args; |
4113 | int ret; |
4114 | |
4115 | if (!args) |
4116 | return 0; |
4117 | |
4118 | do { |
ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
4120 | } while (!ret); |
4121 | |
4122 | if (ret < 0) { |
4123 | pr_err("Failed to parse VM Time Correlation options\n" ); |
4124 | return ret; |
4125 | } |
4126 | |
4127 | return 0; |
4128 | } |
4129 | |
4130 | static const char * const intel_pt_info_fmts[] = { |
[INTEL_PT_PMU_TYPE] = " PMU Type %" PRId64 "\n",
[INTEL_PT_TIME_SHIFT] = " Time Shift %" PRIu64 "\n",
[INTEL_PT_TIME_MULT] = " Time Multiplier %" PRIu64 "\n",
[INTEL_PT_TIME_ZERO] = " Time Zero %" PRIu64 "\n",
[INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %" PRId64 "\n",
[INTEL_PT_TSC_BIT] = " TSC bit %#" PRIx64 "\n",
[INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#" PRIx64 "\n",
[INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %" PRId64 "\n",
[INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %" PRId64 "\n",
[INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %" PRId64 "\n",
[INTEL_PT_MTC_BIT] = " MTC bit %#" PRIx64 "\n",
[INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#" PRIx64 "\n",
[INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %" PRIu64 "\n",
[INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %" PRIu64 "\n",
[INTEL_PT_CYC_BIT] = " CYC bit %#" PRIx64 "\n",
[INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %" PRIu64 "\n",
[INTEL_PT_FILTER_STR_LEN] = " Filter string len. %" PRIu64 "\n",
4148 | }; |
4149 | |
4150 | static void intel_pt_print_info(__u64 *arr, int start, int finish) |
4151 | { |
4152 | int i; |
4153 | |
4154 | if (!dump_trace) |
4155 | return; |
4156 | |
4157 | for (i = start; i <= finish; i++) { |
4158 | const char *fmt = intel_pt_info_fmts[i]; |
4159 | |
4160 | if (fmt) |
4161 | fprintf(stdout, fmt, arr[i]); |
4162 | } |
4163 | } |
4164 | |
4165 | static void intel_pt_print_info_str(const char *name, const char *str) |
4166 | { |
4167 | if (!dump_trace) |
4168 | return; |
4169 | |
fprintf(stdout, " %-20s%s\n", name, str ? str : "");
4171 | } |
4172 | |
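/* Return true if the auxtrace_info event is big enough to contain priv[pos] */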
4173 | static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos) |
4174 | { |
4175 | return auxtrace_info->header.size >= |
4176 | sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1)); |
4177 | } |
4178 | |
4179 | int intel_pt_process_auxtrace_info(union perf_event *event, |
4180 | struct perf_session *session) |
4181 | { |
4182 | struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; |
4183 | size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS; |
4184 | struct intel_pt *pt; |
4185 | void *info_end; |
4186 | __u64 *info; |
4187 | int err; |
4188 | |
4189 | if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + |
4190 | min_sz) |
4191 | return -EINVAL; |
4192 | |
4193 | pt = zalloc(sizeof(struct intel_pt)); |
4194 | if (!pt) |
4195 | return -ENOMEM; |
4196 | |
4197 | pt->vmcs_info = RB_ROOT; |
4198 | |
4199 | addr_filters__init(&pt->filts); |
4200 | |
err = perf_config(intel_pt_perf_config, pt);
4202 | if (err) |
4203 | goto err_free; |
4204 | |
4205 | err = auxtrace_queues__init(&pt->queues); |
4206 | if (err) |
4207 | goto err_free; |
4208 | |
4209 | if (session->itrace_synth_opts->set) { |
4210 | pt->synth_opts = *session->itrace_synth_opts; |
4211 | } else { |
4212 | struct itrace_synth_opts *opts = session->itrace_synth_opts; |
4213 | |
4214 | itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample); |
4215 | if (!opts->default_no_sample && !opts->inject) { |
4216 | pt->synth_opts.branches = false; |
4217 | pt->synth_opts.callchain = true; |
4218 | pt->synth_opts.add_callchain = true; |
4219 | } |
4220 | pt->synth_opts.thread_stack = opts->thread_stack; |
4221 | } |
4222 | |
4223 | if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT)) |
4224 | intel_pt_log_set_name(INTEL_PT_PMU_NAME); |
4225 | |
4226 | pt->session = session; |
4227 | pt->machine = &session->machines.host; /* No kvm support */ |
4228 | pt->auxtrace_type = auxtrace_info->type; |
4229 | pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE]; |
4230 | pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT]; |
4231 | pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT]; |
4232 | pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO]; |
4233 | pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO]; |
4234 | pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT]; |
4235 | pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT]; |
4236 | pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH]; |
4237 | pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE]; |
4238 | pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS]; |
intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
INTEL_PT_PER_CPU_MMAPS);
4241 | |
if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
4243 | pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT]; |
4244 | pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS]; |
4245 | pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N]; |
4246 | pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D]; |
4247 | pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT]; |
intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
INTEL_PT_CYC_BIT);
4250 | } |
4251 | |
if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
4253 | pt->max_non_turbo_ratio = |
4254 | auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO]; |
intel_pt_print_info(&auxtrace_info->priv[0],
INTEL_PT_MAX_NONTURBO_RATIO,
INTEL_PT_MAX_NONTURBO_RATIO);
4258 | } |
4259 | |
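/*
* Beyond the fixed priv[] entries the info is variable-sized: the
* address filter string (if any), then the event trace capability flag.
*/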
4260 | info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1; |
4261 | info_end = (void *)auxtrace_info + auxtrace_info->header.size; |
4262 | |
if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
4264 | size_t len; |
4265 | |
4266 | len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN]; |
intel_pt_print_info(&auxtrace_info->priv[0],
INTEL_PT_FILTER_STR_LEN,
INTEL_PT_FILTER_STR_LEN);
4270 | if (len) { |
4271 | const char *filter = (const char *)info; |
4272 | |
4273 | len = roundup(len + 1, 8); |
4274 | info += len >> 3; |
4275 | if ((void *)info > info_end) { |
4276 | pr_err("%s: bad filter string length\n" , __func__); |
4277 | err = -EINVAL; |
4278 | goto err_free_queues; |
4279 | } |
4280 | pt->filter = memdup(filter, len); |
4281 | if (!pt->filter) { |
4282 | err = -ENOMEM; |
4283 | goto err_free_queues; |
4284 | } |
4285 | if (session->header.needs_swap) |
mem_bswap_64(pt->filter, len);
4287 | if (pt->filter[len - 1]) { |
4288 | pr_err("%s: filter string not null terminated\n" , __func__); |
4289 | err = -EINVAL; |
4290 | goto err_free_queues; |
4291 | } |
4292 | err = addr_filters__parse_bare_filter(&pt->filts, |
4293 | filter); |
4294 | if (err) |
4295 | goto err_free_queues; |
4296 | } |
intel_pt_print_info_str("Filter string", pt->filter);
4298 | } |
4299 | |
4300 | if ((void *)info < info_end) { |
4301 | pt->cap_event_trace = *info++; |
4302 | if (dump_trace) |
4303 | fprintf(stdout, " Cap Event Trace %d\n" , |
4304 | pt->cap_event_trace); |
4305 | } |
4306 | |
4307 | pt->timeless_decoding = intel_pt_timeless_decoding(pt); |
4308 | if (pt->timeless_decoding && !pt->tc.time_mult) |
4309 | pt->tc.time_mult = 1; |
4310 | pt->have_tsc = intel_pt_have_tsc(pt); |
4311 | pt->sampling_mode = intel_pt_sampling_mode(pt); |
4312 | pt->est_tsc = !pt->timeless_decoding; |
4313 | |
4314 | if (pt->synth_opts.vm_time_correlation) { |
4315 | if (pt->timeless_decoding) { |
4316 | pr_err("Intel PT has no time information for VM Time Correlation\n" ); |
4317 | err = -EINVAL; |
4318 | goto err_free_queues; |
4319 | } |
4320 | if (session->itrace_synth_opts->ptime_range) { |
4321 | pr_err("Time ranges cannot be specified with VM Time Correlation\n" ); |
4322 | err = -EINVAL; |
4323 | goto err_free_queues; |
4324 | } |
4325 | /* Currently TSC Offset is calculated using MTC packets */ |
4326 | if (!intel_pt_have_mtc(pt)) { |
4327 | pr_err("MTC packets must have been enabled for VM Time Correlation\n" ); |
4328 | err = -EINVAL; |
4329 | goto err_free_queues; |
4330 | } |
4331 | err = intel_pt_parse_vm_tm_corr_args(pt); |
4332 | if (err) |
4333 | goto err_free_queues; |
4334 | } |
4335 | |
pt->unknown_thread = thread__new(999999999, 999999999);
4337 | if (!pt->unknown_thread) { |
4338 | err = -ENOMEM; |
4339 | goto err_free_queues; |
4340 | } |
4341 | |
err = thread__set_comm(pt->unknown_thread, "unknown", 0);
4343 | if (err) |
4344 | goto err_delete_thread; |
if (thread__init_maps(pt->unknown_thread, pt->machine)) {
4346 | err = -ENOMEM; |
4347 | goto err_delete_thread; |
4348 | } |
4349 | |
4350 | pt->auxtrace.process_event = intel_pt_process_event; |
4351 | pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event; |
4352 | pt->auxtrace.queue_data = intel_pt_queue_data; |
4353 | pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample; |
4354 | pt->auxtrace.flush_events = intel_pt_flush; |
4355 | pt->auxtrace.free_events = intel_pt_free_events; |
4356 | pt->auxtrace.free = intel_pt_free; |
4357 | pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace; |
4358 | session->auxtrace = &pt->auxtrace; |
4359 | |
4360 | if (dump_trace) |
4361 | return 0; |
4362 | |
4363 | if (pt->have_sched_switch == 1) { |
pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
4365 | if (!pt->switch_evsel) { |
4366 | pr_err("%s: missing sched_switch event\n" , __func__); |
4367 | err = -EINVAL; |
4368 | goto err_delete_thread; |
4369 | } |
4370 | } else if (pt->have_sched_switch == 2 && |
!intel_pt_find_switch(session->evlist)) {
pr_err("%s: missing context_switch attribute flag\n", __func__);
4373 | err = -EINVAL; |
4374 | goto err_delete_thread; |
4375 | } |
4376 | |
4377 | if (pt->synth_opts.log) { |
4378 | bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR; |
4379 | unsigned int log_on_error_size = pt->synth_opts.log_on_error_size; |
4380 | |
intel_pt_log_enable(log_on_error, log_on_error_size);
4382 | } |
4383 | |
4384 | /* Maximum non-turbo ratio is TSC freq / 100 MHz */ |
4385 | if (pt->tc.time_mult) { |
u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
4387 | |
4388 | if (!pt->max_non_turbo_ratio) |
4389 | pt->max_non_turbo_ratio = |
4390 | (tsc_freq + 50000000) / 100000000; |
4391 | intel_pt_log("TSC frequency %" PRIu64"\n" , tsc_freq); |
4392 | intel_pt_log("Maximum non-turbo ratio %u\n" , |
4393 | pt->max_non_turbo_ratio); |
4394 | pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000; |
4395 | } |
4396 | |
err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
4398 | if (err) |
4399 | goto err_delete_thread; |
4400 | |
4401 | if (pt->synth_opts.calls) |
4402 | pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | |
4403 | PERF_IP_FLAG_TRACE_END; |
4404 | if (pt->synth_opts.returns) |
4405 | pt->branches_filter |= PERF_IP_FLAG_RETURN | |
4406 | PERF_IP_FLAG_TRACE_BEGIN; |
4407 | |
4408 | if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) && |
4409 | !symbol_conf.use_callchain) { |
4410 | symbol_conf.use_callchain = true; |
if (callchain_register_param(&callchain_param) < 0) {
4412 | symbol_conf.use_callchain = false; |
4413 | pt->synth_opts.callchain = false; |
4414 | pt->synth_opts.add_callchain = false; |
4415 | } |
4416 | } |
4417 | |
4418 | if (pt->synth_opts.add_callchain) { |
4419 | err = intel_pt_callchain_init(pt); |
4420 | if (err) |
4421 | goto err_delete_thread; |
4422 | } |
4423 | |
4424 | if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) { |
4425 | pt->br_stack_sz = pt->synth_opts.last_branch_sz; |
4426 | pt->br_stack_sz_plus = pt->br_stack_sz; |
4427 | } |
4428 | |
4429 | if (pt->synth_opts.add_last_branch) { |
4430 | err = intel_pt_br_stack_init(pt); |
4431 | if (err) |
4432 | goto err_delete_thread; |
4433 | /* |
4434 | * Additional branch stack size to cater for tracing from the |
4435 | * actual sample ip to where the sample time is recorded. |
4436 | * Measured at about 200 branches, but generously set to 1024. |
4437 | * If kernel space is not being traced, then add just 1 for the |
4438 | * branch to kernel space. |
4439 | */ |
4440 | if (intel_pt_tracing_kernel(pt)) |
4441 | pt->br_stack_sz_plus += 1024; |
4442 | else |
4443 | pt->br_stack_sz_plus += 1; |
4444 | } |
4445 | |
4446 | pt->use_thread_stack = pt->synth_opts.callchain || |
4447 | pt->synth_opts.add_callchain || |
4448 | pt->synth_opts.thread_stack || |
4449 | pt->synth_opts.last_branch || |
4450 | pt->synth_opts.add_last_branch; |
4451 | |
4452 | pt->callstack = pt->synth_opts.callchain || |
4453 | pt->synth_opts.add_callchain || |
4454 | pt->synth_opts.thread_stack; |
4455 | |
4456 | err = intel_pt_synth_events(pt, session); |
4457 | if (err) |
4458 | goto err_delete_thread; |
4459 | |
4460 | intel_pt_setup_pebs_events(pt); |
4461 | |
if (perf_data__is_pipe(session->data)) {
pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
" The output cannot be relied upon. In particular,\n"
" timestamps and the order of events may be incorrect.\n");
4466 | } |
4467 | |
if (pt->sampling_mode || list_empty(&session->auxtrace_index))
4469 | err = auxtrace_queue_data(session, true, true); |
4470 | else |
4471 | err = auxtrace_queues__process_index(&pt->queues, session); |
4472 | if (err) |
4473 | goto err_delete_thread; |
4474 | |
4475 | if (pt->queues.populated) |
4476 | pt->data_queued = true; |
4477 | |
4478 | if (pt->timeless_decoding) |
4479 | pr_debug2("Intel PT decoding without timestamps\n" ); |
4480 | |
4481 | return 0; |
4482 | |
4483 | err_delete_thread: |
4484 | zfree(&pt->chain); |
4485 | thread__zput(pt->unknown_thread); |
4486 | err_free_queues: |
4487 | intel_pt_log_disable(); |
4488 | auxtrace_queues__free(&pt->queues); |
4489 | session->auxtrace = NULL; |
4490 | err_free: |
4491 | addr_filters__exit(&pt->filts); |
4492 | zfree(&pt->filter); |
4493 | zfree(&pt->time_ranges); |
4494 | free(pt); |
4495 | return err; |
4496 | } |
4497 | |