// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-bts.c: Intel Branch Trace Store (BTS) support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <endian.h>
#include <errno.h>
#include <byteswap.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>

#include "color.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "tsc.h"
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"
#include "util/synthetic-events.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_BTS_ERR_NOINSN	5
#define INTEL_BTS_ERR_LOST	9

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif

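/*
 * Per-session decode state: the queues of AUX trace data, a min-heap that
 * orders those queues by timestamp, TSC conversion parameters taken from
 * the auxtrace info event, and the options controlling which events get
 * synthesized.
 */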
struct intel_bts {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	bool sampling_mode;
	bool snapshot_mode;
	bool data_queued;
	u32 pmu_type;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;
	size_t branches_event_size;
	unsigned long num_events;
};

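/*
 * Per-queue (per-CPU or per-thread) decode state, including the buffer
 * currently being processed and the sample flags and instruction decode
 * of the most recent branch record.
 */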
struct intel_bts_queue {
	struct intel_bts *bts;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	bool on_heap;
	bool done;
	pid_t pid;
	pid_t tid;
	int cpu;
	u64 time;
	struct intel_pt_insn intel_pt_insn;
	u32 sample_flags;
};

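/*
 * Layout of one BTS record: three 64-bit little-endian words giving the
 * branch source, the branch destination, and a misc word in which bit
 * 0x10 is set for a correctly predicted branch (see intel_bts_dump()).
 */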
struct branch {
	u64 from;
	u64 to;
	u64 misc;
};

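/*
 * Hex-dump raw BTS data, one 24-byte record per line, annotating each
 * complete record with its decoded from/to addresses and prediction bit.
 */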
static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
			   unsigned char *buf, size_t len)
{
	struct branch *branch;
	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel BTS data: size %zu bytes\n",
		      len);

	while (len) {
		if (len >= br_sz)
			sz = br_sz;
		else
			sz = len;
		printf(".");
		color_fprintf(stdout, color, " %08zx: ", pos);
		for (i = 0; i < sz; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < br_sz; i++)
			color_fprintf(stdout, color, "   ");
		if (len >= br_sz) {
			branch = (struct branch *)buf;
			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
				      le64_to_cpu(branch->from),
				      le64_to_cpu(branch->to),
				      le64_to_cpu(branch->misc) & 0x10 ?
				      "pred" : "miss");
		} else {
			color_fprintf(stdout, color, " Bad record!\n");
		}
		pos += sz;
		buf += sz;
		len -= sz;
	}
}

static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
				 size_t len)
{
	printf(".\n");
	intel_bts_dump(bts, buf, len);
}

static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
			     sample->tid, 0, "Lost trace data", sample->time);

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
						     unsigned int queue_nr)
{
	struct intel_bts_queue *btsq;

	btsq = zalloc(sizeof(struct intel_bts_queue));
	if (!btsq)
		return NULL;

	btsq->bts = bts;
	btsq->queue_nr = queue_nr;
	btsq->pid = -1;
	btsq->tid = -1;
	btsq->cpu = -1;

	return btsq;
}

static int intel_bts_setup_queue(struct intel_bts *bts,
				 struct auxtrace_queue *queue,
				 unsigned int queue_nr)
{
	struct intel_bts_queue *btsq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!btsq) {
		btsq = intel_bts_alloc_queue(bts, queue_nr);
		if (!btsq)
			return -ENOMEM;
		queue->priv = btsq;

		if (queue->cpu != -1)
			btsq->cpu = queue->cpu;
		btsq->tid = queue->tid;
	}

	if (bts->sampling_mode)
		return 0;

	if (!btsq->on_heap && !btsq->buffer) {
		int ret;

		btsq->buffer = auxtrace_buffer__next(queue, NULL);
		if (!btsq->buffer)
			return 0;

		ret = auxtrace_heap__add(&bts->heap, queue_nr,
					 btsq->buffer->reference);
		if (ret)
			return ret;
		btsq->on_heap = true;
	}

	return 0;
}

static int intel_bts_setup_queues(struct intel_bts *bts)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bts->queues.nr_queues; i++) {
		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
					    i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline int intel_bts_update_queues(struct intel_bts *bts)
{
	if (bts->queues.new_data) {
		bts->queues.new_data = false;
		return intel_bts_setup_queues(bts);
	}
	return 0;
}

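/*
 * In snapshot mode successive snapshots of the BTS buffer can overlap.
 * Find the longest tail of buffer 'a' that matches the start of buffer 'b',
 * scanning at branch-record granularity, and return the first byte of 'b'
 * that is not a repeat of 'a' (or 'b' itself if there is no overlap).
 */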
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
					     unsigned char *buf_b, size_t len_b)
{
	size_t offs, len;

	if (len_a > len_b)
		offs = len_a - len_b;
	else
		offs = 0;

	for (; offs < len_a; offs += sizeof(struct branch)) {
		len = len_a - offs;
		if (!memcmp(buf_a + offs, buf_b, len))
			return buf_b + len;
	}

	return buf_b;
}

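/*
 * Trim the start of buffer 'b' so that it does not repeat the tail of the
 * buffer preceding it in the queue, recording the adjusted start and size
 * in use_data/use_size.
 */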
static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
				    struct auxtrace_buffer *b)
{
	struct auxtrace_buffer *a;
	void *start;

	if (b->list.prev == &queue->head)
		return 0;
	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}

static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
{
	return machine__kernel_ip(bts->machine, ip) ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

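/*
 * Synthesize one perf branch sample from a BTS record. When 'perf inject'
 * is in use, the full sample payload is also packed into the event so it
 * can be written to the output stream.
 */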
static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
					 struct branch *branch)
{
	int ret;
	struct intel_bts *bts = btsq->bts;
	union perf_event event;
	struct perf_sample sample = { .ip = 0, };

	if (bts->synth_opts.initial_skip &&
	    bts->num_events++ <= bts->synth_opts.initial_skip)
		return 0;

	sample.ip = le64_to_cpu(branch->from);
	sample.cpumode = intel_bts_cpumode(bts, sample.ip);
	sample.pid = btsq->pid;
	sample.tid = btsq->tid;
	sample.addr = le64_to_cpu(branch->to);
	sample.id = btsq->bts->branches_id;
	sample.stream_id = btsq->bts->branches_id;
	sample.period = 1;
	sample.cpu = btsq->cpu;
	sample.flags = btsq->sample_flags;
	sample.insn_len = btsq->intel_pt_insn.length;
	memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);

	event.sample.header.type = PERF_RECORD_SAMPLE;
	event.sample.header.misc = sample.cpumode;
	event.sample.header.size = sizeof(struct perf_event_header);

	if (bts->synth_opts.inject) {
		event.sample.header.size = bts->branches_event_size;
		ret = perf_event__synthesize_sample(&event,
						    bts->branches_sample_type,
						    0, &sample);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
	if (ret)
		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}

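/*
 * Fetch the instruction bytes at 'ip' from the traced thread's memory
 * image and decode them with the Intel PT instruction decoder to get the
 * branch instruction's type and length.
 */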
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
	struct machine *machine = btsq->bts->machine;
	struct thread *thread;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	bool x86_64;
	int err = -1;

	thread = machine__find_thread(machine, -1, btsq->tid);
	if (!thread)
		return -1;

	len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ,
			     &x86_64);
	if (len <= 0)
		goto out_put;

	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
		goto out_put;

	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
				 pid_t tid, u64 ip)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
			     "Failed to get instruction", 0);

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

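/*
 * Work out the sample flags for a BTS record: a zero 'from' or 'to'
 * address marks a trace begin/end, otherwise the branch is classified by
 * decoding the instruction at the source address. A non-syscall branch
 * from user space into the kernel is flagged as an async interrupt branch.
 */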
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
				     struct branch *branch)
{
	int err;

	if (!branch->from) {
		if (branch->to)
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_TRACE_BEGIN;
		else
			btsq->sample_flags = 0;
		btsq->intel_pt_insn.length = 0;
	} else if (!branch->to) {
		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		btsq->intel_pt_insn.length = 0;
	} else {
		err = intel_bts_get_next_insn(btsq, branch->from);
		if (err) {
			btsq->sample_flags = 0;
			btsq->intel_pt_insn.length = 0;
			if (!btsq->bts->synth_opts.errors)
				return 0;
			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
						    btsq->pid, btsq->tid,
						    branch->from);
			return err;
		}
		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
		/* Check for an async branch into the kernel */
		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_CALL |
					   PERF_IP_FLAG_SYSCALLRET))
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_CALL |
					     PERF_IP_FLAG_ASYNC |
					     PERF_IP_FLAG_INTERRUPT;
	}

	return 0;
}

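/*
 * Walk all the records in a buffer: update the thread stack, apply the
 * calls/returns branch filter, and synthesize a branch sample for each
 * record that passes it.
 */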
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
				    struct auxtrace_buffer *buffer,
				    struct thread *thread)
{
	struct branch *branch;
	size_t sz, bsz = sizeof(struct branch);
	u32 filter = btsq->bts->branches_filter;
	int err = 0;

	if (buffer->use_data) {
		sz = buffer->use_size;
		branch = buffer->use_data;
	} else {
		sz = buffer->size;
		branch = buffer->data;
	}

	if (!btsq->bts->sample_branches)
		return 0;

	for (; sz > bsz; branch += 1, sz -= bsz) {
		if (!branch->from && !branch->to)
			continue;
		intel_bts_get_branch_type(btsq, branch);
		if (btsq->bts->synth_opts.thread_stack)
			thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
					    le64_to_cpu(branch->from),
					    le64_to_cpu(branch->to),
					    btsq->intel_pt_insn.length,
					    buffer->buffer_nr + 1, true, 0, 0);
		if (filter && !(filter & btsq->sample_flags))
			continue;
		err = intel_bts_synth_branch_sample(btsq, branch);
		if (err)
			break;
	}
	return err;
}

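/*
 * Process one buffer from a queue: map in its data, trim any snapshot
 * overlap, process its records, then move on to the next buffer, reporting
 * that buffer's timestamp so the queue can be re-positioned on the heap.
 * Returns 1 when the queue has no more data.
 */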
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;
	struct thread *thread;
	int err;

	if (btsq->done)
		return 1;

	if (btsq->pid == -1) {
		thread = machine__find_thread(btsq->bts->machine, -1,
					      btsq->tid);
		if (thread)
			btsq->pid = thread__pid(thread);
	} else {
		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
						 btsq->tid);
	}

	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];

	if (!buffer)
		buffer = auxtrace_buffer__next(queue, NULL);

	if (!buffer) {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
		err = 1;
		goto out_put;
	}

	/* Currently there is no support for split buffers */
	if (buffer->consecutive) {
		err = -EINVAL;
		goto out_put;
	}

	if (!buffer->data) {
		int fd = perf_data__fd(btsq->bts->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data) {
			err = -ENOMEM;
			goto out_put;
		}
	}

	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
	    intel_bts_do_fix_overlap(queue, buffer)) {
		err = -ENOMEM;
		goto out_put;
	}

	if (!btsq->bts->synth_opts.callchain &&
	    !btsq->bts->synth_opts.thread_stack && thread &&
	    (!old_buffer || btsq->bts->sampling_mode ||
	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
		thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);

	err = intel_bts_process_buffer(btsq, buffer, thread);

	auxtrace_buffer__drop_data(buffer);

	btsq->buffer = auxtrace_buffer__next(queue, buffer);
	if (btsq->buffer) {
		if (timestamp)
			*timestamp = btsq->buffer->reference;
	} else {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
	}
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
	u64 ts = 0;
	int ret;

	while (1) {
		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0)
			return ret;
		if (ret)
			break;
	}
	return 0;
}

static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
		struct intel_bts_queue *btsq = queue->priv;

		if (btsq && btsq->tid == tid)
			return intel_bts_flush_queue(btsq);
	}
	return 0;
}

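/*
 * Process queues in timestamp order: repeatedly pop the queue with the
 * oldest data off the min-heap and process one of its buffers, until no
 * queued data older than 'timestamp' remains.
 */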
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
	while (1) {
		unsigned int queue_nr;
		struct auxtrace_queue *queue;
		struct intel_bts_queue *btsq;
		u64 ts = 0;
		int ret;

		if (!bts->heap.heap_cnt)
			return 0;

		if (bts->heap.heap_array[0].ordinal > timestamp)
			return 0;

		queue_nr = bts->heap.heap_array[0].queue_nr;
		queue = &bts->queues.queue_array[queue_nr];
		btsq = queue->priv;

		auxtrace_heap__pop(&bts->heap);

		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&bts->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			btsq->on_heap = false;
		}
	}

	return 0;
}

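/*
 * Main per-event callback: convert the sample time to a TSC value and
 * process all queued trace data up to that point, so that synthesized
 * samples are delivered in order with the rest of the event stream. Also
 * flushes the exiting thread's queue on PERF_RECORD_EXIT and reports
 * truncated AUX data as lost trace.
 */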
static int intel_bts_process_event(struct perf_session *session,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	u64 timestamp;
	int err;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel BTS requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
	else
		timestamp = 0;

	err = intel_bts_update_queues(bts);
	if (err)
		return err;

	err = intel_bts_process_queues(bts, timestamp);
	if (err)
		return err;
	if (event->header.type == PERF_RECORD_EXIT) {
		err = intel_bts_process_tid_exit(bts, event->fork.tid);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    bts->synth_opts.errors)
		err = intel_bts_lost(bts, sample);

	return err;
}

static int intel_bts_process_auxtrace_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	if (bts->sampling_mode)
		return 0;

	if (!bts->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&bts->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_bts_dump_event(bts, buffer->data,
						     buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

static int intel_bts_flush(struct perf_session *session,
			   struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	int ret;

	if (dump_trace || bts->sampling_mode)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_bts_update_queues(bts);
	if (ret < 0)
		return ret;

	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}

static void intel_bts_free_queue(void *priv)
{
	struct intel_bts_queue *btsq = priv;

	if (!btsq)
		return;
	free(btsq);
}

static void intel_bts_free_events(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_bts_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}

static void intel_bts_free(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	auxtrace_heap__free(&bts->heap);
	intel_bts_free_events(session);
	session->auxtrace = NULL;
	free(bts);
}

static bool intel_bts_evsel_is_auxtrace(struct perf_session *session,
					struct evsel *evsel)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	return evsel->core.attr.type == bts->pmu_type;
}

struct intel_bts_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_bts_event_synth(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct intel_bts_synth *intel_bts_synth =
			container_of(tool, struct intel_bts_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_bts_synth->session,
						 event, NULL);
}

static int intel_bts_synth_event(struct perf_session *session,
				 struct perf_event_attr *attr, u64 id)
{
	struct intel_bts_synth intel_bts_synth;

	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
	intel_bts_synth.session = session;

	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
					   &id, intel_bts_event_synth);
}

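/*
 * Build the attribute for the synthesized 'branches' events, cloning most
 * fields from the evsel that collected the BTS data, pick an id unlikely
 * to collide with existing sample ids, and announce the synthetic event
 * to the session.
 */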
static int intel_bts_synth_events(struct intel_bts *bts,
				  struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel BTS data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	id = evsel->core.id[0] + 1000000000;
	if (!id)
		id = 1;

	if (bts->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_bts_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		bts->sample_branches = true;
		bts->branches_sample_type = attr.sample_type;
		bts->branches_id = id;
		/*
		 * We only use sample types from PERF_SAMPLE_MASK so we can use
		 * __evsel__sample_size() here.
		 */
		bts->branches_event_size = sizeof(struct perf_record_sample) +
					   __evsel__sample_size(attr.sample_type);
	}

	return 0;
}

static const char * const intel_bts_info_fmts[] = {
	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
};

static void intel_bts_print_info(__u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}

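/*
 * Entry point for the decoder: validate and parse the
 * PERF_RECORD_AUXTRACE_INFO event written by 'perf record', allocate and
 * initialize the decoder state, hook it into the session, and queue any
 * trace data already indexed in the perf.data file.
 */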
int intel_bts_process_auxtrace_info(union perf_event *event,
				    struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
	struct intel_bts *bts;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					 min_sz)
		return -EINVAL;

	bts = zalloc(sizeof(struct intel_bts));
	if (!bts)
		return -ENOMEM;

	err = auxtrace_queues__init(&bts->queues);
	if (err)
		goto err_free;

	bts->session = session;
	bts->machine = &session->machines.host; /* No kvm support */
	bts->auxtrace_type = auxtrace_info->type;
	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
	bts->cap_user_time_zero =
			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];

	bts->sampling_mode = false;

	bts->auxtrace.process_event = intel_bts_process_event;
	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
	bts->auxtrace.flush_events = intel_bts_flush;
	bts->auxtrace.free_events = intel_bts_free_events;
	bts->auxtrace.free = intel_bts_free;
	bts->auxtrace.evsel_is_auxtrace = intel_bts_evsel_is_auxtrace;
	session->auxtrace = &bts->auxtrace;

	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
			     INTEL_BTS_SNAPSHOT_MODE);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts->set) {
		bts->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&bts->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		bts->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (bts->synth_opts.calls)
		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_TRACE_END;
	if (bts->synth_opts.returns)
		bts->branches_filter |= PERF_IP_FLAG_RETURN |
					PERF_IP_FLAG_TRACE_BEGIN;

	err = intel_bts_synth_events(bts, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&bts->queues, session);
	if (err)
		goto err_free_queues;

	if (bts->queues.populated)
		bts->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&bts->queues);
	session->auxtrace = NULL;
err_free:
	free(bts);
	return err;
}