1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <errno.h> |
3 | #include <inttypes.h> |
4 | #include "string2.h" |
5 | #include <sys/param.h> |
6 | #include <sys/types.h> |
7 | #include <byteswap.h> |
8 | #include <unistd.h> |
9 | #include <regex.h> |
10 | #include <stdio.h> |
11 | #include <stdlib.h> |
12 | #include <linux/compiler.h> |
13 | #include <linux/list.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/bitops.h> |
16 | #include <linux/string.h> |
17 | #include <linux/stringify.h> |
18 | #include <linux/zalloc.h> |
19 | #include <sys/stat.h> |
20 | #include <sys/utsname.h> |
21 | #include <linux/time64.h> |
22 | #include <dirent.h> |
23 | #ifdef HAVE_LIBBPF_SUPPORT |
24 | #include <bpf/libbpf.h> |
25 | #endif |
26 | #include <perf/cpumap.h> |
27 | #include <tools/libc_compat.h> // reallocarray |
28 | |
29 | #include "dso.h" |
30 | #include "evlist.h" |
31 | #include "evsel.h" |
32 | #include "util/evsel_fprintf.h" |
33 | #include "header.h" |
34 | #include "memswap.h" |
35 | #include "trace-event.h" |
36 | #include "session.h" |
37 | #include "symbol.h" |
38 | #include "debug.h" |
39 | #include "cpumap.h" |
40 | #include "pmu.h" |
41 | #include "pmus.h" |
42 | #include "vdso.h" |
43 | #include "strbuf.h" |
44 | #include "build-id.h" |
45 | #include "data.h" |
46 | #include <api/fs/fs.h> |
47 | #include "asm/bug.h" |
48 | #include "tool.h" |
49 | #include "time-utils.h" |
50 | #include "units.h" |
51 | #include "util/util.h" // perf_exe() |
52 | #include "cputopo.h" |
53 | #include "bpf-event.h" |
54 | #include "bpf-utils.h" |
55 | #include "clockid.h" |
56 | |
57 | #include <linux/ctype.h> |
58 | #include <internal/lib.h> |
59 | |
60 | #ifdef HAVE_LIBTRACEEVENT |
61 | #include <traceevent/event-parse.h> |
62 | #endif |
63 | |
64 | /* |
65 | * magic2 = "PERFILE2" |
66 | * must be a numerical value to let the endianness |
67 | * determine the memory layout. That way we are able |
68 | * to detect endianness when reading the perf.data file |
69 | * back. |
70 | * |
71 | * we check for legacy (PERFFILE) format. |
72 | */ |
73 | static const char *__perf_magic1 = "PERFFILE"; |
74 | static const u64 __perf_magic2 = 0x32454c4946524550ULL; |
75 | static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; |
76 | |
77 | #define PERF_MAGIC __perf_magic2 |
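/*
 * Illustrative sketch (not part of the format code itself): because the
 * magic is stored as a number, a reader can recognize a byte-swapped file
 * by comparing against both layouts defined above, e.g.:
 *
 *	u64 magic;	// first 8 bytes of the perf.data header
 *
 *	if (magic == __perf_magic2)
 *		;	// same endianness as the writer
 *	else if (magic == __perf_magic2_sw)
 *		;	// opposite endianness, header fields need bswap_64()
 */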
78 | |
79 | const char perf_version_string[] = PERF_VERSION; |
80 | |
81 | struct perf_file_attr { |
82 | struct perf_event_attr attr; |
83 | struct perf_file_section ids; |
84 | }; |
85 | |
86 | void perf_header__set_feat(struct perf_header *header, int feat) |
87 | { |
88 | __set_bit(feat, header->adds_features); |
89 | } |
90 | |
91 | void perf_header__clear_feat(struct perf_header *header, int feat) |
92 | { |
93 | __clear_bit(feat, header->adds_features); |
94 | } |
95 | |
96 | bool perf_header__has_feat(const struct perf_header *header, int feat) |
97 | { |
98 | return test_bit(feat, header->adds_features); |
99 | } |
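/*
 * Example usage (illustrative only, assuming the HEADER_BUILD_ID feature
 * bit from header.h): a writer marks the features it will emit before the
 * feature sections are written out, e.g.:
 *
 *	struct perf_header *header = &session->header;
 *
 *	perf_header__set_feat(header, HEADER_BUILD_ID);
 *	if (!perf_header__has_feat(header, HEADER_BUILD_ID))
 *		pr_debug("build id feature not requested\n");
 */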
100 | |
101 | static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size) |
102 | { |
103 | ssize_t ret = writen(ff->fd, buf, size); |
104 | |
105 | if (ret != (ssize_t)size) |
106 | return ret < 0 ? (int)ret : -1; |
107 | return 0; |
108 | } |
109 | |
110 | static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size) |
111 | { |
112 | /* struct perf_event_header::size is u16 */ |
113 | const size_t max_size = 0xffff - sizeof(struct perf_event_header); |
114 | size_t new_size = ff->size; |
115 | void *addr; |
116 | |
117 | if (size + ff->offset > max_size) |
118 | return -E2BIG; |
119 | |
120 | while (size > (new_size - ff->offset)) |
121 | new_size <<= 1; |
122 | new_size = min(max_size, new_size); |
123 | |
124 | if (ff->size < new_size) { |
125 | addr = realloc(ff->buf, new_size); |
126 | if (!addr) |
127 | return -ENOMEM; |
128 | ff->buf = addr; |
129 | ff->size = new_size; |
130 | } |
131 | |
132 | memcpy(ff->buf + ff->offset, buf, size); |
133 | ff->offset += size; |
134 | |
135 | return 0; |
136 | } |
137 | |
138 | /* Return: 0 if succeeded, -ERR if failed. */ |
139 | int do_write(struct feat_fd *ff, const void *buf, size_t size) |
140 | { |
141 | if (!ff->buf) |
142 | return __do_write_fd(ff, buf, size); |
143 | return __do_write_buf(ff, buf, size); |
144 | } |
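/*
 * Usage sketch: callers do not care whether the destination is a file
 * descriptor or the in-memory pipe buffer, e.g. writing a single u64:
 *
 *	u64 val = 42;
 *	int err = do_write(ff, &val, sizeof(val));
 *
 *	if (err < 0)
 *		return err;	// -ENOMEM/-E2BIG from the buffer path, or a write error
 */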
145 | |
146 | /* Return: 0 if succeeded, -ERR if failed. */ |
147 | static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size) |
148 | { |
149 | u64 *p = (u64 *) set; |
150 | int i, ret; |
151 | |
152 | ret = do_write(ff, &size, sizeof(size)); |
153 | if (ret < 0) |
154 | return ret; |
155 | |
156 | for (i = 0; (u64) i < BITS_TO_U64(size); i++) { |
157 | ret = do_write(ff, p + i, sizeof(*p)); |
158 | if (ret < 0) |
159 | return ret; |
160 | } |
161 | |
162 | return 0; |
163 | } |
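/*
 * On-disk bitmap layout written above (sketch): a u64 holding the size in
 * bits, followed by BITS_TO_U64(size) raw u64 words. For example a 100-bit
 * map is stored as the value 100 followed by two u64 words.
 */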
164 | |
165 | /* Return: 0 if succeeded, -ERR if failed. */ |
166 | int write_padded(struct feat_fd *ff, const void *bf, |
167 | size_t count, size_t count_aligned) |
168 | { |
169 | static const char zero_buf[NAME_ALIGN]; |
170 | int err = do_write(ff, bf, count); |
171 | |
172 | if (!err) |
173 | err = do_write(ff, zero_buf, count_aligned - count); |
174 | |
175 | return err; |
176 | } |
177 | |
178 | #define string_size(str) \ |
179 | (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32)) |
180 | |
181 | /* Return: 0 if succeeded, -ERR if failed. */ |
182 | static int do_write_string(struct feat_fd *ff, const char *str) |
183 | { |
184 | u32 len, olen; |
185 | int ret; |
186 | |
187 | olen = strlen(str) + 1; |
188 | len = PERF_ALIGN(olen, NAME_ALIGN); |
189 | |
190 | /* write len, incl. \0 */ |
191 | ret = do_write(ff, &len, sizeof(len)); |
192 | if (ret < 0) |
193 | return ret; |
194 | |
195 | return write_padded(ff, str, olen, len); |
196 | } |
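/*
 * Resulting on-disk string layout (sketch): a u32 length already rounded up
 * to NAME_ALIGN, followed by the NUL-terminated string and its zero padding.
 * Assuming NAME_ALIGN is 64, the string "perf" would be stored as:
 *
 *	u32 len = 64;
 *	char data[64] = "perf\0\0..."; // zero padded up to len
 */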
197 | |
198 | static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size) |
199 | { |
200 | ssize_t ret = readn(ff->fd, addr, size); |
201 | |
202 | if (ret != size) |
203 | return ret < 0 ? (int)ret : -1; |
204 | return 0; |
205 | } |
206 | |
207 | static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size) |
208 | { |
209 | if (size > (ssize_t)ff->size - ff->offset) |
210 | return -1; |
211 | |
212 | memcpy(addr, ff->buf + ff->offset, size); |
213 | ff->offset += size; |
214 | |
215 | return 0; |
216 | |
217 | } |
218 | |
219 | static int __do_read(struct feat_fd *ff, void *addr, ssize_t size) |
220 | { |
221 | if (!ff->buf) |
222 | return __do_read_fd(ff, addr, size); |
223 | return __do_read_buf(ff, addr, size); |
224 | } |
225 | |
226 | static int do_read_u32(struct feat_fd *ff, u32 *addr) |
227 | { |
228 | int ret; |
229 | |
230 | ret = __do_read(ff, addr, sizeof(*addr)); |
231 | if (ret) |
232 | return ret; |
233 | |
234 | if (ff->ph->needs_swap) |
235 | *addr = bswap_32(*addr); |
236 | return 0; |
237 | } |
238 | |
239 | static int do_read_u64(struct feat_fd *ff, u64 *addr) |
240 | { |
241 | int ret; |
242 | |
243 | ret = __do_read(ff, addr, sizeof(*addr)); |
244 | if (ret) |
245 | return ret; |
246 | |
247 | if (ff->ph->needs_swap) |
248 | *addr = bswap_64(*addr); |
249 | return 0; |
250 | } |
251 | |
252 | static char *do_read_string(struct feat_fd *ff) |
253 | { |
254 | u32 len; |
255 | char *buf; |
256 | |
257 | if (do_read_u32(ff, &len)) |
258 | return NULL; |
259 | |
260 | buf = malloc(len); |
261 | if (!buf) |
262 | return NULL; |
263 | |
264 | if (!__do_read(ff, buf, len)) { |
265 | /* |
266 | * strings are padded by zeroes |
267 | * thus the actual strlen of buf |
268 | * may be less than len |
269 | */ |
270 | return buf; |
271 | } |
272 | |
273 | free(buf); |
274 | return NULL; |
275 | } |
276 | |
277 | /* Return: 0 if succeeded, -ERR if failed. */ |
278 | static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize) |
279 | { |
280 | unsigned long *set; |
281 | u64 size, *p; |
282 | int i, ret; |
283 | |
284 | ret = do_read_u64(ff, &size); |
285 | if (ret) |
286 | return ret; |
287 | |
288 | set = bitmap_zalloc(size); |
289 | if (!set) |
290 | return -ENOMEM; |
291 | |
292 | p = (u64 *) set; |
293 | |
294 | for (i = 0; (u64) i < BITS_TO_U64(size); i++) { |
295 | ret = do_read_u64(ff, p + i); |
296 | if (ret < 0) { |
297 | free(set); |
298 | return ret; |
299 | } |
300 | } |
301 | |
302 | *pset = set; |
303 | *psize = size; |
304 | return 0; |
305 | } |
306 | |
307 | #ifdef HAVE_LIBTRACEEVENT |
308 | static int write_tracing_data(struct feat_fd *ff, |
309 | struct evlist *evlist) |
310 | { |
311 | if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) |
312 | return -1; |
313 | |
314 | return read_tracing_data(ff->fd, &evlist->core.entries); |
315 | } |
316 | #endif |
317 | |
318 | static int write_build_id(struct feat_fd *ff, |
319 | struct evlist *evlist __maybe_unused) |
320 | { |
321 | struct perf_session *session; |
322 | int err; |
323 | |
324 | session = container_of(ff->ph, struct perf_session, header); |
325 | |
326 | if (!perf_session__read_build_ids(session, true)) |
327 | return -1; |
328 | |
329 | if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) |
330 | return -1; |
331 | |
332 | err = perf_session__write_buildid_table(session, ff); |
333 | if (err < 0) { |
334 | pr_debug("failed to write buildid table\n"); |
335 | return err; |
336 | } |
337 | perf_session__cache_build_ids(session); |
338 | |
339 | return 0; |
340 | } |
341 | |
342 | static int write_hostname(struct feat_fd *ff, |
343 | struct evlist *evlist __maybe_unused) |
344 | { |
345 | struct utsname uts; |
346 | int ret; |
347 | |
348 | ret = uname(&uts); |
349 | if (ret < 0) |
350 | return -1; |
351 | |
352 | return do_write_string(ff, uts.nodename); |
353 | } |
354 | |
355 | static int write_osrelease(struct feat_fd *ff, |
356 | struct evlist *evlist __maybe_unused) |
357 | { |
358 | struct utsname uts; |
359 | int ret; |
360 | |
361 | ret = uname(&uts); |
362 | if (ret < 0) |
363 | return -1; |
364 | |
365 | return do_write_string(ff, uts.release); |
366 | } |
367 | |
368 | static int write_arch(struct feat_fd *ff, |
369 | struct evlist *evlist __maybe_unused) |
370 | { |
371 | struct utsname uts; |
372 | int ret; |
373 | |
374 | ret = uname(&uts); |
375 | if (ret < 0) |
376 | return -1; |
377 | |
378 | return do_write_string(ff, uts.machine); |
379 | } |
380 | |
381 | static int write_version(struct feat_fd *ff, |
382 | struct evlist *evlist __maybe_unused) |
383 | { |
384 | return do_write_string(ff, perf_version_string); |
385 | } |
386 | |
387 | static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc) |
388 | { |
389 | FILE *file; |
390 | char *buf = NULL; |
391 | char *s, *p; |
392 | const char *search = cpuinfo_proc; |
393 | size_t len = 0; |
394 | int ret = -1; |
395 | |
396 | if (!search) |
397 | return -1; |
398 | |
399 | file = fopen("/proc/cpuinfo", "r"); |
400 | if (!file) |
401 | return -1; |
402 | |
403 | while (getline(&buf, &len, file) > 0) { |
404 | ret = strncmp(buf, search, strlen(search)); |
405 | if (!ret) |
406 | break; |
407 | } |
408 | |
409 | if (ret) { |
410 | ret = -1; |
411 | goto done; |
412 | } |
413 | |
414 | s = buf; |
415 | |
416 | p = strchr(buf, ':'); |
417 | if (p && *(p+1) == ' ' && *(p+2)) |
418 | s = p + 2; |
419 | p = strchr(s, '\n'); |
420 | if (p) |
421 | *p = '\0'; |
422 | |
423 | /* squash extra space characters (branding string) */ |
424 | p = s; |
425 | while (*p) { |
426 | if (isspace(*p)) { |
427 | char *r = p + 1; |
428 | char *q = skip_spaces(r); |
429 | *p = ' '; |
430 | if (q != (p+1)) |
431 | while ((*r++ = *q++)); |
432 | } |
433 | p++; |
434 | } |
435 | ret = do_write_string(ff, s); |
436 | done: |
437 | free(buf); |
438 | fclose(file); |
439 | return ret; |
440 | } |
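/*
 * Example (hypothetical /proc/cpuinfo content): given a matching line such as
 *
 *	model name	: Intel(R) Core(TM)  i7 CPU
 *
 * the value after ": " is taken, the trailing newline is stripped and runs
 * of whitespace are squashed, so the string written out would be
 * "Intel(R) Core(TM) i7 CPU".
 */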
441 | |
442 | static int write_cpudesc(struct feat_fd *ff, |
443 | struct evlist *evlist __maybe_unused) |
444 | { |
445 | #if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__) |
446 | #define CPUINFO_PROC { "cpu", } |
447 | #elif defined(__s390__) |
448 | #define CPUINFO_PROC { "vendor_id", } |
449 | #elif defined(__sh__) |
450 | #define CPUINFO_PROC { "cpu type", } |
451 | #elif defined(__alpha__) || defined(__mips__) |
452 | #define CPUINFO_PROC { "cpu model", } |
453 | #elif defined(__arm__) |
454 | #define CPUINFO_PROC { "model name", "Processor", } |
455 | #elif defined(__arc__) |
456 | #define CPUINFO_PROC { "Processor", } |
457 | #elif defined(__xtensa__) |
458 | #define CPUINFO_PROC { "core ID", } |
459 | #elif defined(__loongarch__) |
460 | #define CPUINFO_PROC { "Model Name", } |
461 | #else |
462 | #define CPUINFO_PROC { "model name", } |
463 | #endif |
464 | const char *cpuinfo_procs[] = CPUINFO_PROC; |
465 | #undef CPUINFO_PROC |
466 | unsigned int i; |
467 | |
468 | for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { |
469 | int ret; |
470 | ret = __write_cpudesc(ff, cpuinfo_procs[i]); |
471 | if (ret >= 0) |
472 | return ret; |
473 | } |
474 | return -1; |
475 | } |
476 | |
477 | |
478 | static int write_nrcpus(struct feat_fd *ff, |
479 | struct evlist *evlist __maybe_unused) |
480 | { |
481 | long nr; |
482 | u32 nrc, nra; |
483 | int ret; |
484 | |
485 | nrc = cpu__max_present_cpu().cpu; |
486 | |
487 | nr = sysconf(_SC_NPROCESSORS_ONLN); |
488 | if (nr < 0) |
489 | return -1; |
490 | |
491 | nra = (u32)(nr & UINT_MAX); |
492 | |
493 | ret = do_write(ff, &nrc, sizeof(nrc)); |
494 | if (ret < 0) |
495 | return ret; |
496 | |
497 | return do_write(ff, &nra, sizeof(nra)); |
498 | } |
499 | |
500 | static int write_event_desc(struct feat_fd *ff, |
501 | struct evlist *evlist) |
502 | { |
503 | struct evsel *evsel; |
504 | u32 nre, nri, sz; |
505 | int ret; |
506 | |
507 | nre = evlist->core.nr_entries; |
508 | |
509 | /* |
510 | * write number of events |
511 | */ |
512 | ret = do_write(ff, &nre, sizeof(nre)); |
513 | if (ret < 0) |
514 | return ret; |
515 | |
516 | /* |
517 | * size of perf_event_attr struct |
518 | */ |
519 | sz = (u32)sizeof(evsel->core.attr); |
520 | ret = do_write(ff, &sz, sizeof(sz)); |
521 | if (ret < 0) |
522 | return ret; |
523 | |
524 | evlist__for_each_entry(evlist, evsel) { |
525 | ret = do_write(ff, &evsel->core.attr, sz); |
526 | if (ret < 0) |
527 | return ret; |
528 | /* |
529 | * write number of unique id per event |
530 | * there is one id per instance of an event |
531 | * |
532 | * copy into an nri to be independent of the |
533 | * type of ids, |
534 | */ |
535 | nri = evsel->core.ids; |
536 | ret = do_write(ff, &nri, sizeof(nri)); |
537 | if (ret < 0) |
538 | return ret; |
539 | |
540 | /* |
541 | * write event string as passed on cmdline |
542 | */ |
543 | ret = do_write_string(ff, evsel__name(evsel)); |
544 | if (ret < 0) |
545 | return ret; |
546 | /* |
547 | * write unique ids for this event |
548 | */ |
549 | ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64)); |
550 | if (ret < 0) |
551 | return ret; |
552 | } |
553 | return 0; |
554 | } |
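/*
 * Resulting HEADER_EVENT_DESC layout (sketch, derived from the code above):
 *
 *	u32 nre;			// number of events
 *	u32 sz;				// sizeof(struct perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;
 *		u32 nr_ids;
 *		char name[];		// do_write_string() format
 *		u64 ids[nr_ids];
 *	} events[nre];
 */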
555 | |
556 | static int write_cmdline(struct feat_fd *ff, |
557 | struct evlist *evlist __maybe_unused) |
558 | { |
559 | char pbuf[MAXPATHLEN], *buf; |
560 | int i, ret, n; |
561 | |
562 | /* actual path to perf binary */ |
563 | buf = perf_exe(pbuf, MAXPATHLEN); |
564 | |
565 | /* account for binary path */ |
566 | n = perf_env.nr_cmdline + 1; |
567 | |
568 | ret = do_write(ff, &n, sizeof(n)); |
569 | if (ret < 0) |
570 | return ret; |
571 | |
572 | ret = do_write_string(ff, buf); |
573 | if (ret < 0) |
574 | return ret; |
575 | |
576 | for (i = 0 ; i < perf_env.nr_cmdline; i++) { |
577 | ret = do_write_string(ff, perf_env.cmdline_argv[i]); |
578 | if (ret < 0) |
579 | return ret; |
580 | } |
581 | return 0; |
582 | } |
583 | |
584 | |
585 | static int write_cpu_topology(struct feat_fd *ff, |
586 | struct evlist *evlist __maybe_unused) |
587 | { |
588 | struct cpu_topology *tp; |
589 | u32 i; |
590 | int ret, j; |
591 | |
592 | tp = cpu_topology__new(); |
593 | if (!tp) |
594 | return -1; |
595 | |
596 | ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists)); |
597 | if (ret < 0) |
598 | goto done; |
599 | |
600 | for (i = 0; i < tp->package_cpus_lists; i++) { |
601 | ret = do_write_string(ff, tp->package_cpus_list[i]); |
602 | if (ret < 0) |
603 | goto done; |
604 | } |
605 | ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists)); |
606 | if (ret < 0) |
607 | goto done; |
608 | |
609 | for (i = 0; i < tp->core_cpus_lists; i++) { |
610 | ret = do_write_string(ff, tp->core_cpus_list[i]); |
611 | if (ret < 0) |
612 | break; |
613 | } |
614 | |
615 | ret = perf_env__read_cpu_topology_map(&perf_env); |
616 | if (ret < 0) |
617 | goto done; |
618 | |
619 | for (j = 0; j < perf_env.nr_cpus_avail; j++) { |
620 | ret = do_write(ff, &perf_env.cpu[j].core_id, |
621 | sizeof(perf_env.cpu[j].core_id)); |
622 | if (ret < 0) |
623 | return ret; |
624 | ret = do_write(ff, &perf_env.cpu[j].socket_id, |
625 | sizeof(perf_env.cpu[j].socket_id)); |
626 | if (ret < 0) |
627 | return ret; |
628 | } |
629 | |
630 | if (!tp->die_cpus_lists) |
631 | goto done; |
632 | |
633 | ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists)); |
634 | if (ret < 0) |
635 | goto done; |
636 | |
637 | for (i = 0; i < tp->die_cpus_lists; i++) { |
638 | ret = do_write_string(ff, tp->die_cpus_list[i]); |
639 | if (ret < 0) |
640 | goto done; |
641 | } |
642 | |
643 | for (j = 0; j < perf_env.nr_cpus_avail; j++) { |
644 | ret = do_write(ff, &perf_env.cpu[j].die_id, |
645 | sizeof(perf_env.cpu[j].die_id)); |
646 | if (ret < 0) |
647 | return ret; |
648 | } |
649 | |
650 | done: |
651 | cpu_topology__delete(tp); |
652 | return ret; |
653 | } |
654 | |
655 | |
656 | |
657 | static int write_total_mem(struct feat_fd *ff, |
658 | struct evlist *evlist __maybe_unused) |
659 | { |
660 | char *buf = NULL; |
661 | FILE *fp; |
662 | size_t len = 0; |
663 | int ret = -1, n; |
664 | uint64_t mem; |
665 | |
666 | fp = fopen("/proc/meminfo", "r"); |
667 | if (!fp) |
668 | return -1; |
669 | |
670 | while (getline(&buf, &len, fp) > 0) { |
671 | ret = strncmp(buf, "MemTotal:", 9); |
672 | if (!ret) |
673 | break; |
674 | } |
675 | if (!ret) { |
676 | n = sscanf(buf, "%*s %" PRIu64, &mem); |
677 | if (n == 1) |
678 | ret = do_write(ff, &mem, sizeof(mem)); |
679 | } else |
680 | ret = -1; |
681 | free(buf); |
682 | fclose(fp); |
683 | return ret; |
684 | } |
685 | |
686 | static int write_numa_topology(struct feat_fd *ff, |
687 | struct evlist *evlist __maybe_unused) |
688 | { |
689 | struct numa_topology *tp; |
690 | int ret = -1; |
691 | u32 i; |
692 | |
693 | tp = numa_topology__new(); |
694 | if (!tp) |
695 | return -ENOMEM; |
696 | |
697 | ret = do_write(ff, &tp->nr, sizeof(u32)); |
698 | if (ret < 0) |
699 | goto err; |
700 | |
701 | for (i = 0; i < tp->nr; i++) { |
702 | struct numa_topology_node *n = &tp->nodes[i]; |
703 | |
704 | ret = do_write(ff, &n->node, sizeof(u32)); |
705 | if (ret < 0) |
706 | goto err; |
707 | |
708 | ret = do_write(ff, &n->mem_total, sizeof(u64)); |
709 | if (ret) |
710 | goto err; |
711 | |
712 | ret = do_write(ff, &n->mem_free, sizeof(u64)); |
713 | if (ret) |
714 | goto err; |
715 | |
716 | ret = do_write_string(ff, n->cpus); |
717 | if (ret < 0) |
718 | goto err; |
719 | } |
720 | |
721 | ret = 0; |
722 | |
723 | err: |
724 | numa_topology__delete(tp); |
725 | return ret; |
726 | } |
727 | |
728 | /* |
729 | * File format: |
730 | * |
731 | * struct pmu_mappings { |
732 | * u32 pmu_num; |
733 | * struct pmu_map { |
734 | * u32 type; |
735 | * char name[]; |
736 | * }[pmu_num]; |
737 | * }; |
738 | */ |
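/*
 * For example (hypothetical values), a file recorded on a machine with a
 * core PMU of type 4 and a software PMU of type 1 would contain:
 *
 *	pmu_num = 2
 *	{ type = 4, name = "cpu" }
 *	{ type = 1, name = "software" }
 *
 * where each name is stored in the do_write_string() format.
 */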
739 | |
740 | static int write_pmu_mappings(struct feat_fd *ff, |
741 | struct evlist *evlist __maybe_unused) |
742 | { |
743 | struct perf_pmu *pmu = NULL; |
744 | u32 pmu_num = 0; |
745 | int ret; |
746 | |
747 | /* |
748 | * Do a first pass to count number of pmu to avoid lseek so this |
749 | * works in pipe mode as well. |
750 | */ |
751 | while ((pmu = perf_pmus__scan(pmu))) |
752 | pmu_num++; |
753 | |
754 | ret = do_write(ff, &pmu_num, sizeof(pmu_num)); |
755 | if (ret < 0) |
756 | return ret; |
757 | |
758 | while ((pmu = perf_pmus__scan(pmu))) { |
759 | ret = do_write(ff, &pmu->type, sizeof(pmu->type)); |
760 | if (ret < 0) |
761 | return ret; |
762 | |
763 | ret = do_write_string(ff, pmu->name); |
764 | if (ret < 0) |
765 | return ret; |
766 | } |
767 | |
768 | return 0; |
769 | } |
770 | |
771 | /* |
772 | * File format: |
773 | * |
774 | * struct group_descs { |
775 | * u32 nr_groups; |
776 | * struct group_desc { |
777 | * char name[]; |
778 | * u32 leader_idx; |
779 | * u32 nr_members; |
780 | * }[nr_groups]; |
781 | * }; |
782 | */ |
783 | static int write_group_desc(struct feat_fd *ff, |
784 | struct evlist *evlist) |
785 | { |
786 | u32 nr_groups = evlist__nr_groups(evlist); |
787 | struct evsel *evsel; |
788 | int ret; |
789 | |
790 | ret = do_write(ff, &nr_groups, sizeof(nr_groups)); |
791 | if (ret < 0) |
792 | return ret; |
793 | |
794 | evlist__for_each_entry(evlist, evsel) { |
795 | if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) { |
796 | const char *name = evsel->group_name ?: "{anon_group}"; |
797 | u32 leader_idx = evsel->core.idx; |
798 | u32 nr_members = evsel->core.nr_members; |
799 | |
800 | ret = do_write_string(ff, name); |
801 | if (ret < 0) |
802 | return ret; |
803 | |
804 | ret = do_write(ff, &leader_idx, sizeof(leader_idx)); |
805 | if (ret < 0) |
806 | return ret; |
807 | |
808 | ret = do_write(ff, &nr_members, sizeof(nr_members)); |
809 | if (ret < 0) |
810 | return ret; |
811 | } |
812 | } |
813 | return 0; |
814 | } |
815 | |
816 | /* |
817 | * Return the CPU id as a raw string. |
818 | * |
819 | * Each architecture should provide a more precise id string that |
820 | * can be use to match the architecture's "mapfile". |
821 | */ |
822 | char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) |
823 | { |
824 | return NULL; |
825 | } |
826 | |
827 | /* Return zero when the cpuid from the mapfile.csv matches the |
828 | * cpuid string generated on this platform. |
829 | * Otherwise return non-zero. |
830 | */ |
831 | int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) |
832 | { |
833 | regex_t re; |
834 | regmatch_t pmatch[1]; |
835 | int match; |
836 | |
837 | if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { |
838 | /* Warn unable to generate match particular string. */ |
839 | pr_info("Invalid regular expression %s\n", mapcpuid); |
840 | return 1; |
841 | } |
842 | |
843 | match = !regexec(&re, cpuid, 1, pmatch, 0); |
844 | regfree(&re); |
845 | if (match) { |
846 | size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); |
847 | |
848 | /* Verify the entire string matched. */ |
849 | if (match_len == strlen(cpuid)) |
850 | return 0; |
851 | } |
852 | return 1; |
853 | } |
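/*
 * Example (hypothetical mapfile entry): with mapcpuid "GenuineIntel-6-8E.*"
 * and a platform cpuid of "GenuineIntel-6-8E-A", the regex matches and
 * consumes the whole cpuid string, so 0 is returned. A mapcpuid of
 * "GenuineIntel-6-8" would match only a prefix and therefore return 1.
 */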
854 | |
855 | /* |
856 | * default get_cpuid(): nothing gets recorded |
857 | * actual implementation must be in arch/$(SRCARCH)/util/header.c |
858 | */ |
859 | int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) |
860 | { |
861 | return ENOSYS; /* Not implemented */ |
862 | } |
863 | |
864 | static int write_cpuid(struct feat_fd *ff, |
865 | struct evlist *evlist __maybe_unused) |
866 | { |
867 | char buffer[64]; |
868 | int ret; |
869 | |
870 | ret = get_cpuid(buffer, sizeof(buffer)); |
871 | if (ret) |
872 | return -1; |
873 | |
874 | return do_write_string(ff, buffer); |
875 | } |
876 | |
877 | static int write_branch_stack(struct feat_fd *ff __maybe_unused, |
878 | struct evlist *evlist __maybe_unused) |
879 | { |
880 | return 0; |
881 | } |
882 | |
883 | static int write_auxtrace(struct feat_fd *ff, |
884 | struct evlist *evlist __maybe_unused) |
885 | { |
886 | struct perf_session *session; |
887 | int err; |
888 | |
889 | if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) |
890 | return -1; |
891 | |
892 | session = container_of(ff->ph, struct perf_session, header); |
893 | |
894 | err = auxtrace_index__write(ff->fd, &session->auxtrace_index); |
895 | if (err < 0) |
896 | pr_err("Failed to write auxtrace index\n"); |
897 | return err; |
898 | } |
899 | |
900 | static int write_clockid(struct feat_fd *ff, |
901 | struct evlist *evlist __maybe_unused) |
902 | { |
903 | return do_write(ff, &ff->ph->env.clock.clockid_res_ns, |
904 | sizeof(ff->ph->env.clock.clockid_res_ns)); |
905 | } |
906 | |
907 | static int write_clock_data(struct feat_fd *ff, |
908 | struct evlist *evlist __maybe_unused) |
909 | { |
910 | u64 *data64; |
911 | u32 data32; |
912 | int ret; |
913 | |
914 | /* version */ |
915 | data32 = 1; |
916 | |
917 | ret = do_write(ff, &data32, sizeof(data32)); |
918 | if (ret < 0) |
919 | return ret; |
920 | |
921 | /* clockid */ |
922 | data32 = ff->ph->env.clock.clockid; |
923 | |
924 | ret = do_write(ff, &data32, sizeof(data32)); |
925 | if (ret < 0) |
926 | return ret; |
927 | |
928 | /* TOD ref time */ |
929 | data64 = &ff->ph->env.clock.tod_ns; |
930 | |
931 | ret = do_write(ff, data64, sizeof(*data64)); |
932 | if (ret < 0) |
933 | return ret; |
934 | |
935 | /* clockid ref time */ |
936 | data64 = &ff->ph->env.clock.clockid_ns; |
937 | |
938 | return do_write(ff, data64, sizeof(*data64)); |
939 | } |
940 | |
941 | static int write_hybrid_topology(struct feat_fd *ff, |
942 | struct evlist *evlist __maybe_unused) |
943 | { |
944 | struct hybrid_topology *tp; |
945 | int ret; |
946 | u32 i; |
947 | |
948 | tp = hybrid_topology__new(); |
949 | if (!tp) |
950 | return -ENOENT; |
951 | |
952 | ret = do_write(ff, &tp->nr, sizeof(u32)); |
953 | if (ret < 0) |
954 | goto err; |
955 | |
956 | for (i = 0; i < tp->nr; i++) { |
957 | struct hybrid_topology_node *n = &tp->nodes[i]; |
958 | |
959 | ret = do_write_string(ff, n->pmu_name); |
960 | if (ret < 0) |
961 | goto err; |
962 | |
963 | ret = do_write_string(ff, n->cpus); |
964 | if (ret < 0) |
965 | goto err; |
966 | } |
967 | |
968 | ret = 0; |
969 | |
970 | err: |
971 | hybrid_topology__delete(tp); |
972 | return ret; |
973 | } |
974 | |
975 | static int write_dir_format(struct feat_fd *ff, |
976 | struct evlist *evlist __maybe_unused) |
977 | { |
978 | struct perf_session *session; |
979 | struct perf_data *data; |
980 | |
981 | session = container_of(ff->ph, struct perf_session, header); |
982 | data = session->data; |
983 | |
984 | if (WARN_ON(!perf_data__is_dir(data))) |
985 | return -1; |
986 | |
987 | return do_write(ff, &data->dir.version, sizeof(data->dir.version)); |
988 | } |
989 | |
990 | /* |
991 | * Check whether a CPU is online |
992 | * |
993 | * Returns: |
994 | * 1 -> if CPU is online |
995 | * 0 -> if CPU is offline |
996 | * -1 -> error case |
997 | */ |
998 | int is_cpu_online(unsigned int cpu) |
999 | { |
1000 | char *str; |
1001 | size_t strlen; |
1002 | char buf[256]; |
1003 | int status = -1; |
1004 | struct stat statbuf; |
1005 | |
1006 | snprintf(buf, sizeof(buf), |
1007 | "/sys/devices/system/cpu/cpu%d", cpu); |
1008 | if (stat(buf, &statbuf) != 0) |
1009 | return 0; |
1010 | |
1011 | /* |
1012 | * Check if /sys/devices/system/cpu/cpux/online file |
1013 | * exists. Some cases cpu0 won't have online file since |
1014 | * it is not expected to be turned off generally. |
1015 | * In kernels without CONFIG_HOTPLUG_CPU, this |
1016 | * file won't exist |
1017 | */ |
1018 | snprintf(buf, sizeof(buf), |
1019 | "/sys/devices/system/cpu/cpu%d/online", cpu); |
1020 | if (stat(buf, &statbuf) != 0) |
1021 | return 1; |
1022 | |
1023 | /* |
1024 | * Read online file using sysfs__read_str. |
1025 | * If read or open fails, return -1. |
1026 | * If read succeeds, return value from file |
1027 | * which gets stored in "str" |
1028 | */ |
1029 | snprintf(buf, sizeof(buf), |
1030 | "devices/system/cpu/cpu%d/online", cpu); |
1031 | |
1032 | if (sysfs__read_str(buf, &str, &strlen) < 0) |
1033 | return status; |
1034 | |
1035 | status = atoi(str); |
1036 | |
1037 | free(str); |
1038 | return status; |
1039 | } |
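/*
 * Usage sketch (illustrative): callers typically skip CPUs that cannot take
 * part in the measurement, e.g.:
 *
 *	if (is_cpu_online(cpu) != 1)
 *		continue;	// offline (0) or sysfs not readable (-1)
 */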
1040 | |
1041 | #ifdef HAVE_LIBBPF_SUPPORT |
1042 | static int write_bpf_prog_info(struct feat_fd *ff, |
1043 | struct evlist *evlist __maybe_unused) |
1044 | { |
1045 | struct perf_env *env = &ff->ph->env; |
1046 | struct rb_root *root; |
1047 | struct rb_node *next; |
1048 | int ret; |
1049 | |
1050 | down_read(&env->bpf_progs.lock); |
1051 | |
1052 | ret = do_write(ff, &env->bpf_progs.infos_cnt, |
1053 | sizeof(env->bpf_progs.infos_cnt)); |
1054 | if (ret < 0) |
1055 | goto out; |
1056 | |
1057 | root = &env->bpf_progs.infos; |
1058 | next = rb_first(root); |
1059 | while (next) { |
1060 | struct bpf_prog_info_node *node; |
1061 | size_t len; |
1062 | |
1063 | node = rb_entry(next, struct bpf_prog_info_node, rb_node); |
1064 | next = rb_next(&node->rb_node); |
1065 | len = sizeof(struct perf_bpil) + |
1066 | node->info_linear->data_len; |
1067 | |
1068 | /* before writing to file, translate address to offset */ |
1069 | bpil_addr_to_offs(node->info_linear); |
1070 | ret = do_write(ff, node->info_linear, len); |
1071 | /* |
1072 | * translate back to address even when do_write() fails, |
1073 | * so that this function never changes the data. |
1074 | */ |
1075 | bpil_offs_to_addr(node->info_linear); |
1076 | if (ret < 0) |
1077 | goto out; |
1078 | } |
1079 | out: |
1080 | up_read(&env->bpf_progs.lock); |
1081 | return ret; |
1082 | } |
1083 | |
1084 | static int write_bpf_btf(struct feat_fd *ff, |
1085 | struct evlist *evlist __maybe_unused) |
1086 | { |
1087 | struct perf_env *env = &ff->ph->env; |
1088 | struct rb_root *root; |
1089 | struct rb_node *next; |
1090 | int ret; |
1091 | |
1092 | down_read(&env->bpf_progs.lock); |
1093 | |
1094 | ret = do_write(ff, &env->bpf_progs.btfs_cnt, |
1095 | sizeof(env->bpf_progs.btfs_cnt)); |
1096 | |
1097 | if (ret < 0) |
1098 | goto out; |
1099 | |
1100 | root = &env->bpf_progs.btfs; |
1101 | next = rb_first(root); |
1102 | while (next) { |
1103 | struct btf_node *node; |
1104 | |
1105 | node = rb_entry(next, struct btf_node, rb_node); |
1106 | next = rb_next(&node->rb_node); |
1107 | ret = do_write(ff, &node->id, |
1108 | sizeof(u32) * 2 + node->data_size); |
1109 | if (ret < 0) |
1110 | goto out; |
1111 | } |
1112 | out: |
1113 | up_read(&env->bpf_progs.lock); |
1114 | return ret; |
1115 | } |
1116 | #endif // HAVE_LIBBPF_SUPPORT |
1117 | |
1118 | static int cpu_cache_level__sort(const void *a, const void *b) |
1119 | { |
1120 | struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; |
1121 | struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; |
1122 | |
1123 | return cache_a->level - cache_b->level; |
1124 | } |
1125 | |
1126 | static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) |
1127 | { |
1128 | if (a->level != b->level) |
1129 | return false; |
1130 | |
1131 | if (a->line_size != b->line_size) |
1132 | return false; |
1133 | |
1134 | if (a->sets != b->sets) |
1135 | return false; |
1136 | |
1137 | if (a->ways != b->ways) |
1138 | return false; |
1139 | |
1140 | if (strcmp(a->type, b->type)) |
1141 | return false; |
1142 | |
1143 | if (strcmp(a->size, b->size)) |
1144 | return false; |
1145 | |
1146 | if (strcmp(a->map, b->map)) |
1147 | return false; |
1148 | |
1149 | return true; |
1150 | } |
1151 | |
1152 | static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) |
1153 | { |
1154 | char path[PATH_MAX], file[PATH_MAX]; |
1155 | struct stat st; |
1156 | size_t len; |
1157 | |
1158 | scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); |
1159 | scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); |
1160 | |
1161 | if (stat(file, &st)) |
1162 | return 1; |
1163 | |
1164 | scnprintf(file, PATH_MAX, "%s/level", path); |
1165 | if (sysfs__read_int(file, (int *) &cache->level)) |
1166 | return -1; |
1167 | |
1168 | scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); |
1169 | if (sysfs__read_int(file, (int *) &cache->line_size)) |
1170 | return -1; |
1171 | |
1172 | scnprintf(file, PATH_MAX, "%s/number_of_sets", path); |
1173 | if (sysfs__read_int(file, (int *) &cache->sets)) |
1174 | return -1; |
1175 | |
1176 | scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); |
1177 | if (sysfs__read_int(file, (int *) &cache->ways)) |
1178 | return -1; |
1179 | |
1180 | scnprintf(file, PATH_MAX, "%s/type", path); |
1181 | if (sysfs__read_str(file, &cache->type, &len)) |
1182 | return -1; |
1183 | |
1184 | cache->type[len] = 0; |
1185 | cache->type = strim(cache->type); |
1186 | |
1187 | scnprintf(file, PATH_MAX, "%s/size", path); |
1188 | if (sysfs__read_str(file, &cache->size, &len)) { |
1189 | zfree(&cache->type); |
1190 | return -1; |
1191 | } |
1192 | |
1193 | cache->size[len] = 0; |
1194 | cache->size = strim(cache->size); |
1195 | |
1196 | scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); |
1197 | if (sysfs__read_str(file, &cache->map, &len)) { |
1198 | zfree(&cache->size); |
1199 | zfree(&cache->type); |
1200 | return -1; |
1201 | } |
1202 | |
1203 | cache->map[len] = 0; |
1204 | cache->map = strim(cache->map); |
1205 | return 0; |
1206 | } |
1207 | |
1208 | static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) |
1209 | { |
1210 | fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); |
1211 | } |
1212 | |
1213 | /* |
1214 | * Build caches levels for a particular CPU from the data in |
1215 | * /sys/devices/system/cpu/cpu<cpu>/cache/ |
1216 | * The cache level data is stored in caches[] from index at |
1217 | * *cntp. |
1218 | */ |
1219 | int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp) |
1220 | { |
1221 | u16 level; |
1222 | |
1223 | for (level = 0; level < MAX_CACHE_LVL; level++) { |
1224 | struct cpu_cache_level c; |
1225 | int err; |
1226 | u32 i; |
1227 | |
1228 | err = cpu_cache_level__read(&c, cpu, level); |
1229 | if (err < 0) |
1230 | return err; |
1231 | |
1232 | if (err == 1) |
1233 | break; |
1234 | |
1235 | for (i = 0; i < *cntp; i++) { |
1236 | if (cpu_cache_level__cmp(&c, &caches[i])) |
1237 | break; |
1238 | } |
1239 | |
1240 | if (i == *cntp) { |
1241 | caches[*cntp] = c; |
1242 | *cntp = *cntp + 1; |
1243 | } else |
1244 | cpu_cache_level__free(&c); |
1245 | } |
1246 | |
1247 | return 0; |
1248 | } |
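/*
 * Example of the deduplication above (hypothetical topology): if CPU0 and
 * CPU1 share one L2 cache, reading CPU1 yields a cpu_cache_level that is
 * equal by cpu_cache_level__cmp() to the one already collected for CPU0
 * (same level, sizes and map "0-1"), so it is freed instead of appended and
 * only a single entry for that L2 ends up in caches[].
 */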
1249 | |
1250 | static int build_caches(struct cpu_cache_level caches[], u32 *cntp) |
1251 | { |
1252 | u32 nr, cpu, cnt = 0; |
1253 | |
1254 | nr = cpu__max_cpu().cpu; |
1255 | |
1256 | for (cpu = 0; cpu < nr; cpu++) { |
1257 | int ret = build_caches_for_cpu(cpu, caches, &cnt); |
1258 | |
1259 | if (ret) |
1260 | return ret; |
1261 | } |
1262 | *cntp = cnt; |
1263 | return 0; |
1264 | } |
1265 | |
1266 | static int write_cache(struct feat_fd *ff, |
1267 | struct evlist *evlist __maybe_unused) |
1268 | { |
1269 | u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL; |
1270 | struct cpu_cache_level caches[max_caches]; |
1271 | u32 cnt = 0, i, version = 1; |
1272 | int ret; |
1273 | |
1274 | ret = build_caches(caches, &cnt); |
1275 | if (ret) |
1276 | goto out; |
1277 | |
1278 | qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort); |
1279 | |
1280 | ret = do_write(ff, &version, sizeof(u32)); |
1281 | if (ret < 0) |
1282 | goto out; |
1283 | |
1284 | ret = do_write(ff, &cnt, sizeof(u32)); |
1285 | if (ret < 0) |
1286 | goto out; |
1287 | |
1288 | for (i = 0; i < cnt; i++) { |
1289 | struct cpu_cache_level *c = &caches[i]; |
1290 | |
1291 | #define _W(v) \ |
1292 | ret = do_write(ff, &c->v, sizeof(u32)); \ |
1293 | if (ret < 0) \ |
1294 | goto out; |
1295 | |
1296 | _W(level) |
1297 | _W(line_size) |
1298 | _W(sets) |
1299 | _W(ways) |
1300 | #undef _W |
1301 | |
1302 | #define _W(v) \ |
1303 | ret = do_write_string(ff, (const char *) c->v); \ |
1304 | if (ret < 0) \ |
1305 | goto out; |
1306 | |
1307 | _W(type) |
1308 | _W(size) |
1309 | _W(map) |
1310 | #undef _W |
1311 | } |
1312 | |
1313 | out: |
1314 | for (i = 0; i < cnt; i++) |
1315 | cpu_cache_level__free(&caches[i]); |
1316 | return ret; |
1317 | } |
1318 | |
1319 | static int write_stat(struct feat_fd *ff __maybe_unused, |
1320 | struct evlist *evlist __maybe_unused) |
1321 | { |
1322 | return 0; |
1323 | } |
1324 | |
1325 | static int write_sample_time(struct feat_fd *ff, |
1326 | struct evlist *evlist) |
1327 | { |
1328 | int ret; |
1329 | |
1330 | ret = do_write(ff, &evlist->first_sample_time, |
1331 | sizeof(evlist->first_sample_time)); |
1332 | if (ret < 0) |
1333 | return ret; |
1334 | |
1335 | return do_write(ff, &evlist->last_sample_time, |
1336 | sizeof(evlist->last_sample_time)); |
1337 | } |
1338 | |
1339 | |
1340 | static int memory_node__read(struct memory_node *n, unsigned long idx) |
1341 | { |
1342 | unsigned int phys, size = 0; |
1343 | char path[PATH_MAX]; |
1344 | struct dirent *ent; |
1345 | DIR *dir; |
1346 | |
1347 | #define for_each_memory(mem, dir) \ |
1348 | while ((ent = readdir(dir))) \ |
1349 | if (strcmp(ent->d_name, ".") && \ |
1350 | strcmp(ent->d_name, "..") && \ |
1351 | sscanf(ent->d_name, "memory%u", &mem) == 1) |
1352 | |
1353 | scnprintf(path, PATH_MAX, |
1354 | "%s/devices/system/node/node%lu", |
1355 | sysfs__mountpoint(), idx); |
1356 | |
1357 | dir = opendir(path); |
1358 | if (!dir) { |
1359 | pr_warning("failed: can't open memory sysfs data\n"); |
1360 | return -1; |
1361 | } |
1362 | |
1363 | for_each_memory(phys, dir) { |
1364 | size = max(phys, size); |
1365 | } |
1366 | |
1367 | size++; |
1368 | |
1369 | n->set = bitmap_zalloc(size); |
1370 | if (!n->set) { |
1371 | closedir(dir); |
1372 | return -ENOMEM; |
1373 | } |
1374 | |
1375 | n->node = idx; |
1376 | n->size = size; |
1377 | |
1378 | rewinddir(dir); |
1379 | |
1380 | for_each_memory(phys, dir) { |
1381 | __set_bit(phys, n->set); |
1382 | } |
1383 | |
1384 | closedir(dir); |
1385 | return 0; |
1386 | } |
1387 | |
1388 | static void memory_node__delete_nodes(struct memory_node *nodesp, u64 cnt) |
1389 | { |
1390 | for (u64 i = 0; i < cnt; i++) |
1391 | bitmap_free(nodesp[i].set); |
1392 | |
1393 | free(nodesp); |
1394 | } |
1395 | |
1396 | static int memory_node__sort(const void *a, const void *b) |
1397 | { |
1398 | const struct memory_node *na = a; |
1399 | const struct memory_node *nb = b; |
1400 | |
1401 | return na->node - nb->node; |
1402 | } |
1403 | |
1404 | static int build_mem_topology(struct memory_node **nodesp, u64 *cntp) |
1405 | { |
1406 | char path[PATH_MAX]; |
1407 | struct dirent *ent; |
1408 | DIR *dir; |
1409 | int ret = 0; |
1410 | size_t cnt = 0, size = 0; |
1411 | struct memory_node *nodes = NULL; |
1412 | |
1413 | scnprintf(path, PATH_MAX, "%s/devices/system/node/", |
1414 | sysfs__mountpoint()); |
1415 | |
1416 | dir = opendir(path); |
1417 | if (!dir) { |
1418 | pr_debug2("%s: couldn't read %s, does this arch have topology information?\n", |
1419 | __func__, path); |
1420 | return -1; |
1421 | } |
1422 | |
1423 | while (!ret && (ent = readdir(dir))) { |
1424 | unsigned int idx; |
1425 | int r; |
1426 | |
1427 | if (!strcmp(ent->d_name, ".") || |
1428 | !strcmp(ent->d_name, "..")) |
1429 | continue; |
1430 | |
1431 | r = sscanf(ent->d_name, "node%u", &idx); |
1432 | if (r != 1) |
1433 | continue; |
1434 | |
1435 | if (cnt >= size) { |
1436 | struct memory_node *new_nodes = |
1437 | reallocarray(nodes, cnt + 4, sizeof(*nodes)); |
1438 | |
1439 | if (!new_nodes) { |
1440 | pr_err("Failed to write MEM_TOPOLOGY, size %zd nodes\n", size); |
1441 | ret = -ENOMEM; |
1442 | goto out; |
1443 | } |
1444 | nodes = new_nodes; |
1445 | size += 4; |
1446 | } |
1447 | ret = memory_node__read(&nodes[cnt], idx); |
1448 | if (!ret) |
1449 | cnt += 1; |
1450 | } |
1451 | out: |
1452 | closedir(dir); |
1453 | if (!ret) { |
1454 | *cntp = cnt; |
1455 | *nodesp = nodes; |
1456 | qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort); |
1457 | } else |
1458 | memory_node__delete_nodes(nodes, cnt); |
1459 | |
1460 | return ret; |
1461 | } |
1462 | |
1463 | /* |
1464 | * The MEM_TOPOLOGY holds physical memory map for every |
1465 | * node in system. The format of data is as follows: |
1466 | * |
1467 | * 0 - version | for future changes |
1468 | * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes |
1469 | * 16 - count | number of nodes |
1470 | * |
1471 | * For each node we store map of physical indexes for |
1472 | * each node: |
1473 | * |
1474 | * 32 - node id | node index |
1475 | * 40 - size | size of bitmap |
1476 | * 48 - bitmap | bitmap of memory indexes that belongs to node |
1477 | */ |
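/*
 * Example instance (hypothetical, 2 nodes, 128M block size):
 *
 *	version		 = 1
 *	block_size_bytes = 0x8000000
 *	count		 = 2
 *	node id = 0, size = 16, bitmap covering memory blocks 0-7
 *	node id = 1, size = 16, bitmap covering memory blocks 8-15
 *
 * where each bitmap is stored in the do_write_bitmap() format (a u64 bit
 * count followed by the packed u64 words).
 */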
1478 | static int write_mem_topology(struct feat_fd *ff __maybe_unused, |
1479 | struct evlist *evlist __maybe_unused) |
1480 | { |
1481 | struct memory_node *nodes = NULL; |
1482 | u64 bsize, version = 1, i, nr = 0; |
1483 | int ret; |
1484 | |
1485 | ret = sysfs__read_xll("devices/system/memory/block_size_bytes", |
1486 | (unsigned long long *) &bsize); |
1487 | if (ret) |
1488 | return ret; |
1489 | |
1490 | ret = build_mem_topology(&nodes, &nr); |
1491 | if (ret) |
1492 | return ret; |
1493 | |
1494 | ret = do_write(ff, &version, sizeof(version)); |
1495 | if (ret < 0) |
1496 | goto out; |
1497 | |
1498 | ret = do_write(ff, &bsize, sizeof(bsize)); |
1499 | if (ret < 0) |
1500 | goto out; |
1501 | |
1502 | ret = do_write(ff, &nr, sizeof(nr)); |
1503 | if (ret < 0) |
1504 | goto out; |
1505 | |
1506 | for (i = 0; i < nr; i++) { |
1507 | struct memory_node *n = &nodes[i]; |
1508 | |
1509 | #define _W(v) \ |
1510 | ret = do_write(ff, &n->v, sizeof(n->v)); \ |
1511 | if (ret < 0) \ |
1512 | goto out; |
1513 | |
1514 | _W(node) |
1515 | _W(size) |
1516 | |
1517 | #undef _W |
1518 | |
1519 | ret = do_write_bitmap(ff, n->set, n->size); |
1520 | if (ret < 0) |
1521 | goto out; |
1522 | } |
1523 | |
1524 | out: |
1525 | memory_node__delete_nodes(nodes, nr); |
1526 | return ret; |
1527 | } |
1528 | |
1529 | static int write_compressed(struct feat_fd *ff __maybe_unused, |
1530 | struct evlist *evlist __maybe_unused) |
1531 | { |
1532 | int ret; |
1533 | |
1534 | ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver)); |
1535 | if (ret) |
1536 | return ret; |
1537 | |
1538 | ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type)); |
1539 | if (ret) |
1540 | return ret; |
1541 | |
1542 | ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level)); |
1543 | if (ret) |
1544 | return ret; |
1545 | |
1546 | ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio)); |
1547 | if (ret) |
1548 | return ret; |
1549 | |
1550 | return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len)); |
1551 | } |
1552 | |
1553 | static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu, |
1554 | bool write_pmu) |
1555 | { |
1556 | struct perf_pmu_caps *caps = NULL; |
1557 | int ret; |
1558 | |
1559 | ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps)); |
1560 | if (ret < 0) |
1561 | return ret; |
1562 | |
1563 | list_for_each_entry(caps, &pmu->caps, list) { |
1564 | ret = do_write_string(ff, caps->name); |
1565 | if (ret < 0) |
1566 | return ret; |
1567 | |
1568 | ret = do_write_string(ff, caps->value); |
1569 | if (ret < 0) |
1570 | return ret; |
1571 | } |
1572 | |
1573 | if (write_pmu) { |
1574 | ret = do_write_string(ff, pmu->name); |
1575 | if (ret < 0) |
1576 | return ret; |
1577 | } |
1578 | |
1579 | return ret; |
1580 | } |
1581 | |
1582 | static int write_cpu_pmu_caps(struct feat_fd *ff, |
1583 | struct evlist *evlist __maybe_unused) |
1584 | { |
1585 | struct perf_pmu *cpu_pmu = perf_pmus__find("cpu"); |
1586 | int ret; |
1587 | |
1588 | if (!cpu_pmu) |
1589 | return -ENOENT; |
1590 | |
1591 | ret = perf_pmu__caps_parse(cpu_pmu); |
1592 | if (ret < 0) |
1593 | return ret; |
1594 | |
1595 | return __write_pmu_caps(ff, cpu_pmu, false); |
1596 | } |
1597 | |
1598 | static int write_pmu_caps(struct feat_fd *ff, |
1599 | struct evlist *evlist __maybe_unused) |
1600 | { |
1601 | struct perf_pmu *pmu = NULL; |
1602 | int nr_pmu = 0; |
1603 | int ret; |
1604 | |
1605 | while ((pmu = perf_pmus__scan(pmu))) { |
1606 | if (!strcmp(pmu->name, "cpu")) { |
1607 | /* |
1608 | * The "cpu" PMU is special and covered by |
1609 | * HEADER_CPU_PMU_CAPS. Note, core PMUs are |
1610 | * counted/written here for ARM, s390 and Intel hybrid. |
1611 | */ |
1612 | continue; |
1613 | } |
1614 | if (perf_pmu__caps_parse(pmu) <= 0) |
1615 | continue; |
1616 | nr_pmu++; |
1617 | } |
1618 | |
1619 | ret = do_write(ff, &nr_pmu, sizeof(nr_pmu)); |
1620 | if (ret < 0) |
1621 | return ret; |
1622 | |
1623 | if (!nr_pmu) |
1624 | return 0; |
1625 | |
1626 | /* |
1627 | * Note older perf tools assume core PMUs come first, this is a property |
1628 | * of perf_pmus__scan. |
1629 | */ |
1630 | pmu = NULL; |
1631 | while ((pmu = perf_pmus__scan(pmu))) { |
1632 | if (!strcmp(pmu->name, "cpu")) { |
1633 | /* Skip as above. */ |
1634 | continue; |
1635 | } |
1636 | if (perf_pmu__caps_parse(pmu) <= 0) |
1637 | continue; |
1638 | ret = __write_pmu_caps(ff, pmu, true); |
1639 | if (ret < 0) |
1640 | return ret; |
1641 | } |
1642 | return 0; |
1643 | } |
1644 | |
1645 | static void print_hostname(struct feat_fd *ff, FILE *fp) |
1646 | { |
1647 | fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname); |
1648 | } |
1649 | |
1650 | static void print_osrelease(struct feat_fd *ff, FILE *fp) |
1651 | { |
1652 | fprintf(fp, "# os release : %s\n", ff->ph->env.os_release); |
1653 | } |
1654 | |
1655 | static void print_arch(struct feat_fd *ff, FILE *fp) |
1656 | { |
1657 | fprintf(fp, "# arch : %s\n", ff->ph->env.arch); |
1658 | } |
1659 | |
1660 | static void print_cpudesc(struct feat_fd *ff, FILE *fp) |
1661 | { |
1662 | fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc); |
1663 | } |
1664 | |
1665 | static void print_nrcpus(struct feat_fd *ff, FILE *fp) |
1666 | { |
1667 | fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online); |
1668 | fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail); |
1669 | } |
1670 | |
1671 | static void print_version(struct feat_fd *ff, FILE *fp) |
1672 | { |
1673 | fprintf(fp, "# perf version : %s\n", ff->ph->env.version); |
1674 | } |
1675 | |
1676 | static void print_cmdline(struct feat_fd *ff, FILE *fp) |
1677 | { |
1678 | int nr, i; |
1679 | |
1680 | nr = ff->ph->env.nr_cmdline; |
1681 | |
1682 | fprintf(fp, "# cmdline : "); |
1683 | |
1684 | for (i = 0; i < nr; i++) { |
1685 | char *argv_i = strdup(ff->ph->env.cmdline_argv[i]); |
1686 | if (!argv_i) { |
1687 | fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); |
1688 | } else { |
1689 | char *mem = argv_i; |
1690 | do { |
1691 | char *quote = strchr(argv_i, '\''); |
1692 | if (!quote) |
1693 | break; |
1694 | *quote++ = '\0'; |
1695 | fprintf(fp, "%s\\\'", argv_i); |
1696 | argv_i = quote; |
1697 | } while (1); |
1698 | fprintf(fp, "%s ", argv_i); |
1699 | free(mem); |
1700 | } |
1701 | } |
1702 | fputc('\n', fp); |
1703 | } |
1704 | |
1705 | static void print_cpu_topology(struct feat_fd *ff, FILE *fp) |
1706 | { |
1707 | struct perf_header *ph = ff->ph; |
1708 | int cpu_nr = ph->env.nr_cpus_avail; |
1709 | int nr, i; |
1710 | char *str; |
1711 | |
1712 | nr = ph->env.nr_sibling_cores; |
1713 | str = ph->env.sibling_cores; |
1714 | |
1715 | for (i = 0; i < nr; i++) { |
1716 | fprintf(fp, "# sibling sockets : %s\n", str); |
1717 | str += strlen(str) + 1; |
1718 | } |
1719 | |
1720 | if (ph->env.nr_sibling_dies) { |
1721 | nr = ph->env.nr_sibling_dies; |
1722 | str = ph->env.sibling_dies; |
1723 | |
1724 | for (i = 0; i < nr; i++) { |
1725 | fprintf(fp, "# sibling dies : %s\n", str); |
1726 | str += strlen(str) + 1; |
1727 | } |
1728 | } |
1729 | |
1730 | nr = ph->env.nr_sibling_threads; |
1731 | str = ph->env.sibling_threads; |
1732 | |
1733 | for (i = 0; i < nr; i++) { |
1734 | fprintf(fp, "# sibling threads : %s\n", str); |
1735 | str += strlen(str) + 1; |
1736 | } |
1737 | |
1738 | if (ph->env.nr_sibling_dies) { |
1739 | if (ph->env.cpu != NULL) { |
1740 | for (i = 0; i < cpu_nr; i++) |
1741 | fprintf(fp, "# CPU %d: Core ID %d, " |
1742 | "Die ID %d, Socket ID %d\n" , |
1743 | i, ph->env.cpu[i].core_id, |
1744 | ph->env.cpu[i].die_id, |
1745 | ph->env.cpu[i].socket_id); |
1746 | } else |
1747 | fprintf(fp, "# Core ID, Die ID and Socket ID " |
1748 | "information is not available\n" ); |
1749 | } else { |
1750 | if (ph->env.cpu != NULL) { |
1751 | for (i = 0; i < cpu_nr; i++) |
1752 | fprintf(fp, "# CPU %d: Core ID %d, " |
1753 | "Socket ID %d\n" , |
1754 | i, ph->env.cpu[i].core_id, |
1755 | ph->env.cpu[i].socket_id); |
1756 | } else |
1757 | fprintf(fp, "# Core ID and Socket ID " |
1758 | "information is not available\n" ); |
1759 | } |
1760 | } |
1761 | |
1762 | static void print_clockid(struct feat_fd *ff, FILE *fp) |
1763 | { |
1764 | fprintf(fp, "# clockid frequency: %" PRIu64" MHz\n", |
1765 | ff->ph->env.clock.clockid_res_ns * 1000); |
1766 | } |
1767 | |
1768 | static void print_clock_data(struct feat_fd *ff, FILE *fp) |
1769 | { |
1770 | struct timespec clockid_ns; |
1771 | char tstr[64], date[64]; |
1772 | struct timeval tod_ns; |
1773 | clockid_t clockid; |
1774 | struct tm ltime; |
1775 | u64 ref; |
1776 | |
1777 | if (!ff->ph->env.clock.enabled) { |
1778 | fprintf(fp, "# reference time disabled\n"); |
1779 | return; |
1780 | } |
1781 | |
1782 | /* Compute TOD time. */ |
1783 | ref = ff->ph->env.clock.tod_ns; |
1784 | tod_ns.tv_sec = ref / NSEC_PER_SEC; |
1785 | ref -= tod_ns.tv_sec * NSEC_PER_SEC; |
1786 | tod_ns.tv_usec = ref / NSEC_PER_USEC; |
1787 | |
1788 | /* Compute clockid time. */ |
1789 | ref = ff->ph->env.clock.clockid_ns; |
1790 | clockid_ns.tv_sec = ref / NSEC_PER_SEC; |
1791 | ref -= clockid_ns.tv_sec * NSEC_PER_SEC; |
1792 | clockid_ns.tv_nsec = ref; |
1793 | |
1794 | clockid = ff->ph->env.clock.clockid; |
1795 | |
1796 | if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL) |
1797 | snprintf(tstr, sizeof(tstr), "<error>"); |
1798 | else { |
1799 | strftime(date, sizeof(date), "%F %T", &ltime); |
1800 | scnprintf(tstr, sizeof(tstr), "%s.%06d", |
1801 | date, (int) tod_ns.tv_usec); |
1802 | } |
1803 | |
1804 | fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid); |
1805 | fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n", |
1806 | tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec, |
1807 | (long) clockid_ns.tv_sec, clockid_ns.tv_nsec, |
1808 | clockid_name(clockid)); |
1809 | } |
1810 | |
1811 | static void print_hybrid_topology(struct feat_fd *ff, FILE *fp) |
1812 | { |
1813 | int i; |
1814 | struct hybrid_node *n; |
1815 | |
1816 | fprintf(fp, "# hybrid cpu system:\n"); |
1817 | for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) { |
1818 | n = &ff->ph->env.hybrid_nodes[i]; |
1819 | fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus); |
1820 | } |
1821 | } |
1822 | |
1823 | static void print_dir_format(struct feat_fd *ff, FILE *fp) |
1824 | { |
1825 | struct perf_session *session; |
1826 | struct perf_data *data; |
1827 | |
1828 | session = container_of(ff->ph, struct perf_session, header); |
1829 | data = session->data; |
1830 | |
1831 | fprintf(fp, "# directory data version : %" PRIu64"\n", data->dir.version); |
1832 | } |
1833 | |
1834 | #ifdef HAVE_LIBBPF_SUPPORT |
1835 | static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp) |
1836 | { |
1837 | struct perf_env *env = &ff->ph->env; |
1838 | struct rb_root *root; |
1839 | struct rb_node *next; |
1840 | |
1841 | down_read(&env->bpf_progs.lock); |
1842 | |
1843 | root = &env->bpf_progs.infos; |
1844 | next = rb_first(root); |
1845 | |
1846 | while (next) { |
1847 | struct bpf_prog_info_node *node; |
1848 | |
1849 | node = rb_entry(next, struct bpf_prog_info_node, rb_node); |
1850 | next = rb_next(&node->rb_node); |
1851 | |
1852 | __bpf_event__print_bpf_prog_info(&node->info_linear->info, |
1853 | env, fp); |
1854 | } |
1855 | |
1856 | up_read(&env->bpf_progs.lock); |
1857 | } |
1858 | |
1859 | static void print_bpf_btf(struct feat_fd *ff, FILE *fp) |
1860 | { |
1861 | struct perf_env *env = &ff->ph->env; |
1862 | struct rb_root *root; |
1863 | struct rb_node *next; |
1864 | |
1865 | down_read(&env->bpf_progs.lock); |
1866 | |
1867 | root = &env->bpf_progs.btfs; |
1868 | next = rb_first(root); |
1869 | |
1870 | while (next) { |
1871 | struct btf_node *node; |
1872 | |
1873 | node = rb_entry(next, struct btf_node, rb_node); |
1874 | next = rb_next(&node->rb_node); |
1875 | fprintf(fp, "# btf info of id %u\n", node->id); |
1876 | } |
1877 | |
1878 | up_read(&env->bpf_progs.lock); |
1879 | } |
1880 | #endif // HAVE_LIBBPF_SUPPORT |
1881 | |
1882 | static void free_event_desc(struct evsel *events) |
1883 | { |
1884 | struct evsel *evsel; |
1885 | |
1886 | if (!events) |
1887 | return; |
1888 | |
1889 | for (evsel = events; evsel->core.attr.size; evsel++) { |
1890 | zfree(&evsel->name); |
1891 | zfree(&evsel->core.id); |
1892 | } |
1893 | |
1894 | free(events); |
1895 | } |
1896 | |
1897 | static bool perf_attr_check(struct perf_event_attr *attr) |
1898 | { |
1899 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) { |
1900 | pr_warning("Reserved bits are set unexpectedly. " |
1901 | "Please update perf tool.\n" ); |
1902 | return false; |
1903 | } |
1904 | |
1905 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) { |
1906 | pr_warning("Unknown sample type (0x%llx) is detected. " |
1907 | "Please update perf tool.\n" , |
1908 | attr->sample_type); |
1909 | return false; |
1910 | } |
1911 | |
1912 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) { |
1913 | pr_warning("Unknown read format (0x%llx) is detected. " |
1914 | "Please update perf tool.\n" , |
1915 | attr->read_format); |
1916 | return false; |
1917 | } |
1918 | |
1919 | if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) && |
1920 | (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) { |
1921 | pr_warning("Unknown branch sample type (0x%llx) is detected. " |
1922 | "Please update perf tool.\n" , |
1923 | attr->branch_sample_type); |
1924 | |
1925 | return false; |
1926 | } |
1927 | |
1928 | return true; |
1929 | } |
1930 | |
1931 | static struct evsel *read_event_desc(struct feat_fd *ff) |
1932 | { |
1933 | struct evsel *evsel, *events = NULL; |
1934 | u64 *id; |
1935 | void *buf = NULL; |
1936 | u32 nre, sz, nr, i, j; |
1937 | size_t msz; |
1938 | |
1939 | /* number of events */ |
1940 | if (do_read_u32(ff, &nre)) |
1941 | goto error; |
1942 | |
1943 | if (do_read_u32(ff, &sz)) |
1944 | goto error; |
1945 | |
1946 | /* buffer to hold on file attr struct */ |
1947 | buf = malloc(sz); |
1948 | if (!buf) |
1949 | goto error; |
1950 | |
1951 | /* the last event terminates with evsel->core.attr.size == 0: */ |
1952 | events = calloc(nre + 1, sizeof(*events)); |
1953 | if (!events) |
1954 | goto error; |
1955 | |
1956 | msz = sizeof(evsel->core.attr); |
1957 | if (sz < msz) |
1958 | msz = sz; |
1959 | |
1960 | for (i = 0, evsel = events; i < nre; evsel++, i++) { |
1961 | evsel->core.idx = i; |
1962 | |
1963 | /* |
1964 | * must read entire on-file attr struct to |
1965 | * sync up with layout. |
1966 | */ |
1967 | if (__do_read(ff, buf, sz)) |
1968 | goto error; |
1969 | |
1970 | if (ff->ph->needs_swap) |
1971 | perf_event__attr_swap(buf); |
1972 | |
1973 | memcpy(&evsel->core.attr, buf, msz); |
1974 | |
1975 | if (!perf_attr_check(&evsel->core.attr)) |
1976 | goto error; |
1977 | |
1978 | if (do_read_u32(ff, &nr)) |
1979 | goto error; |
1980 | |
1981 | if (ff->ph->needs_swap) |
1982 | evsel->needs_swap = true; |
1983 | |
1984 | evsel->name = do_read_string(ff); |
1985 | if (!evsel->name) |
1986 | goto error; |
1987 | |
1988 | if (!nr) |
1989 | continue; |
1990 | |
1991 | id = calloc(nr, sizeof(*id)); |
1992 | if (!id) |
1993 | goto error; |
1994 | evsel->core.ids = nr; |
1995 | evsel->core.id = id; |
1996 | |
1997 | for (j = 0 ; j < nr; j++) { |
1998 | if (do_read_u64(ff, id)) |
1999 | goto error; |
2000 | id++; |
2001 | } |
2002 | } |
2003 | out: |
2004 | free(buf); |
2005 | return events; |
2006 | error: |
2007 | free_event_desc(events); |
2008 | events = NULL; |
2009 | goto out; |
2010 | } |
2011 | |
2012 | static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, |
2013 | void *priv __maybe_unused) |
2014 | { |
	return fprintf(fp, ", %s = %s", name, val);
2016 | } |
2017 | |
2018 | static void print_event_desc(struct feat_fd *ff, FILE *fp) |
2019 | { |
2020 | struct evsel *evsel, *events; |
2021 | u32 j; |
2022 | u64 *id; |
2023 | |
2024 | if (ff->events) |
2025 | events = ff->events; |
2026 | else |
2027 | events = read_event_desc(ff); |
2028 | |
2029 | if (!events) { |
		fprintf(fp, "# event desc: not available or unable to read\n");
2031 | return; |
2032 | } |
2033 | |
	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s", evsel->name);

		if (evsel->core.ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %" PRIu64, *id);
			}
			fprintf(fp, " }");
		}
2046 | |
2047 | perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL); |
2048 | |
2049 | fputc('\n', fp); |
2050 | } |
2051 | |
2052 | free_event_desc(events); |
2053 | ff->events = NULL; |
2054 | } |
2055 | |
2056 | static void print_total_mem(struct feat_fd *ff, FILE *fp) |
2057 | { |
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
2059 | } |
2060 | |
2061 | static void print_numa_topology(struct feat_fd *ff, FILE *fp) |
2062 | { |
2063 | int i; |
2064 | struct numa_node *n; |
2065 | |
2066 | for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { |
2067 | n = &ff->ph->env.numa_nodes[i]; |
2068 | |
		fprintf(fp, "# node%u meminfo : total = %" PRIu64 " kB,"
			    " free = %" PRIu64 " kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
2074 | cpu_map__fprintf(n->map, fp); |
2075 | } |
2076 | } |
2077 | |
2078 | static void print_cpuid(struct feat_fd *ff, FILE *fp) |
2079 | { |
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
2081 | } |
2082 | |
2083 | static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp) |
2084 | { |
	fprintf(fp, "# contains samples with branch stack\n");
2086 | } |
2087 | |
2088 | static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp) |
2089 | { |
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
2091 | } |
2092 | |
2093 | static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp) |
2094 | { |
	fprintf(fp, "# contains stat data\n");
2096 | } |
2097 | |
static void print_cache(struct feat_fd *ff, FILE *fp)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
2105 | cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); |
2106 | } |
2107 | } |
2108 | |
2109 | static void print_compressed(struct feat_fd *ff, FILE *fp) |
2110 | { |
	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
2113 | ff->ph->env.comp_level, ff->ph->env.comp_ratio); |
2114 | } |
2115 | |
static void __print_pmu_caps(FILE *fp, int nr_caps, char **caps, char *pmu_name)
{
	const char *delimiter = "";
	int i;

	if (!nr_caps) {
		fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
		return;
	}

	fprintf(fp, "# %s pmu capabilities: ", pmu_name);
	for (i = 0; i < nr_caps; i++) {
		fprintf(fp, "%s%s", delimiter, caps[i]);
		delimiter = ", ";
	}

	fprintf(fp, "\n");
}
2134 | |
2135 | static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp) |
2136 | { |
2137 | __print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps, |
			 ff->ph->env.cpu_pmu_caps, (char *)"cpu");
2139 | } |
2140 | |
2141 | static void print_pmu_caps(struct feat_fd *ff, FILE *fp) |
2142 | { |
2143 | struct pmu_caps *pmu_caps; |
2144 | |
2145 | for (int i = 0; i < ff->ph->env.nr_pmus_with_caps; i++) { |
2146 | pmu_caps = &ff->ph->env.pmu_caps[i]; |
2147 | __print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps, |
2148 | pmu_caps->pmu_name); |
2149 | } |
2150 | |
	if (strcmp(perf_env__arch(&ff->ph->env), "x86") == 0 &&
	    perf_env__has_pmu_mapping(&ff->ph->env, "ibs_op")) {
		char *max_precise = perf_env__find_pmu_cap(&ff->ph->env, "cpu", "max_precise");

		if (max_precise != NULL && atoi(max_precise) == 0)
			fprintf(fp, "# AMD systems use the ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
2157 | } |
2158 | } |
2159 | |
2160 | static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) |
2161 | { |
	const char *delimiter = "# pmu mappings: ";
2163 | char *str, *tmp; |
2164 | u32 pmu_num; |
2165 | u32 type; |
2166 | |
2167 | pmu_num = ff->ph->env.nr_pmu_mappings; |
2168 | if (!pmu_num) { |
		fprintf(fp, "# pmu mappings: not available\n");
2170 | return; |
2171 | } |
2172 | |
2173 | str = ff->ph->env.pmu_mappings; |
2174 | |
2175 | while (pmu_num) { |
2176 | type = strtoul(str, &tmp, 0); |
2177 | if (*tmp != ':') |
2178 | goto error; |
2179 | |
2180 | str = tmp + 1; |
2181 | fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); |
2182 | |
2183 | delimiter = ", " ; |
2184 | str += strlen(str) + 1; |
2185 | pmu_num--; |
2186 | } |
2187 | |
	fprintf(fp, "\n");
2189 | |
2190 | if (!pmu_num) |
2191 | return; |
2192 | error: |
	fprintf(fp, "# pmu mappings: unable to read\n");
2194 | } |
2195 | |
2196 | static void print_group_desc(struct feat_fd *ff, FILE *fp) |
2197 | { |
2198 | struct perf_session *session; |
2199 | struct evsel *evsel; |
2200 | u32 nr = 0; |
2201 | |
2202 | session = container_of(ff->ph, struct perf_session, header); |
2203 | |
2204 | evlist__for_each_entry(session->evlist, evsel) { |
2205 | if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) { |
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
2207 | |
2208 | nr = evsel->core.nr_members - 1; |
2209 | } else if (nr) { |
			fprintf(fp, ",%s", evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
2214 | } |
2215 | } |
2216 | } |
2217 | |
2218 | static void print_sample_time(struct feat_fd *ff, FILE *fp) |
2219 | { |
2220 | struct perf_session *session; |
2221 | char time_buf[32]; |
2222 | double d; |
2223 | |
2224 | session = container_of(ff->ph, struct perf_session, header); |
2225 | |
	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
2238 | } |
2239 | |
2240 | static void memory_node__fprintf(struct memory_node *n, |
2241 | unsigned long long bsize, FILE *fp) |
2242 | { |
2243 | char buf_map[100], buf_size[50]; |
2244 | unsigned long long size; |
2245 | |
	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);
2248 | |
2249 | bitmap_scnprintf(n->set, n->size, buf_map, 100); |
	fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
2251 | } |
2252 | |
2253 | static void print_mem_topology(struct feat_fd *ff, FILE *fp) |
2254 | { |
2255 | struct memory_node *nodes; |
2256 | int i, nr; |
2257 | |
2258 | nodes = ff->ph->env.memory_nodes; |
2259 | nr = ff->ph->env.nr_memory_nodes; |
2260 | |
	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
2262 | nr, ff->ph->env.memory_bsize); |
2263 | |
2264 | for (i = 0; i < nr; i++) { |
2265 | memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); |
2266 | } |
2267 | } |
2268 | |
2269 | static int __event_process_build_id(struct perf_record_header_build_id *bev, |
2270 | char *filename, |
2271 | struct perf_session *session) |
2272 | { |
2273 | int err = -1; |
2274 | struct machine *machine; |
2275 | u16 cpumode; |
2276 | struct dso *dso; |
2277 | enum dso_space_type dso_space; |
2278 | |
	machine = perf_session__findnew_machine(session, bev->pid);
2280 | if (!machine) |
2281 | goto out; |
2282 | |
2283 | cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
2284 | |
2285 | switch (cpumode) { |
2286 | case PERF_RECORD_MISC_KERNEL: |
2287 | dso_space = DSO_SPACE__KERNEL; |
2288 | break; |
2289 | case PERF_RECORD_MISC_GUEST_KERNEL: |
2290 | dso_space = DSO_SPACE__KERNEL_GUEST; |
2291 | break; |
2292 | case PERF_RECORD_MISC_USER: |
2293 | case PERF_RECORD_MISC_GUEST_USER: |
2294 | dso_space = DSO_SPACE__USER; |
2295 | break; |
2296 | default: |
2297 | goto out; |
2298 | } |
2299 | |
2300 | dso = machine__findnew_dso(machine, filename); |
2301 | if (dso != NULL) { |
2302 | char sbuild_id[SBUILD_ID_SIZE]; |
2303 | struct build_id bid; |
2304 | size_t size = BUILD_ID_SIZE; |
2305 | |
2306 | if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE) |
2307 | size = bev->size; |
2308 | |
		build_id__init(&bid, bev->data, size);
		dso__set_build_id(dso, &bid);
2311 | dso->header_build_id = 1; |
2312 | |
2313 | if (dso_space != DSO_SPACE__USER) { |
2314 | struct kmod_path m = { .name = NULL, }; |
2315 | |
2316 | if (!kmod_path__parse_name(&m, filename) && m.kmod) |
				dso__set_module_info(dso, &m, machine);
2318 | |
2319 | dso->kernel = dso_space; |
2320 | free(m.name); |
2321 | } |
2322 | |
		build_id__sprintf(&dso->bid, sbuild_id);
		pr_debug("build id event received for %s: %s [%zu]\n",
			 dso->long_name, sbuild_id, size);
2326 | dso__put(dso); |
2327 | } |
2328 | |
2329 | err = 0; |
2330 | out: |
2331 | return err; |
2332 | } |
2333 | |
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
2344 | char filename[PATH_MAX]; |
2345 | u64 limit = offset + size; |
2346 | |
2347 | while (offset < limit) { |
2348 | ssize_t len; |
2349 | |
2350 | if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) |
2351 | return -1; |
2352 | |
2353 | if (header->needs_swap) |
			perf_event_header__bswap(&old_bev.header);
2355 | |
2356 | len = old_bev.header.size - sizeof(old_bev); |
2357 | if (readn(input, filename, len) != len) |
2358 | return -1; |
2359 | |
2360 | bev.header = old_bev.header; |
2361 | |
2362 | /* |
2363 | * As the pid is the missing value, we need to fill |
	 * it properly. The header.misc value gives us a nice hint.
2365 | */ |
2366 | bev.pid = HOST_KERNEL_ID; |
2367 | if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || |
2368 | bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) |
2369 | bev.pid = DEFAULT_GUEST_KERNEL_ID; |
2370 | |
2371 | memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); |
		__event_process_build_id(&bev, filename, session);
2373 | |
2374 | offset += bev.header.size; |
2375 | } |
2376 | |
2377 | return 0; |
2378 | } |
2379 | |
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
2385 | char filename[PATH_MAX]; |
2386 | u64 limit = offset + size, orig_offset = offset; |
2387 | int err = -1; |
2388 | |
2389 | while (offset < limit) { |
2390 | ssize_t len; |
2391 | |
2392 | if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) |
2393 | goto out; |
2394 | |
2395 | if (header->needs_swap) |
			perf_event_header__bswap(&bev.header);
2397 | |
2398 | len = bev.header.size - sizeof(bev); |
2399 | if (readn(input, filename, len) != len) |
2400 | goto out; |
2401 | /* |
2402 | * The a1645ce1 changeset: |
2403 | * |
2404 | * "perf: 'perf kvm' tool for monitoring guest performance from host" |
2405 | * |
2406 | * Added a field to struct perf_record_header_build_id that broke the file |
2407 | * format. |
2408 | * |
2409 | * Since the kernel build-id is the first entry, process the |
2410 | * table using the old format if the well known |
2411 | * '[kernel.kallsyms]' string for the kernel build-id has the |
2412 | * first 4 characters chopped off (where the pid_t sits). |
2413 | */ |
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2415 | if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) |
2416 | return -1; |
2417 | return perf_header__read_build_ids_abi_quirk(header, input, offset, size); |
2418 | } |
2419 | |
		__event_process_build_id(&bev, filename, session);
2421 | |
2422 | offset += bev.header.size; |
2423 | } |
2424 | err = 0; |
2425 | out: |
2426 | return err; |
2427 | } |
2428 | |
2429 | /* Macro for features that simply need to read and store a string. */ |
2430 | #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ |
2431 | static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ |
2432 | {\ |
2433 | free(ff->ph->env.__feat_env); \ |
2434 | ff->ph->env.__feat_env = do_read_string(ff); \ |
2435 | return ff->ph->env.__feat_env ? 0 : -ENOMEM; \ |
2436 | } |
2437 | |
2438 | FEAT_PROCESS_STR_FUN(hostname, hostname); |
2439 | FEAT_PROCESS_STR_FUN(osrelease, os_release); |
2440 | FEAT_PROCESS_STR_FUN(version, version); |
2441 | FEAT_PROCESS_STR_FUN(arch, arch); |
2442 | FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc); |
2443 | FEAT_PROCESS_STR_FUN(cpuid, cpuid); |
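
/*
 * For example, FEAT_PROCESS_STR_FUN(hostname, hostname) above expands
 * to roughly:
 *
 *	static int process_hostname(struct feat_fd *ff, void *data __maybe_unused)
 *	{
 *		free(ff->ph->env.hostname);
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */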
2444 | |
2445 | #ifdef HAVE_LIBTRACEEVENT |
2446 | static int process_tracing_data(struct feat_fd *ff, void *data) |
2447 | { |
2448 | ssize_t ret = trace_report(ff->fd, data, false); |
2449 | |
2450 | return ret < 0 ? -1 : 0; |
2451 | } |
2452 | #endif |
2453 | |
2454 | static int process_build_id(struct feat_fd *ff, void *data __maybe_unused) |
2455 | { |
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
2458 | return 0; |
2459 | } |
2460 | |
2461 | static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused) |
2462 | { |
2463 | int ret; |
2464 | u32 nr_cpus_avail, nr_cpus_online; |
2465 | |
	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
2471 | if (ret) |
2472 | return ret; |
2473 | ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail; |
2474 | ff->ph->env.nr_cpus_online = (int)nr_cpus_online; |
2475 | return 0; |
2476 | } |
2477 | |
2478 | static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused) |
2479 | { |
2480 | u64 total_mem; |
2481 | int ret; |
2482 | |
	ret = do_read_u64(ff, &total_mem);
2484 | if (ret) |
2485 | return -1; |
2486 | ff->ph->env.total_mem = (unsigned long long)total_mem; |
2487 | return 0; |
2488 | } |
2489 | |
2490 | static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx) |
2491 | { |
2492 | struct evsel *evsel; |
2493 | |
2494 | evlist__for_each_entry(evlist, evsel) { |
2495 | if (evsel->core.idx == idx) |
2496 | return evsel; |
2497 | } |
2498 | |
2499 | return NULL; |
2500 | } |
2501 | |
2502 | static void evlist__set_event_name(struct evlist *evlist, struct evsel *event) |
2503 | { |
2504 | struct evsel *evsel; |
2505 | |
2506 | if (!event->name) |
2507 | return; |
2508 | |
	evsel = evlist__find_by_index(evlist, event->core.idx);
2510 | if (!evsel) |
2511 | return; |
2512 | |
2513 | if (evsel->name) |
2514 | return; |
2515 | |
2516 | evsel->name = strdup(event->name); |
2517 | } |
2518 | |
2519 | static int |
2520 | process_event_desc(struct feat_fd *ff, void *data __maybe_unused) |
2521 | { |
2522 | struct perf_session *session; |
2523 | struct evsel *evsel, *events = read_event_desc(ff); |
2524 | |
2525 | if (!events) |
2526 | return 0; |
2527 | |
2528 | session = container_of(ff->ph, struct perf_session, header); |
2529 | |
2530 | if (session->data->is_pipe) { |
2531 | /* Save events for reading later by print_event_desc, |
2532 | * since they can't be read again in pipe mode. */ |
2533 | ff->events = events; |
2534 | } |
2535 | |
2536 | for (evsel = events; evsel->core.attr.size; evsel++) |
		evlist__set_event_name(session->evlist, evsel);
2538 | |
2539 | if (!session->data->is_pipe) |
2540 | free_event_desc(events); |
2541 | |
2542 | return 0; |
2543 | } |
2544 | |
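/*
 * On-file layout of the HEADER_CMDLINE section, as read below: a u32
 * argument count followed by that many strings. The strings are copied
 * back to back into a single buffer (env.cmdline), and env.cmdline_argv[]
 * is built to point into that buffer, so only the buffer and the argv
 * array themselves need freeing.
 */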
2545 | static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused) |
2546 | { |
2547 | char *str, *cmdline = NULL, **argv = NULL; |
2548 | u32 nr, i, len = 0; |
2549 | |
	if (do_read_u32(ff, &nr))
2551 | return -1; |
2552 | |
2553 | ff->ph->env.nr_cmdline = nr; |
2554 | |
2555 | cmdline = zalloc(ff->size + nr + 1); |
2556 | if (!cmdline) |
2557 | return -1; |
2558 | |
2559 | argv = zalloc(sizeof(char *) * (nr + 1)); |
2560 | if (!argv) |
2561 | goto error; |
2562 | |
2563 | for (i = 0; i < nr; i++) { |
2564 | str = do_read_string(ff); |
2565 | if (!str) |
2566 | goto error; |
2567 | |
2568 | argv[i] = cmdline + len; |
2569 | memcpy(argv[i], str, strlen(str) + 1); |
2570 | len += strlen(str) + 1; |
2571 | free(str); |
2572 | } |
2573 | ff->ph->env.cmdline = cmdline; |
2574 | ff->ph->env.cmdline_argv = (const char **) argv; |
2575 | return 0; |
2576 | |
2577 | error: |
2578 | free(argv); |
2579 | free(cmdline); |
2580 | return -1; |
2581 | } |
2582 | |
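/*
 * On-file layout of the HEADER_CPU_TOPOLOGY section, as read below.
 * Newer perf appended blocks over time, so everything past the first two
 * is optional and its presence is detected by comparing the running
 * 'size' against ff->size:
 *
 *	u32 nr_sibling_cores;	// followed by that many strings
 *	u32 nr_sibling_threads;	// followed by that many strings
 *	{ u32 core_id; u32 socket_id; } per cpu		// optional
 *	u32 nr_sibling_dies;	// followed by that many strings (optional)
 *	u32 die_id; per cpu				// optional
 */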
2583 | static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) |
2584 | { |
2585 | u32 nr, i; |
2586 | char *str = NULL; |
2587 | struct strbuf sb; |
2588 | int cpu_nr = ff->ph->env.nr_cpus_avail; |
2589 | u64 size = 0; |
2590 | struct perf_header *ph = ff->ph; |
2591 | bool do_core_id_test = true; |
2592 | |
2593 | ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); |
2594 | if (!ph->env.cpu) |
2595 | return -1; |
2596 | |
	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
2603 | goto free_cpu; |
2604 | |
2605 | for (i = 0; i < nr; i++) { |
2606 | str = do_read_string(ff); |
2607 | if (!str) |
2608 | goto error; |
2609 | |
2610 | /* include a NULL character at the end */ |
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2612 | goto error; |
2613 | size += string_size(str); |
2614 | zfree(&str); |
2615 | } |
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
2619 | return -1; |
2620 | |
2621 | ph->env.nr_sibling_threads = nr; |
2622 | size += sizeof(u32); |
2623 | |
2624 | for (i = 0; i < nr; i++) { |
2625 | str = do_read_string(ff); |
2626 | if (!str) |
2627 | goto error; |
2628 | |
2629 | /* include a NULL character at the end */ |
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2631 | goto error; |
2632 | size += string_size(str); |
2633 | zfree(&str); |
2634 | } |
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2636 | |
2637 | /* |
2638 | * The header may be from old perf, |
2639 | * which doesn't include core id and socket id information. |
2640 | */ |
2641 | if (ff->size <= size) { |
2642 | zfree(&ph->env.cpu); |
2643 | return 0; |
2644 | } |
2645 | |
	/* On s390 the socket_id number is not related to the number of cpus.
	 * The socket_id number might be higher than the number of cpus.
	 * This depends on the configuration.
	 * AArch64 is the same.
	 */
	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
			     || !strncmp(ph->env.arch, "aarch64", 7)))
2653 | do_core_id_test = false; |
2654 | |
2655 | for (i = 0; i < (u32)cpu_nr; i++) { |
		if (do_read_u32(ff, &nr))
2657 | goto free_cpu; |
2658 | |
2659 | ph->env.cpu[i].core_id = nr; |
2660 | size += sizeof(u32); |
2661 | |
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
2668 | goto free_cpu; |
2669 | } |
2670 | |
2671 | ph->env.cpu[i].socket_id = nr; |
2672 | size += sizeof(u32); |
2673 | } |
2674 | |
2675 | /* |
2676 | * The header may be from old perf, |
2677 | * which doesn't include die information. |
2678 | */ |
2679 | if (ff->size <= size) |
2680 | return 0; |
2681 | |
	if (do_read_u32(ff, &nr))
2683 | return -1; |
2684 | |
2685 | ph->env.nr_sibling_dies = nr; |
2686 | size += sizeof(u32); |
2687 | |
2688 | for (i = 0; i < nr; i++) { |
2689 | str = do_read_string(ff); |
2690 | if (!str) |
2691 | goto error; |
2692 | |
2693 | /* include a NULL character at the end */ |
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2695 | goto error; |
2696 | size += string_size(str); |
2697 | zfree(&str); |
2698 | } |
	ph->env.sibling_dies = strbuf_detach(&sb, NULL);

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
2703 | goto free_cpu; |
2704 | |
2705 | ph->env.cpu[i].die_id = nr; |
2706 | } |
2707 | |
2708 | return 0; |
2709 | |
2710 | error: |
	strbuf_release(&sb);
2712 | zfree(&str); |
2713 | free_cpu: |
2714 | zfree(&ph->env.cpu); |
2715 | return -1; |
2716 | } |
2717 | |
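/*
 * On-file layout of the HEADER_NUMA_TOPOLOGY section, as read below:
 *
 *	u32 nr_nodes;
 *	struct {
 *		u32	node;
 *		u64	mem_total;	// kB
 *		u64	mem_free;	// kB
 *		char	cpus[];		// string, parsed by perf_cpu_map__new()
 *	} nodes[nr_nodes];
 */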
2718 | static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) |
2719 | { |
2720 | struct numa_node *nodes, *n; |
2721 | u32 nr, i; |
2722 | char *str; |
2723 | |
2724 | /* nr nodes */ |
	if (do_read_u32(ff, &nr))
2726 | return -1; |
2727 | |
2728 | nodes = zalloc(sizeof(*nodes) * nr); |
2729 | if (!nodes) |
2730 | return -ENOMEM; |
2731 | |
2732 | for (i = 0; i < nr; i++) { |
2733 | n = &nodes[i]; |
2734 | |
2735 | /* node number */ |
		if (do_read_u32(ff, &n->node))
2737 | goto error; |
2738 | |
		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
2743 | goto error; |
2744 | |
2745 | str = do_read_string(ff); |
2746 | if (!str) |
2747 | goto error; |
2748 | |
2749 | n->map = perf_cpu_map__new(str); |
2750 | free(str); |
2751 | if (!n->map) |
2752 | goto error; |
2753 | } |
2754 | ff->ph->env.nr_numa_nodes = nr; |
2755 | ff->ph->env.numa_nodes = nodes; |
2756 | return 0; |
2757 | |
2758 | error: |
2759 | free(nodes); |
2760 | return -1; |
2761 | } |
2762 | |
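/*
 * On-file layout of the HEADER_PMU_MAPPINGS section, as read below: a u32
 * PMU count, then one { u32 type; string name; } pair per PMU. The pairs
 * are flattened into env.pmu_mappings as consecutive NUL-terminated
 * "type:name" records, which is the format print_pmu_mappings() walks.
 */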
2763 | static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) |
2764 | { |
2765 | char *name; |
2766 | u32 pmu_num; |
2767 | u32 type; |
2768 | struct strbuf sb; |
2769 | |
	if (do_read_u32(ff, &pmu_num))
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
2775 | return 0; |
2776 | } |
2777 | |
2778 | ff->ph->env.nr_pmu_mappings = pmu_num; |
	if (strbuf_init(&sb, 128) < 0)
2780 | return -1; |
2781 | |
2782 | while (pmu_num) { |
		if (do_read_u32(ff, &type))
2784 | goto error; |
2785 | |
2786 | name = do_read_string(ff); |
2787 | if (!name) |
2788 | goto error; |
2789 | |
		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;

		if (!strcmp(name, "msr"))
2797 | ff->ph->env.msr_pmu_type = type; |
2798 | |
2799 | free(name); |
2800 | pmu_num--; |
2801 | } |
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2803 | return 0; |
2804 | |
2805 | error: |
	strbuf_release(&sb);
2807 | return -1; |
2808 | } |
2809 | |
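/*
 * On-file layout of the HEADER_GROUP_DESC section, as read below: a u32
 * group count, then { string name; u32 leader_idx; u32 nr_members; } per
 * group. The descriptors are only needed to rebuild the leader/member
 * links in the evlist and are freed again before returning.
 */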
2810 | static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) |
2811 | { |
	int ret = -1;
2813 | u32 i, nr, nr_groups; |
2814 | struct perf_session *session; |
2815 | struct evsel *evsel, *leader = NULL; |
2816 | struct group_desc { |
2817 | char *name; |
2818 | u32 leader_idx; |
2819 | u32 nr_members; |
2820 | } *desc; |
2821 | |
	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
2828 | return 0; |
2829 | } |
2830 | |
2831 | desc = calloc(nr_groups, sizeof(*desc)); |
2832 | if (!desc) |
2833 | return -1; |
2834 | |
2835 | for (i = 0; i < nr_groups; i++) { |
2836 | desc[i].name = do_read_string(ff); |
2837 | if (!desc[i].name) |
2838 | goto out_free; |
2839 | |
		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
2844 | goto out_free; |
2845 | } |
2846 | |
2847 | /* |
2848 | * Rebuild group relationship based on the group_desc |
2849 | */ |
2850 | session = container_of(ff->ph, struct perf_session, header); |
2851 | |
2852 | i = nr = 0; |
2853 | evlist__for_each_entry(session->evlist, evsel) { |
2854 | if (i < nr_groups && evsel->core.idx == (int) desc[i].leader_idx) { |
			evsel__set_leader(evsel, evsel);
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
2858 | evsel->group_name = desc[i].name; |
2859 | desc[i].name = NULL; |
2860 | } |
2861 | evsel->core.nr_members = desc[i].nr_members; |
2862 | |
2863 | if (i >= nr_groups || nr > 0) { |
				pr_debug("invalid group desc\n");
2865 | goto out_free; |
2866 | } |
2867 | |
2868 | leader = evsel; |
2869 | nr = evsel->core.nr_members - 1; |
2870 | i++; |
2871 | } else if (nr) { |
2872 | /* This is a group member */ |
2873 | evsel__set_leader(evsel, leader); |
2874 | |
2875 | nr--; |
2876 | } |
2877 | } |
2878 | |
2879 | if (i != nr_groups || nr != 0) { |
		pr_debug("invalid group desc\n");
2881 | goto out_free; |
2882 | } |
2883 | |
2884 | ret = 0; |
2885 | out_free: |
2886 | for (i = 0; i < nr_groups; i++) |
2887 | zfree(&desc[i].name); |
2888 | free(desc); |
2889 | |
2890 | return ret; |
2891 | } |
2892 | |
2893 | static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused) |
2894 | { |
2895 | struct perf_session *session; |
2896 | int err; |
2897 | |
2898 | session = container_of(ff->ph, struct perf_session, header); |
2899 | |
	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
2904 | return err; |
2905 | } |
2906 | |
2907 | static int process_cache(struct feat_fd *ff, void *data __maybe_unused) |
2908 | { |
2909 | struct cpu_cache_level *caches; |
2910 | u32 cnt, i, version; |
2911 | |
	if (do_read_u32(ff, &version))
2913 | return -1; |
2914 | |
2915 | if (version != 1) |
2916 | return -1; |
2917 | |
	if (do_read_u32(ff, &cnt))
2919 | return -1; |
2920 | |
2921 | caches = zalloc(sizeof(*caches) * cnt); |
2922 | if (!caches) |
2923 | return -1; |
2924 | |
2925 | for (i = 0; i < cnt; i++) { |
2926 | struct cpu_cache_level *c = &caches[i]; |
2927 | |
2928 | #define _R(v) \ |
2929 | if (do_read_u32(ff, &c->v)) \ |
2930 | goto out_free_caches; \ |
2931 | |
2932 | _R(level) |
2933 | _R(line_size) |
2934 | _R(sets) |
2935 | _R(ways) |
2936 | #undef _R |
2937 | |
2938 | #define _R(v) \ |
2939 | c->v = do_read_string(ff); \ |
2940 | if (!c->v) \ |
2941 | goto out_free_caches; \ |
2942 | |
2943 | _R(type) |
2944 | _R(size) |
2945 | _R(map) |
2946 | #undef _R |
2947 | } |
2948 | |
2949 | ff->ph->env.caches = caches; |
2950 | ff->ph->env.caches_cnt = cnt; |
2951 | return 0; |
2952 | out_free_caches: |
2953 | for (i = 0; i < cnt; i++) { |
2954 | free(caches[i].type); |
2955 | free(caches[i].size); |
2956 | free(caches[i].map); |
2957 | } |
2958 | free(caches); |
2959 | return -1; |
2960 | } |
2961 | |
2962 | static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused) |
2963 | { |
2964 | struct perf_session *session; |
2965 | u64 first_sample_time, last_sample_time; |
2966 | int ret; |
2967 | |
2968 | session = container_of(ff->ph, struct perf_session, header); |
2969 | |
	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
2975 | if (ret) |
2976 | return -1; |
2977 | |
2978 | session->evlist->first_sample_time = first_sample_time; |
2979 | session->evlist->last_sample_time = last_sample_time; |
2980 | return 0; |
2981 | } |
2982 | |
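/*
 * On-file layout of the HEADER_MEM_TOPOLOGY section, as read below:
 *
 *	u64 version;		// currently 1
 *	u64 block_size;
 *	u64 nr_nodes;
 *	struct {
 *		u64	node;
 *		u64	size;
 *		bitmap	set;	// do_read_bitmap() format
 *	} nodes[nr_nodes];
 */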
2983 | static int process_mem_topology(struct feat_fd *ff, |
2984 | void *data __maybe_unused) |
2985 | { |
2986 | struct memory_node *nodes; |
2987 | u64 version, i, nr, bsize; |
2988 | int ret = -1; |
2989 | |
	if (do_read_u64(ff, &version))
2991 | return -1; |
2992 | |
2993 | if (version != 1) |
2994 | return -1; |
2995 | |
	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
3000 | return -1; |
3001 | |
3002 | nodes = zalloc(sizeof(*nodes) * nr); |
3003 | if (!nodes) |
3004 | return -1; |
3005 | |
3006 | for (i = 0; i < nr; i++) { |
3007 | struct memory_node n; |
3008 | |
3009 | #define _R(v) \ |
3010 | if (do_read_u64(ff, &n.v)) \ |
3011 | goto out; \ |
3012 | |
3013 | _R(node) |
3014 | _R(size) |
3015 | |
3016 | #undef _R |
3017 | |
		if (do_read_bitmap(ff, &n.set, &n.size))
3019 | goto out; |
3020 | |
3021 | nodes[i] = n; |
3022 | } |
3023 | |
3024 | ff->ph->env.memory_bsize = bsize; |
3025 | ff->ph->env.memory_nodes = nodes; |
3026 | ff->ph->env.nr_memory_nodes = nr; |
3027 | ret = 0; |
3028 | |
3029 | out: |
3030 | if (ret) |
3031 | free(nodes); |
3032 | return ret; |
3033 | } |
3034 | |
3035 | static int process_clockid(struct feat_fd *ff, |
3036 | void *data __maybe_unused) |
3037 | { |
	if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
3039 | return -1; |
3040 | |
3041 | return 0; |
3042 | } |
3043 | |
3044 | static int process_clock_data(struct feat_fd *ff, |
3045 | void *_data __maybe_unused) |
3046 | { |
3047 | u32 data32; |
3048 | u64 data64; |
3049 | |
	/* version */
	if (do_read_u32(ff, &data32))
		return -1;

	if (data32 != 1)
		return -1;

	/* clockid */
	if (do_read_u32(ff, &data32))
		return -1;

	ff->ph->env.clock.clockid = data32;

	/* TOD ref time */
	if (do_read_u64(ff, &data64))
		return -1;

	ff->ph->env.clock.tod_ns = data64;

	/* clockid ref time */
	if (do_read_u64(ff, &data64))
3071 | return -1; |
3072 | |
3073 | ff->ph->env.clock.clockid_ns = data64; |
3074 | ff->ph->env.clock.enabled = true; |
3075 | return 0; |
3076 | } |
3077 | |
3078 | static int process_hybrid_topology(struct feat_fd *ff, |
3079 | void *data __maybe_unused) |
3080 | { |
3081 | struct hybrid_node *nodes, *n; |
3082 | u32 nr, i; |
3083 | |
3084 | /* nr nodes */ |
	if (do_read_u32(ff, &nr))
3086 | return -1; |
3087 | |
3088 | nodes = zalloc(sizeof(*nodes) * nr); |
3089 | if (!nodes) |
3090 | return -ENOMEM; |
3091 | |
3092 | for (i = 0; i < nr; i++) { |
3093 | n = &nodes[i]; |
3094 | |
3095 | n->pmu_name = do_read_string(ff); |
3096 | if (!n->pmu_name) |
3097 | goto error; |
3098 | |
3099 | n->cpus = do_read_string(ff); |
3100 | if (!n->cpus) |
3101 | goto error; |
3102 | } |
3103 | |
3104 | ff->ph->env.nr_hybrid_nodes = nr; |
3105 | ff->ph->env.hybrid_nodes = nodes; |
3106 | return 0; |
3107 | |
3108 | error: |
3109 | for (i = 0; i < nr; i++) { |
3110 | free(nodes[i].pmu_name); |
3111 | free(nodes[i].cpus); |
3112 | } |
3113 | |
3114 | free(nodes); |
3115 | return -1; |
3116 | } |
3117 | |
3118 | static int process_dir_format(struct feat_fd *ff, |
3119 | void *_data __maybe_unused) |
3120 | { |
3121 | struct perf_session *session; |
3122 | struct perf_data *data; |
3123 | |
3124 | session = container_of(ff->ph, struct perf_session, header); |
3125 | data = session->data; |
3126 | |
3127 | if (WARN_ON(!perf_data__is_dir(data))) |
3128 | return -1; |
3129 | |
	return do_read_u64(ff, &data->dir.version);
3131 | } |
3132 | |
3133 | #ifdef HAVE_LIBBPF_SUPPORT |
3134 | static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) |
3135 | { |
3136 | struct bpf_prog_info_node *info_node; |
3137 | struct perf_env *env = &ff->ph->env; |
3138 | struct perf_bpil *info_linear; |
3139 | u32 count, i; |
3140 | int err = -1; |
3141 | |
3142 | if (ff->ph->needs_swap) { |
		pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
3144 | return 0; |
3145 | } |
3146 | |
3147 | if (do_read_u32(ff, &count)) |
3148 | return -1; |
3149 | |
3150 | down_write(&env->bpf_progs.lock); |
3151 | |
3152 | for (i = 0; i < count; ++i) { |
3153 | u32 info_len, data_len; |
3154 | |
3155 | info_linear = NULL; |
3156 | info_node = NULL; |
3157 | if (do_read_u32(ff, &info_len)) |
3158 | goto out; |
3159 | if (do_read_u32(ff, &data_len)) |
3160 | goto out; |
3161 | |
3162 | if (info_len > sizeof(struct bpf_prog_info)) { |
			pr_warning("detected invalid bpf_prog_info\n");
3164 | goto out; |
3165 | } |
3166 | |
3167 | info_linear = malloc(sizeof(struct perf_bpil) + |
3168 | data_len); |
3169 | if (!info_linear) |
3170 | goto out; |
3171 | info_linear->info_len = sizeof(struct bpf_prog_info); |
3172 | info_linear->data_len = data_len; |
3173 | if (do_read_u64(ff, (u64 *)(&info_linear->arrays))) |
3174 | goto out; |
3175 | if (__do_read(ff, &info_linear->info, info_len)) |
3176 | goto out; |
3177 | if (info_len < sizeof(struct bpf_prog_info)) |
3178 | memset(((void *)(&info_linear->info)) + info_len, 0, |
3179 | sizeof(struct bpf_prog_info) - info_len); |
3180 | |
3181 | if (__do_read(ff, info_linear->data, data_len)) |
3182 | goto out; |
3183 | |
3184 | info_node = malloc(sizeof(struct bpf_prog_info_node)); |
3185 | if (!info_node) |
3186 | goto out; |
3187 | |
3188 | /* after reading from file, translate offset to address */ |
3189 | bpil_offs_to_addr(info_linear); |
3190 | info_node->info_linear = info_linear; |
3191 | __perf_env__insert_bpf_prog_info(env, info_node); |
3192 | } |
3193 | |
3194 | up_write(&env->bpf_progs.lock); |
3195 | return 0; |
3196 | out: |
3197 | free(info_linear); |
3198 | free(info_node); |
3199 | up_write(&env->bpf_progs.lock); |
3200 | return err; |
3201 | } |
3202 | |
3203 | static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) |
3204 | { |
3205 | struct perf_env *env = &ff->ph->env; |
3206 | struct btf_node *node = NULL; |
3207 | u32 count, i; |
3208 | int err = -1; |
3209 | |
3210 | if (ff->ph->needs_swap) { |
		pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
3212 | return 0; |
3213 | } |
3214 | |
3215 | if (do_read_u32(ff, &count)) |
3216 | return -1; |
3217 | |
3218 | down_write(&env->bpf_progs.lock); |
3219 | |
3220 | for (i = 0; i < count; ++i) { |
3221 | u32 id, data_size; |
3222 | |
3223 | if (do_read_u32(ff, &id)) |
3224 | goto out; |
3225 | if (do_read_u32(ff, &data_size)) |
3226 | goto out; |
3227 | |
3228 | node = malloc(sizeof(struct btf_node) + data_size); |
3229 | if (!node) |
3230 | goto out; |
3231 | |
3232 | node->id = id; |
3233 | node->data_size = data_size; |
3234 | |
3235 | if (__do_read(ff, node->data, data_size)) |
3236 | goto out; |
3237 | |
3238 | __perf_env__insert_btf(env, node); |
3239 | node = NULL; |
3240 | } |
3241 | |
3242 | err = 0; |
3243 | out: |
3244 | up_write(&env->bpf_progs.lock); |
3245 | free(node); |
3246 | return err; |
3247 | } |
3248 | #endif // HAVE_LIBBPF_SUPPORT |
3249 | |
3250 | static int process_compressed(struct feat_fd *ff, |
3251 | void *data __maybe_unused) |
3252 | { |
	if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_type)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_level)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
3266 | return -1; |
3267 | |
3268 | return 0; |
3269 | } |
3270 | |
3271 | static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps, |
3272 | char ***caps, unsigned int *max_branches, |
3273 | unsigned int *br_cntr_nr, |
3274 | unsigned int *br_cntr_width) |
3275 | { |
3276 | char *name, *value, *ptr; |
3277 | u32 nr_pmu_caps, i; |
3278 | |
3279 | *nr_caps = 0; |
3280 | *caps = NULL; |
3281 | |
	if (do_read_u32(ff, &nr_pmu_caps))
3283 | return -1; |
3284 | |
3285 | if (!nr_pmu_caps) |
3286 | return 0; |
3287 | |
3288 | *caps = zalloc(sizeof(char *) * nr_pmu_caps); |
3289 | if (!*caps) |
3290 | return -1; |
3291 | |
3292 | for (i = 0; i < nr_pmu_caps; i++) { |
3293 | name = do_read_string(ff); |
3294 | if (!name) |
3295 | goto error; |
3296 | |
3297 | value = do_read_string(ff); |
3298 | if (!value) |
3299 | goto free_name; |
3300 | |
		if (asprintf(&ptr, "%s=%s", name, value) < 0)
			goto free_value;

		(*caps)[i] = ptr;

		if (!strcmp(name, "branches"))
			*max_branches = atoi(value);

		if (!strcmp(name, "branch_counter_nr"))
			*br_cntr_nr = atoi(value);

		if (!strcmp(name, "branch_counter_width"))
			*br_cntr_width = atoi(value);
3314 | |
3315 | free(value); |
3316 | free(name); |
3317 | } |
3318 | *nr_caps = nr_pmu_caps; |
3319 | return 0; |
3320 | |
3321 | free_value: |
3322 | free(value); |
3323 | free_name: |
3324 | free(name); |
3325 | error: |
3326 | for (; i > 0; i--) |
3327 | free((*caps)[i - 1]); |
3328 | free(*caps); |
3329 | *caps = NULL; |
3330 | *nr_caps = 0; |
3331 | return -1; |
3332 | } |
3333 | |
3334 | static int process_cpu_pmu_caps(struct feat_fd *ff, |
3335 | void *data __maybe_unused) |
3336 | { |
	int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
				     &ff->ph->env.cpu_pmu_caps,
				     &ff->ph->env.max_branches,
				     &ff->ph->env.br_cntr_nr,
				     &ff->ph->env.br_cntr_width);

	if (!ret && !ff->ph->env.cpu_pmu_caps)
		pr_debug("cpu pmu capabilities not available\n");
3345 | return ret; |
3346 | } |
3347 | |
3348 | static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused) |
3349 | { |
3350 | struct pmu_caps *pmu_caps; |
3351 | u32 nr_pmu, i; |
3352 | int ret; |
3353 | int j; |
3354 | |
	if (do_read_u32(ff, &nr_pmu))
		return -1;

	if (!nr_pmu) {
		pr_debug("pmu capabilities not available\n");
3360 | return 0; |
3361 | } |
3362 | |
3363 | pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu); |
3364 | if (!pmu_caps) |
3365 | return -ENOMEM; |
3366 | |
3367 | for (i = 0; i < nr_pmu; i++) { |
		ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
					 &pmu_caps[i].caps,
					 &pmu_caps[i].max_branches,
					 &pmu_caps[i].br_cntr_nr,
					 &pmu_caps[i].br_cntr_width);
3373 | if (ret) |
3374 | goto err; |
3375 | |
3376 | pmu_caps[i].pmu_name = do_read_string(ff); |
3377 | if (!pmu_caps[i].pmu_name) { |
3378 | ret = -1; |
3379 | goto err; |
3380 | } |
3381 | if (!pmu_caps[i].nr_caps) { |
			pr_debug("%s pmu capabilities not available\n",
				 pmu_caps[i].pmu_name);
3384 | } |
3385 | } |
3386 | |
3387 | ff->ph->env.nr_pmus_with_caps = nr_pmu; |
3388 | ff->ph->env.pmu_caps = pmu_caps; |
3389 | return 0; |
3390 | |
3391 | err: |
3392 | for (i = 0; i < nr_pmu; i++) { |
3393 | for (j = 0; j < pmu_caps[i].nr_caps; j++) |
3394 | free(pmu_caps[i].caps[j]); |
3395 | free(pmu_caps[i].caps); |
3396 | free(pmu_caps[i].pmu_name); |
3397 | } |
3398 | |
3399 | free(pmu_caps); |
3400 | return ret; |
3401 | } |
3402 | |
3403 | #define FEAT_OPR(n, func, __full_only) \ |
3404 | [HEADER_##n] = { \ |
3405 | .name = __stringify(n), \ |
3406 | .write = write_##func, \ |
3407 | .print = print_##func, \ |
3408 | .full_only = __full_only, \ |
3409 | .process = process_##func, \ |
3410 | .synthesize = true \ |
3411 | } |
3412 | |
3413 | #define FEAT_OPN(n, func, __full_only) \ |
3414 | [HEADER_##n] = { \ |
3415 | .name = __stringify(n), \ |
3416 | .write = write_##func, \ |
3417 | .print = print_##func, \ |
3418 | .full_only = __full_only, \ |
3419 | .process = process_##func \ |
3420 | } |
3421 | |
3422 | /* feature_ops not implemented: */ |
3423 | #define print_tracing_data NULL |
3424 | #define print_build_id NULL |
3425 | |
3426 | #define process_branch_stack NULL |
3427 | #define process_stat NULL |
3428 | |
3429 | // Only used in util/synthetic-events.c |
3430 | const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; |
3431 | |
3432 | const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = { |
3433 | #ifdef HAVE_LIBTRACEEVENT |
3434 | FEAT_OPN(TRACING_DATA, tracing_data, false), |
3435 | #endif |
3436 | FEAT_OPN(BUILD_ID, build_id, false), |
3437 | FEAT_OPR(HOSTNAME, hostname, false), |
3438 | FEAT_OPR(OSRELEASE, osrelease, false), |
3439 | FEAT_OPR(VERSION, version, false), |
3440 | FEAT_OPR(ARCH, arch, false), |
3441 | FEAT_OPR(NRCPUS, nrcpus, false), |
3442 | FEAT_OPR(CPUDESC, cpudesc, false), |
3443 | FEAT_OPR(CPUID, cpuid, false), |
3444 | FEAT_OPR(TOTAL_MEM, total_mem, false), |
3445 | FEAT_OPR(EVENT_DESC, event_desc, false), |
3446 | FEAT_OPR(CMDLINE, cmdline, false), |
3447 | FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true), |
3448 | FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), |
3449 | FEAT_OPN(BRANCH_STACK, branch_stack, false), |
3450 | FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), |
3451 | FEAT_OPR(GROUP_DESC, group_desc, false), |
3452 | FEAT_OPN(AUXTRACE, auxtrace, false), |
3453 | FEAT_OPN(STAT, stat, false), |
3454 | FEAT_OPN(CACHE, cache, true), |
3455 | FEAT_OPR(SAMPLE_TIME, sample_time, false), |
3456 | FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), |
3457 | FEAT_OPR(CLOCKID, clockid, false), |
3458 | FEAT_OPN(DIR_FORMAT, dir_format, false), |
3459 | #ifdef HAVE_LIBBPF_SUPPORT |
3460 | FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false), |
3461 | FEAT_OPR(BPF_BTF, bpf_btf, false), |
3462 | #endif |
3463 | FEAT_OPR(COMPRESSED, compressed, false), |
3464 | FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false), |
3465 | FEAT_OPR(CLOCK_DATA, clock_data, false), |
3466 | FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true), |
3467 | FEAT_OPR(PMU_CAPS, pmu_caps, false), |
3468 | }; |
3469 | |
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
3474 | |
3475 | static int perf_file_section__fprintf_info(struct perf_file_section *section, |
3476 | struct perf_header *ph, |
3477 | int feat, int fd, void *data) |
3478 | { |
3479 | struct header_print_data *hd = data; |
3480 | struct feat_fd ff; |
3481 | |
3482 | if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { |
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
3485 | return 0; |
3486 | } |
3487 | if (feat >= HEADER_LAST_FEATURE) { |
		pr_warning("unknown feature %d\n", feat);
3489 | return 0; |
3490 | } |
3491 | if (!feat_ops[feat].print) |
3492 | return 0; |
3493 | |
3494 | ff = (struct feat_fd) { |
3495 | .fd = fd, |
3496 | .ph = ph, |
3497 | }; |
3498 | |
3499 | if (!feat_ops[feat].full_only || hd->full) |
3500 | feat_ops[feat].print(&ff, hd->fp); |
3501 | else |
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
3504 | |
3505 | return 0; |
3506 | } |
3507 | |
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
3512 | int fd = perf_data__fd(data: session->data); |
3513 | struct stat st; |
3514 | time_t stctime; |
3515 | int ret, bit; |
3516 | |
3517 | hd.fp = fp; |
3518 | hd.full = full; |
3519 | |
3520 | ret = fstat(fd, &st); |
3521 | if (ret == -1) |
3522 | return -1; |
3523 | |
3524 | stctime = st.st_mtime; |
	fprintf(fp, "# captured on : %s", ctime(&stctime));
3526 | |
	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
3531 | |
	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);
3534 | |
3535 | if (session->data->is_pipe) |
3536 | return 0; |
3537 | |
	fprintf(fp, "# missing features: ");
3539 | for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) { |
3540 | if (bit) |
			fprintf(fp, "%s ", feat_ops[bit].name);
3542 | } |
3543 | |
	fprintf(fp, "\n");
3545 | return 0; |
3546 | } |
3547 | |
struct header_fw {
	struct feat_writer	fw;
	struct feat_fd		*ff;
};
3552 | |
3553 | static int feat_writer_cb(struct feat_writer *fw, void *buf, size_t sz) |
3554 | { |
3555 | struct header_fw *h = container_of(fw, struct header_fw, fw); |
3556 | |
	return do_write(h->ff, buf, sz);
3558 | } |
3559 | |
3560 | static int do_write_feat(struct feat_fd *ff, int type, |
3561 | struct perf_file_section **p, |
3562 | struct evlist *evlist, |
3563 | struct feat_copier *fc) |
3564 | { |
3565 | int err; |
3566 | int ret = 0; |
3567 | |
	if (perf_header__has_feat(ff->ph, type)) {
3569 | if (!feat_ops[type].write) |
3570 | return -1; |
3571 | |
		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3573 | return -1; |
3574 | |
3575 | (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); |
3576 | |
3577 | /* |
3578 | * Hook to let perf inject copy features sections from the input |
3579 | * file. |
3580 | */ |
3581 | if (fc && fc->copy) { |
3582 | struct header_fw h = { |
3583 | .fw.write = feat_writer_cb, |
3584 | .ff = ff, |
3585 | }; |
3586 | |
3587 | /* ->copy() returns 0 if the feature was not copied */ |
3588 | err = fc->copy(fc, type, &h.fw); |
3589 | } else { |
3590 | err = 0; |
3591 | } |
3592 | if (!err) |
3593 | err = feat_ops[type].write(ff, evlist); |
3594 | if (err < 0) { |
			pr_debug("failed to write feature %s\n", feat_ops[type].name);
3596 | |
3597 | /* undo anything written */ |
3598 | lseek(ff->fd, (*p)->offset, SEEK_SET); |
3599 | |
3600 | return -1; |
3601 | } |
3602 | (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; |
3603 | (*p)++; |
3604 | } |
3605 | return ret; |
3606 | } |
3607 | |
static int perf_header__adds_write(struct perf_header *header,
				   struct evlist *evlist, int fd,
				   struct feat_copier *fc)
3611 | { |
3612 | int nr_sections; |
3613 | struct feat_fd ff = { |
3614 | .fd = fd, |
3615 | .ph = header, |
3616 | }; |
3617 | struct perf_file_section *feat_sec, *p; |
3618 | int sec_size; |
3619 | u64 sec_start; |
3620 | int feat; |
3621 | int err; |
3622 | |
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3624 | if (!nr_sections) |
3625 | return 0; |
3626 | |
3627 | feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); |
3628 | if (feat_sec == NULL) |
3629 | return -ENOMEM; |
3630 | |
3631 | sec_size = sizeof(*feat_sec) * nr_sections; |
3632 | |
3633 | sec_start = header->feat_offset; |
3634 | lseek(fd, sec_start + sec_size, SEEK_SET); |
3635 | |
3636 | for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { |
		if (do_write_feat(&ff, feat, &p, evlist, fc))
3638 | perf_header__clear_feat(header, feat); |
3639 | } |
3640 | |
3641 | lseek(fd, sec_start, SEEK_SET); |
3642 | /* |
3643 | * may write more than needed due to dropped feature, but |
3644 | * this is okay, reader will skip the missing entries |
3645 | */ |
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
3649 | free(ff.buf); /* TODO: added to silence clang-tidy. */ |
3650 | free(feat_sec); |
3651 | return err; |
3652 | } |
3653 | |
int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
3657 | struct feat_fd ff = { |
3658 | .fd = fd, |
3659 | }; |
3660 | int err; |
3661 | |
3662 | f_header = (struct perf_pipe_file_header){ |
3663 | .magic = PERF_MAGIC, |
3664 | .size = sizeof(f_header), |
3665 | }; |
3666 | |
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
3670 | return err; |
3671 | } |
3672 | free(ff.buf); |
3673 | return 0; |
3674 | } |
3675 | |
static int perf_session__do_write_header(struct perf_session *session,
					 struct evlist *evlist,
					 int fd, bool at_exit,
					 struct feat_copier *fc)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
3684 | struct evsel *evsel; |
3685 | struct feat_fd ff = { |
3686 | .fd = fd, |
3687 | }; |
3688 | u64 attr_offset; |
3689 | int err; |
3690 | |
3691 | lseek(fd, sizeof(f_header), SEEK_SET); |
3692 | |
3693 | evlist__for_each_entry(session->evlist, evsel) { |
3694 | evsel->id_offset = lseek(fd, 0, SEEK_CUR); |
		err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
3698 | free(ff.buf); |
3699 | return err; |
3700 | } |
3701 | } |
3702 | |
3703 | attr_offset = lseek(ff.fd, 0, SEEK_CUR); |
3704 | |
3705 | evlist__for_each_entry(evlist, evsel) { |
3706 | if (evsel->core.attr.size < sizeof(evsel->core.attr)) { |
3707 | /* |
3708 | * We are likely in "perf inject" and have read |
3709 | * from an older file. Update attr size so that |
3710 | * reader gets the right offset to the ids. |
3711 | */ |
3712 | evsel->core.attr.size = sizeof(evsel->core.attr); |
3713 | } |
3714 | f_attr = (struct perf_file_attr){ |
3715 | .attr = evsel->core.attr, |
3716 | .ids = { |
3717 | .offset = evsel->id_offset, |
3718 | .size = evsel->core.ids * sizeof(u64), |
3719 | } |
3720 | }; |
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
3724 | free(ff.buf); |
3725 | return err; |
3726 | } |
3727 | } |
3728 | |
3729 | if (!header->data_offset) |
3730 | header->data_offset = lseek(fd, 0, SEEK_CUR); |
3731 | header->feat_offset = header->data_offset + header->data_size; |
3732 | |
3733 | if (at_exit) { |
3734 | err = perf_header__adds_write(header, evlist, fd, fc); |
3735 | if (err < 0) { |
3736 | free(ff.buf); |
3737 | return err; |
3738 | } |
3739 | } |
3740 | |
3741 | f_header = (struct perf_file_header){ |
3742 | .magic = PERF_MAGIC, |
3743 | .size = sizeof(f_header), |
3744 | .attr_size = sizeof(f_attr), |
3745 | .attrs = { |
3746 | .offset = attr_offset, |
3747 | .size = evlist->core.nr_entries * sizeof(f_attr), |
3748 | }, |
3749 | .data = { |
3750 | .offset = header->data_offset, |
3751 | .size = header->data_size, |
3752 | }, |
3753 | /* event_types is ignored, store zeros */ |
3754 | }; |
3755 | |
3756 | memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); |
3757 | |
3758 | lseek(fd, 0, SEEK_SET); |
	err = do_write(&ff, &f_header, sizeof(f_header));
	free(ff.buf);
	if (err < 0) {
		pr_debug("failed to write perf header\n");
3763 | return err; |
3764 | } |
3765 | lseek(fd, header->data_offset + header->data_size, SEEK_SET); |
3766 | |
3767 | return 0; |
3768 | } |
3769 | |
int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
3773 | { |
3774 | return perf_session__do_write_header(session, evlist, fd, at_exit, NULL); |
3775 | } |
3776 | |
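/*
 * A perf.data file starts with struct perf_file_header, followed by the
 * per-event sample id arrays (one u64 per id) and one struct
 * perf_file_attr per event, as written by perf_session__do_write_header()
 * above. The data offset computed below is simply the sum of those three
 * parts.
 */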
3777 | size_t perf_session__data_offset(const struct evlist *evlist) |
3778 | { |
3779 | struct evsel *evsel; |
3780 | size_t data_offset; |
3781 | |
3782 | data_offset = sizeof(struct perf_file_header); |
3783 | evlist__for_each_entry(evlist, evsel) { |
3784 | data_offset += evsel->core.ids * sizeof(u64); |
3785 | } |
3786 | data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr); |
3787 | |
3788 | return data_offset; |
3789 | } |
3790 | |
int perf_session__inject_header(struct perf_session *session,
				struct evlist *evlist,
				int fd,
				struct feat_copier *fc)
{
	return perf_session__do_write_header(session, evlist, fd, true, fc);
3797 | } |
3798 | |
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
3801 | { |
3802 | if (readn(fd, buf, size) <= 0) |
3803 | return -1; |
3804 | |
3805 | if (header->needs_swap) |
		mem_bswap_64(buf, size);
3807 | |
3808 | return 0; |
3809 | } |
3810 | |
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
3816 | { |
3817 | struct perf_file_section *feat_sec, *sec; |
3818 | int nr_sections; |
3819 | int sec_size; |
3820 | int feat; |
3821 | int err; |
3822 | |
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3824 | if (!nr_sections) |
3825 | return 0; |
3826 | |
3827 | feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); |
3828 | if (!feat_sec) |
3829 | return -1; |
3830 | |
3831 | sec_size = sizeof(*feat_sec) * nr_sections; |
3832 | |
3833 | lseek(fd, header->feat_offset, SEEK_SET); |
3834 | |
	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3836 | if (err < 0) |
3837 | goto out_free; |
3838 | |
3839 | for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { |
3840 | err = process(sec++, header, feat, fd, data); |
3841 | if (err < 0) |
3842 | goto out_free; |
3843 | } |
3844 | err = 0; |
3845 | out_free: |
3846 | free(feat_sec); |
3847 | return err; |
3848 | } |
3849 | |
3850 | static const int attr_file_abi_sizes[] = { |
3851 | [0] = PERF_ATTR_SIZE_VER0, |
3852 | [1] = PERF_ATTR_SIZE_VER1, |
3853 | [2] = PERF_ATTR_SIZE_VER2, |
3854 | [3] = PERF_ATTR_SIZE_VER3, |
3855 | [4] = PERF_ATTR_SIZE_VER4, |
3856 | 0, |
3857 | }; |
3858 | |
3859 | /* |
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz was used instead. But given that hdr_sz can vary
 * based on the ABI revision, we need to try all known header sizes in
 * both byte orders to detect the endianness.
3864 | */ |
3865 | static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) |
3866 | { |
3867 | uint64_t ref_size, attr_size; |
3868 | int i; |
3869 | |
3870 | for (i = 0 ; attr_file_abi_sizes[i]; i++) { |
3871 | ref_size = attr_file_abi_sizes[i] |
3872 | + sizeof(struct perf_file_section); |
3873 | if (hdr_sz != ref_size) { |
3874 | attr_size = bswap_64(hdr_sz); |
3875 | if (attr_size != ref_size) |
3876 | continue; |
3877 | |
3878 | ph->needs_swap = true; |
3879 | } |
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i, ph->needs_swap);
3883 | return 0; |
3884 | } |
3885 | /* could not determine endianness */ |
3886 | return -1; |
3887 | } |
3888 | |
3889 | #define PERF_PIPE_HDR_VER0 16 |
3890 | |
3891 | static const size_t attr_pipe_abi_sizes[] = { |
3892 | [0] = PERF_PIPE_HDR_VER0, |
3893 | 0, |
3894 | }; |
3895 | |
3896 | /* |
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness of the host recording the samples and of the host parsing
 * them is the same. This is not always the case, given that the pipe
 * output can be redirected into a file and analyzed on a different
 * machine with possibly a different endianness and perf_event ABI
 * revision in the perf tool itself.
3902 | */ |
3903 | static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) |
3904 | { |
3905 | u64 attr_size; |
3906 | int i; |
3907 | |
3908 | for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { |
3909 | if (hdr_sz != attr_pipe_abi_sizes[i]) { |
3910 | attr_size = bswap_64(hdr_sz); |
3911 | if (attr_size != hdr_sz) |
3912 | continue; |
3913 | |
3914 | ph->needs_swap = true; |
3915 | } |
3916 | pr_debug("Pipe ABI%d perf.data file detected\n" , i); |
3917 | return 0; |
3918 | } |
3919 | return -1; |
3920 | } |
3921 | |
3922 | bool is_perf_magic(u64 magic) |
3923 | { |
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3925 | || magic == __perf_magic2 |
3926 | || magic == __perf_magic2_sw) |
3927 | return true; |
3928 | |
3929 | return false; |
3930 | } |
3931 | |
3932 | static int check_magic_endian(u64 magic, uint64_t hdr_sz, |
3933 | bool is_pipe, struct perf_header *ph) |
3934 | { |
3935 | int ret; |
3936 | |
3937 | /* check for legacy format */ |
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
3942 | if (is_pipe) |
3943 | return try_all_pipe_abis(hdr_sz, ph); |
3944 | |
3945 | return try_all_file_abis(hdr_sz, ph); |
3946 | } |
3947 | /* |
3948 | * the new magic number serves two purposes: |
3949 | * - unique number to identify actual perf.data files |
3950 | * - encode endianness of file |
3951 | */ |
3952 | ph->version = PERF_HEADER_VERSION_2; |
3953 | |
3954 | /* check magic number with one endianness */ |
3955 | if (magic == __perf_magic2) |
3956 | return 0; |
3957 | |
3958 | /* check magic number with opposite endianness */ |
3959 | if (magic != __perf_magic2_sw) |
3960 | return -1; |
3961 | |
3962 | ph->needs_swap = true; |
3963 | |
3964 | return 0; |
3965 | } |
3966 | |
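/*
 * Read and validate the on-disk perf_file_header: detect the magic number
 * and endianness, byte-swap the fixed fields if needed, sanitize the
 * feature bitmap for files written by older tools or hosts with a
 * different word size, and fill in the in-memory perf_header offsets.
 */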
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
3969 | { |
3970 | ssize_t ret; |
3971 | |
3972 | lseek(fd, 0, SEEK_SET); |
3973 | |
3974 | ret = readn(fd, header, sizeof(*header)); |
3975 | if (ret <= 0) |
3976 | return -1; |
3977 | |
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
3981 | return -1; |
3982 | } |
3983 | |
3984 | if (ph->needs_swap) { |
		mem_bswap_64(header, offsetof(struct perf_file_header,
3986 | adds_features)); |
3987 | } |
3988 | |
3989 | if (header->size != sizeof(*header)) { |
3990 | /* Support the previous format */ |
3991 | if (header->size == offsetof(typeof(*header), adds_features)) |
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3993 | else |
3994 | return -1; |
3995 | } else if (ph->needs_swap) { |
3996 | /* |
3997 | * feature bitmap is declared as an array of unsigned longs -- |
3998 | * not good since its size can differ between the host that |
3999 | * generated the data file and the host analyzing the file. |
4000 | * |
4001 | * We need to handle endianness, but we don't know the size of |
4002 | * the unsigned long where the file was generated. Take a best |
		 * guess at determining it: try a 64-bit swap first (i.e., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
4009 | * clearing all feature bits and setting buildid. |
4010 | */ |
4011 | mem_bswap_64(src: &header->adds_features, |
4012 | BITS_TO_U64(HEADER_FEAT_BITS)); |
4013 | |
4014 | if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { |
4015 | /* unswap as u64 */ |
4016 | mem_bswap_64(src: &header->adds_features, |
4017 | BITS_TO_U64(HEADER_FEAT_BITS)); |
4018 | |
4019 | /* unswap as u32 */ |
			mem_bswap_32(&header->adds_features,
4021 | BITS_TO_U32(HEADER_FEAT_BITS)); |
4022 | } |
4023 | |
4024 | if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { |
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4026 | __set_bit(HEADER_BUILD_ID, header->adds_features); |
4027 | } |
4028 | } |
4029 | |
4030 | memcpy(&ph->adds_features, &header->adds_features, |
4031 | sizeof(ph->adds_features)); |
4032 | |
4033 | ph->data_offset = header->data.offset; |
4034 | ph->data_size = header->data.size; |
4035 | ph->feat_offset = header->data.offset + header->data.size; |
4036 | return 0; |
4037 | } |
4038 | |
4039 | static int perf_file_section__process(struct perf_file_section *section, |
4040 | struct perf_header *ph, |
4041 | int feat, int fd, void *data) |
4042 | { |
4043 | struct feat_fd fdd = { |
4044 | .fd = fd, |
4045 | .ph = ph, |
4046 | .size = section->size, |
4047 | .offset = section->offset, |
4048 | }; |
4049 | |
4050 | if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { |
4051 | pr_debug("Failed to lseek to %" PRIu64 " offset for feature " |
4052 | "%d, continuing...\n" , section->offset, feat); |
4053 | return 0; |
4054 | } |
4055 | |
4056 | if (feat >= HEADER_LAST_FEATURE) { |
4057 | pr_debug("unknown feature %d, continuing...\n" , feat); |
4058 | return 0; |
4059 | } |
4060 | |
4061 | if (!feat_ops[feat].process) |
4062 | return 0; |
4063 | |
4064 | return feat_ops[feat].process(&fdd, data); |
4065 | } |
4066 | |
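/*
 * Read the small pipe-mode file header, detect its endianness and, when
 * repiping, write the header back out through repipe_fd so downstream
 * consumers still see a valid stream.
 */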
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph,
				       struct perf_data *data,
				       bool repipe, int repipe_fd)
4071 | { |
4072 | struct feat_fd ff = { |
4073 | .fd = repipe_fd, |
4074 | .ph = ph, |
4075 | }; |
4076 | ssize_t ret; |
4077 | |
	ret = perf_data__read(data, header, sizeof(*header));
4079 | if (ret <= 0) |
4080 | return -1; |
4081 | |
	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
4084 | return -1; |
4085 | } |
4086 | |
4087 | if (ph->needs_swap) |
4088 | header->size = bswap_64(header->size); |
4089 | |
4090 | if (repipe && do_write(ff: &ff, buf: header, size: sizeof(*header)) < 0) |
4091 | return -1; |
4092 | |
4093 | return 0; |
4094 | } |
4095 | |
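/*
 * Parse a pipe-mode header for this session, optionally repiping it to
 * repipe_fd. Returns 0 on success and a negative value on an unrecognized
 * or truncated header.
 */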
static int perf_header__read_pipe(struct perf_session *session, int repipe_fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;
4100 | |
	if (perf_file_header__read_pipe(&f_header, header, session->data,
					session->repipe, repipe_fd) < 0) {
		pr_debug("incompatible file format\n");
4104 | return -EINVAL; |
4105 | } |
4106 | |
4107 | return f_header.size == sizeof(f_header) ? 0 : -1; |
4108 | } |
4109 | |
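/*
 * Read one on-disk perf_file_attr at the current file offset. Only the
 * ABI0-sized part of perf_event_attr is guaranteed to be present; any
 * extra bytes up to the size this tool knows about are read on top, and
 * the trailing perf_file_section describing the event IDs is read last.
 */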
4110 | static int read_attr(int fd, struct perf_header *ph, |
4111 | struct perf_file_attr *f_attr) |
4112 | { |
4113 | struct perf_event_attr *attr = &f_attr->attr; |
4114 | size_t sz, left; |
4115 | size_t our_sz = sizeof(f_attr->attr); |
4116 | ssize_t ret; |
4117 | |
4118 | memset(f_attr, 0, sizeof(*f_attr)); |
4119 | |
4120 | /* read minimal guaranteed structure */ |
4121 | ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); |
4122 | if (ret <= 0) { |
4123 | pr_debug("cannot read %d bytes of header attr\n" , |
4124 | PERF_ATTR_SIZE_VER0); |
4125 | return -1; |
4126 | } |
4127 | |
4128 | /* on file perf_event_attr size */ |
4129 | sz = attr->size; |
4130 | |
4131 | if (ph->needs_swap) |
4132 | sz = bswap_32(sz); |
4133 | |
4134 | if (sz == 0) { |
4135 | /* assume ABI0 */ |
4136 | sz = PERF_ATTR_SIZE_VER0; |
4137 | } else if (sz > our_sz) { |
4138 | pr_debug("file uses a more recent and unsupported ABI" |
4139 | " (%zu bytes extra)\n" , sz - our_sz); |
4140 | return -1; |
4141 | } |
4142 | /* what we have not yet read and that we know about */ |
4143 | left = sz - PERF_ATTR_SIZE_VER0; |
4144 | if (left) { |
4145 | void *ptr = attr; |
4146 | ptr += PERF_ATTR_SIZE_VER0; |
4147 | |
4148 | ret = readn(fd, ptr, left); |
4149 | } |
4150 | /* read perf_file_section, ids are read in caller */ |
4151 | ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); |
4152 | |
4153 | return ret <= 0 ? -1 : 0; |
4154 | } |
4155 | |
4156 | #ifdef HAVE_LIBTRACEEVENT |
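/*
 * Resolve the tracepoint format for an evsel from the tep handle built out
 * of the recorded tracing data, and synthesize a "system:name" event name
 * if none was set yet.
 */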
4157 | static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent) |
4158 | { |
4159 | struct tep_event *event; |
4160 | char bf[128]; |
4161 | |
4162 | /* already prepared */ |
4163 | if (evsel->tp_format) |
4164 | return 0; |
4165 | |
4166 | if (pevent == NULL) { |
4167 | pr_debug("broken or missing trace data\n" ); |
4168 | return -1; |
4169 | } |
4170 | |
4171 | event = tep_find_event(pevent, evsel->core.attr.config); |
4172 | if (event == NULL) { |
4173 | pr_debug("cannot find event format for %d\n" , (int)evsel->core.attr.config); |
4174 | return -1; |
4175 | } |
4176 | |
4177 | if (!evsel->name) { |
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
4179 | evsel->name = strdup(bf); |
4180 | if (evsel->name == NULL) |
4181 | return -1; |
4182 | } |
4183 | |
4184 | evsel->tp_format = event; |
4185 | return 0; |
4186 | } |
4187 | |
4188 | static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent) |
4189 | { |
4190 | struct evsel *pos; |
4191 | |
4192 | evlist__for_each_entry(evlist, pos) { |
4193 | if (pos->core.attr.type == PERF_TYPE_TRACEPOINT && |
4194 | evsel__prepare_tracepoint_event(pos, pevent)) |
4195 | return -1; |
4196 | } |
4197 | |
4198 | return 0; |
4199 | } |
4200 | #endif |
4201 | |
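/*
 * Read the perf.data header for this session: try the pipe-mode header
 * first (pipe-style data may also live in a regular file), then fall back
 * to the file format -- the perf_file_header, each perf_file_attr with its
 * sample IDs, and finally the optional feature sections.
 */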
int perf_session__read_header(struct perf_session *session, int repipe_fd)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
4207 | struct perf_file_attr f_attr; |
4208 | u64 f_id; |
4209 | int nr_attrs, nr_ids, i, j, err; |
4210 | int fd = perf_data__fd(data); |
4211 | |
4212 | session->evlist = evlist__new(); |
4213 | if (session->evlist == NULL) |
4214 | return -ENOMEM; |
4215 | |
4216 | session->evlist->env = &header->env; |
4217 | session->machines.host.env = &header->env; |
4218 | |
4219 | /* |
4220 | * We can read 'pipe' data event from regular file, |
4221 | * check for the pipe header regardless of source. |
4222 | */ |
4223 | err = perf_header__read_pipe(session, repipe_fd); |
4224 | if (!err || perf_data__is_pipe(data)) { |
4225 | data->is_pipe = true; |
4226 | return err; |
4227 | } |
4228 | |
	if (perf_file_header__read(&f_header, header, fd) < 0)
4230 | return -EINVAL; |
4231 | |
4232 | if (header->needs_swap && data->in_place_update) { |
4233 | pr_err("In-place update not supported when byte-swapping is required\n" ); |
4234 | return -EINVAL; |
4235 | } |
4236 | |
4237 | /* |
4238 | * Sanity check that perf.data was written cleanly; data size is |
4239 | * initialized to 0 and updated only if the on_exit function is run. |
4240 | * If data size is still 0 then the file contains only partial |
	 * information. Just warn the user and process as much of it as we can.
4242 | */ |
4243 | if (f_header.data.size == 0) { |
4244 | pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" |
4245 | "Was the 'perf record' command properly terminated?\n" , |
4246 | data->file.path); |
4247 | } |
4248 | |
4249 | if (f_header.attr_size == 0) { |
4250 | pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n" |
4251 | "Was the 'perf record' command properly terminated?\n" , |
4252 | data->file.path); |
4253 | return -EINVAL; |
4254 | } |
4255 | |
4256 | nr_attrs = f_header.attrs.size / f_header.attr_size; |
4257 | lseek(fd, f_header.attrs.offset, SEEK_SET); |
4258 | |
4259 | for (i = 0; i < nr_attrs; i++) { |
4260 | struct evsel *evsel; |
4261 | off_t tmp; |
4262 | |
		if (read_attr(fd, header, &f_attr) < 0)
4264 | goto out_errno; |
4265 | |
4266 | if (header->needs_swap) { |
4267 | f_attr.ids.size = bswap_64(f_attr.ids.size); |
4268 | f_attr.ids.offset = bswap_64(f_attr.ids.offset); |
			perf_event__attr_swap(&f_attr.attr);
4270 | } |
4271 | |
4272 | tmp = lseek(fd, 0, SEEK_CUR); |
		evsel = evsel__new(&f_attr.attr);
4274 | |
4275 | if (evsel == NULL) |
4276 | goto out_delete_evlist; |
4277 | |
4278 | evsel->needs_swap = header->needs_swap; |
4279 | /* |
4280 | * Do it before so that if perf_evsel__alloc_id fails, this |
4281 | * entry gets purged too at evlist__delete(). |
4282 | */ |
		evlist__add(session->evlist, evsel);
4284 | |
4285 | nr_ids = f_attr.ids.size / sizeof(u64); |
4286 | /* |
4287 | * We don't have the cpu and thread maps on the header, so |
4288 | * for allocating the perf_sample_id table we fake 1 cpu and |
4289 | * hattr->ids threads. |
4290 | */ |
4291 | if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids)) |
4292 | goto out_delete_evlist; |
4293 | |
4294 | lseek(fd, f_attr.ids.offset, SEEK_SET); |
4295 | |
4296 | for (j = 0; j < nr_ids; j++) { |
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
4298 | goto out_errno; |
4299 | |
4300 | perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id); |
4301 | } |
4302 | |
4303 | lseek(fd, tmp, SEEK_SET); |
4304 | } |
4305 | |
4306 | #ifdef HAVE_LIBTRACEEVENT |
4307 | perf_header__process_sections(header, fd, &session->tevent, |
4308 | perf_file_section__process); |
4309 | |
4310 | if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent)) |
4311 | goto out_delete_evlist; |
4312 | #else |
	perf_header__process_sections(header, fd, NULL, perf_file_section__process);
4314 | #endif |
4315 | |
4316 | return 0; |
4317 | out_errno: |
4318 | return -errno; |
4319 | |
4320 | out_delete_evlist: |
	evlist__delete(session->evlist);
4322 | session->evlist = NULL; |
4323 | return -ENOMEM; |
4324 | } |
4325 | |
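/*
 * Handle a PERF_RECORD_HEADER_FEATURE event in pipe mode: the feature
 * payload is carried inline in the event, so point a buffer-backed feat_fd
 * at it and reuse the regular feat_ops process/print callbacks.
 */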
4326 | int perf_event__process_feature(struct perf_session *session, |
4327 | union perf_event *event) |
4328 | { |
4329 | struct perf_tool *tool = session->tool; |
4330 | struct feat_fd ff = { .fd = 0 }; |
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
4332 | int type = fe->header.type; |
4333 | u64 feat = fe->feat_id; |
4334 | int ret = 0; |
4335 | |
4336 | if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { |
4337 | pr_warning("invalid record type %d in pipe-mode\n" , type); |
4338 | return 0; |
4339 | } |
4340 | if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) { |
4341 | pr_warning("invalid record type %d in pipe-mode\n" , type); |
4342 | return -1; |
4343 | } |
4344 | |
4345 | if (!feat_ops[feat].process) |
4346 | return 0; |
4347 | |
4348 | ff.buf = (void *)fe->data; |
4349 | ff.size = event->header.size - sizeof(*fe); |
4350 | ff.ph = &session->header; |
4351 | |
4352 | if (feat_ops[feat].process(&ff, NULL)) { |
4353 | ret = -1; |
4354 | goto out; |
4355 | } |
4356 | |
4357 | if (!feat_ops[feat].print || !tool->show_feat_hdr) |
4358 | goto out; |
4359 | |
4360 | if (!feat_ops[feat].full_only || |
4361 | tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { |
4362 | feat_ops[feat].print(&ff, stdout); |
4363 | } else { |
4364 | fprintf(stdout, "# %s info available, use -I to display\n" , |
4365 | feat_ops[feat].name); |
4366 | } |
4367 | out: |
	free_event_desc(ff.events);
4369 | return ret; |
4370 | } |
4371 | |
4372 | size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) |
4373 | { |
4374 | struct perf_record_event_update *ev = &event->event_update; |
4375 | struct perf_cpu_map *map; |
4376 | size_t ret; |
4377 | |
	ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit: %s\n", ev->unit);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name: %s\n", ev->name);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev->cpus.cpus);
		if (map) {
			ret += cpu_map__fprintf(map, fp);
			perf_cpu_map__put(map);
		} else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}
4404 | |
4405 | return ret; |
4406 | } |
4407 | |
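/*
 * Handle a PERF_RECORD_HEADER_ATTR event in pipe mode. As implied by the
 * size arithmetic below, the record carries a perf_event_attr followed by
 * the u64 sample IDs of the event, so:
 *
 *   n_ids = (header.size - sizeof(header) - attr.size) / sizeof(u64)
 *
 * The IDs themselves are located with perf_record_header_attr_id().
 */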
4408 | int perf_event__process_attr(struct perf_tool *tool __maybe_unused, |
4409 | union perf_event *event, |
4410 | struct evlist **pevlist) |
4411 | { |
4412 | u32 i, n_ids; |
4413 | u64 *ids; |
4414 | struct evsel *evsel; |
4415 | struct evlist *evlist = *pevlist; |
4416 | |
4417 | if (evlist == NULL) { |
4418 | *pevlist = evlist = evlist__new(); |
4419 | if (evlist == NULL) |
4420 | return -ENOMEM; |
4421 | } |
4422 | |
	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
4428 | |
4429 | n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size; |
4430 | n_ids = n_ids / sizeof(u64); |
4431 | /* |
4432 | * We don't have the cpu and thread maps on the header, so |
4433 | * for allocating the perf_sample_id table we fake 1 cpu and |
4434 | * hattr->ids threads. |
4435 | */ |
4436 | if (perf_evsel__alloc_id(&evsel->core, 1, n_ids)) |
4437 | return -ENOMEM; |
4438 | |
4439 | ids = perf_record_header_attr_id(event); |
4440 | for (i = 0; i < n_ids; i++) { |
4441 | perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]); |
4442 | } |
4443 | |
4444 | return 0; |
4445 | } |
4446 | |
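/*
 * Handle a PERF_RECORD_EVENT_UPDATE event in pipe mode: look up the evsel
 * by its ID and apply the updated unit, name, scale or CPU map to it.
 */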
4447 | int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, |
4448 | union perf_event *event, |
4449 | struct evlist **pevlist) |
4450 | { |
4451 | struct perf_record_event_update *ev = &event->event_update; |
4452 | struct evlist *evlist; |
4453 | struct evsel *evsel; |
4454 | struct perf_cpu_map *map; |
4455 | |
4456 | if (dump_trace) |
4457 | perf_event__fprintf_event_update(event, stdout); |
4458 | |
4459 | if (!pevlist || *pevlist == NULL) |
4460 | return -EINVAL; |
4461 | |
4462 | evlist = *pevlist; |
4463 | |
	evsel = evlist__id2evsel(evlist, ev->id);
4465 | if (evsel == NULL) |
4466 | return -EINVAL; |
4467 | |
4468 | switch (ev->type) { |
4469 | case PERF_EVENT_UPDATE__UNIT: |
4470 | free((char *)evsel->unit); |
4471 | evsel->unit = strdup(ev->unit); |
4472 | break; |
4473 | case PERF_EVENT_UPDATE__NAME: |
4474 | free(evsel->name); |
4475 | evsel->name = strdup(ev->name); |
4476 | break; |
4477 | case PERF_EVENT_UPDATE__SCALE: |
4478 | evsel->scale = ev->scale.scale; |
4479 | break; |
4480 | case PERF_EVENT_UPDATE__CPUS: |
		map = cpu_map__new_data(&ev->cpus.cpus);
4482 | if (map) { |
4483 | perf_cpu_map__put(evsel->core.own_cpus); |
4484 | evsel->core.own_cpus = map; |
4485 | } else |
4486 | pr_err("failed to get event_update cpus\n" ); |
4487 | default: |
4488 | break; |
4489 | } |
4490 | |
4491 | return 0; |
4492 | } |
4493 | |
4494 | #ifdef HAVE_LIBTRACEEVENT |
4495 | int perf_event__process_tracing_data(struct perf_session *session, |
4496 | union perf_event *event) |
4497 | { |
4498 | ssize_t size_read, padding, size = event->tracing_data.size; |
4499 | int fd = perf_data__fd(session->data); |
4500 | char buf[BUFSIZ]; |
4501 | |
4502 | /* |
4503 | * The pipe fd is already in proper place and in any case |
4504 | * we can't move it, and we'd screw the case where we read |
4505 | * 'pipe' data from regular file. The trace_report reads |
4506 | * data from 'fd' so we need to set it directly behind the |
4507 | * event, where the tracing data starts. |
4508 | */ |
4509 | if (!perf_data__is_pipe(session->data)) { |
4510 | off_t offset = lseek(fd, 0, SEEK_CUR); |
4511 | |
4512 | /* setup for reading amidst mmap */ |
4513 | lseek(fd, offset + sizeof(struct perf_record_header_tracing_data), |
4514 | SEEK_SET); |
4515 | } |
4516 | |
4517 | size_read = trace_report(fd, &session->tevent, |
4518 | session->repipe); |
4519 | padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; |
4520 | |
4521 | if (readn(fd, buf, padding) < 0) { |
4522 | pr_err("%s: reading input file" , __func__); |
4523 | return -1; |
4524 | } |
4525 | if (session->repipe) { |
4526 | int retw = write(STDOUT_FILENO, buf, padding); |
4527 | if (retw <= 0 || retw != padding) { |
4528 | pr_err("%s: repiping tracing data padding" , __func__); |
4529 | return -1; |
4530 | } |
4531 | } |
4532 | |
4533 | if (size_read + padding != size) { |
4534 | pr_err("%s: tracing data size mismatch" , __func__); |
4535 | return -1; |
4536 | } |
4537 | |
4538 | evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent); |
4539 | |
4540 | return size_read + padding; |
4541 | } |
4542 | #endif |
4543 | |
4544 | int perf_event__process_build_id(struct perf_session *session, |
4545 | union perf_event *event) |
4546 | { |
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
4549 | session); |
4550 | return 0; |
4551 | } |
4552 | |