/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE      = 0,
        PERF_TYPE_SOFTWARE      = 1,
        PERF_TYPE_TRACEPOINT    = 2,
        PERF_TYPE_HW_CACHE      = 3,
        PERF_TYPE_RAW           = 4,
        PERF_TYPE_BREAKPOINT    = 5,

        PERF_TYPE_MAX,          /* non-ABI */
};

/*
 * attr.config layout for the types PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:  0xEEEEEEEE000000AA
 *                      AA: hardware event ID
 *                      EEEEEEEE: PMU type ID
 * PERF_TYPE_HW_CACHE:  0xEEEEEEEE00DDCCBB
 *                      BB: hardware cache ID
 *                      CC: hardware cache op ID
 *                      DD: hardware cache op result ID
 *                      EEEEEEEE: PMU type ID
 * If the PMU type ID is 0, PERF_TYPE_RAW is applied.
 */
#define PERF_PMU_TYPE_SHIFT     32
#define PERF_HW_EVENT_MASK      0xffffffff
/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES            = 9,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *      { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *      { read, write, prefetch } x
 *      { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,
        PERF_COUNT_HW_CACHE_NODE                = 6,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
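
/*
 * Illustrative sketch (not part of the ABI): encoding an L1-D read-miss
 * event using the 0x00DDCCBB byte layout documented above (cache ID in
 * bits 0-7, op ID in bits 8-15, result ID in bits 16-23).
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */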

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * software events of the kernel (and allow profiling them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,
        PERF_COUNT_SW_DUMMY                     = 9,
        PERF_COUNT_SW_BPF_OUTPUT                = 10,
        PERF_COUNT_SW_CGROUP_SWITCHES           = 11,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};
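
/*
 * Illustrative sketch (not part of the ABI): opening a software event with
 * the perf_event_open() syscall, for which glibc provides no wrapper, and
 * reading the raw count. Error handling is omitted for brevity.
 *
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *
 *      struct perf_event_attr attr = { 0 };
 *      __u64 count;
 *      int fd;
 *
 *      attr.type   = PERF_TYPE_SOFTWARE;
 *      attr.size   = sizeof(attr);
 *      attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
 *
 *      fd = syscall(__NR_perf_event_open, &attr,
 *                   0, -1, -1, 0);     // pid 0: calling task; cpu -1: any;
 *                                      // group_fd -1: none; flags 0
 *      read(fd, &count, sizeof(count));
 */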

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK                = 1U << 11,
        PERF_SAMPLE_REGS_USER                   = 1U << 12,
        PERF_SAMPLE_STACK_USER                  = 1U << 13,
        PERF_SAMPLE_WEIGHT                      = 1U << 14,
        PERF_SAMPLE_DATA_SRC                    = 1U << 15,
        PERF_SAMPLE_IDENTIFIER                  = 1U << 16,
        PERF_SAMPLE_TRANSACTION                 = 1U << 17,
        PERF_SAMPLE_REGS_INTR                   = 1U << 18,
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,
        PERF_SAMPLE_AUX                         = 1U << 20,
        PERF_SAMPLE_CGROUP                      = 1U << 21,
        PERF_SAMPLE_DATA_PAGE_SIZE              = 1U << 22,
        PERF_SAMPLE_CODE_PAGE_SIZE              = 1U << 23,
        PERF_SAMPLE_WEIGHT_STRUCT               = 1U << 24,

        PERF_SAMPLE_MAX                         = 1U << 25,     /* non-ABI */
};

#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_USER_SHIFT           = 0,  /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL_SHIFT         = 1,  /* kernel branches */
        PERF_SAMPLE_BRANCH_HV_SHIFT             = 2,  /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY_SHIFT            = 3,  /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT       = 4,  /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT     = 5,  /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL_SHIFT       = 6,  /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT       = 7,  /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX_SHIFT          = 8,  /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX_SHIFT          = 9,  /* not in transaction */
        PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */

        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
        PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT       = 12, /* indirect jumps */
        PERF_SAMPLE_BRANCH_CALL_SHIFT           = 13, /* direct call */

        PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT       = 14, /* no flags */
        PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT      = 15, /* no cycles */

        PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT      = 16, /* save branch type */

        PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT       = 17, /* save low-level index of raw branch records */

        PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT      = 18, /* save privilege mode */

        PERF_SAMPLE_BRANCH_COUNTERS_SHIFT       = 19, /* save occurrences of events on a branch */

        PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
};

enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER         = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
        PERF_SAMPLE_BRANCH_KERNEL       = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
        PERF_SAMPLE_BRANCH_HV           = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

        PERF_SAMPLE_BRANCH_ANY          = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
        PERF_SAMPLE_BRANCH_IND_CALL     = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
        PERF_SAMPLE_BRANCH_IN_TX        = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
        PERF_SAMPLE_BRANCH_NO_TX        = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
        PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

        PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
        PERF_SAMPLE_BRANCH_IND_JUMP     = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
        PERF_SAMPLE_BRANCH_CALL         = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

        PERF_SAMPLE_BRANCH_NO_FLAGS     = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
        PERF_SAMPLE_BRANCH_NO_CYCLES    = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

        PERF_SAMPLE_BRANCH_TYPE_SAVE    = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

        PERF_SAMPLE_BRANCH_HW_INDEX     = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

        PERF_SAMPLE_BRANCH_PRIV_SAVE    = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,

        PERF_SAMPLE_BRANCH_COUNTERS     = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,

        PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
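
/*
 * Illustrative sketch (not part of the ABI): requesting a branch stack in
 * samples. The priv level bits are left unset here, so they default to the
 * event's own priv level as described above.
 *
 *      attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *      attr.branch_sample_type  = PERF_SAMPLE_BRANCH_ANY;
 */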

/*
 * Common flow change classification
 */
enum {
        PERF_BR_UNKNOWN         = 0,    /* unknown */
        PERF_BR_COND            = 1,    /* conditional */
        PERF_BR_UNCOND          = 2,    /* unconditional */
        PERF_BR_IND             = 3,    /* indirect */
        PERF_BR_CALL            = 4,    /* function call */
        PERF_BR_IND_CALL        = 5,    /* indirect function call */
        PERF_BR_RET             = 6,    /* function return */
        PERF_BR_SYSCALL         = 7,    /* syscall */
        PERF_BR_SYSRET          = 8,    /* syscall return */
        PERF_BR_COND_CALL       = 9,    /* conditional function call */
        PERF_BR_COND_RET        = 10,   /* conditional function return */
        PERF_BR_ERET            = 11,   /* exception return */
        PERF_BR_IRQ             = 12,   /* irq */
        PERF_BR_SERROR          = 13,   /* system error */
        PERF_BR_NO_TX           = 14,   /* not in transaction */
        PERF_BR_EXTEND_ABI      = 15,   /* extend ABI */
        PERF_BR_MAX,
};

/*
 * Common branch speculation outcome classification
 */
enum {
        PERF_BR_SPEC_NA                 = 0,    /* Not available */
        PERF_BR_SPEC_WRONG_PATH         = 1,    /* Speculative but on wrong path */
        PERF_BR_NON_SPEC_CORRECT_PATH   = 2,    /* Non-speculative but on correct path */
        PERF_BR_SPEC_CORRECT_PATH       = 3,    /* Speculative and on correct path */
        PERF_BR_SPEC_MAX,
};

enum {
        PERF_BR_NEW_FAULT_ALGN          = 0,    /* Alignment fault */
        PERF_BR_NEW_FAULT_DATA          = 1,    /* Data fault */
        PERF_BR_NEW_FAULT_INST          = 2,    /* Inst fault */
        PERF_BR_NEW_ARCH_1              = 3,    /* Architecture specific */
        PERF_BR_NEW_ARCH_2              = 4,    /* Architecture specific */
        PERF_BR_NEW_ARCH_3              = 5,    /* Architecture specific */
        PERF_BR_NEW_ARCH_4              = 6,    /* Architecture specific */
        PERF_BR_NEW_ARCH_5              = 7,    /* Architecture specific */
        PERF_BR_NEW_MAX,
};

enum {
        PERF_BR_PRIV_UNKNOWN    = 0,
        PERF_BR_PRIV_USER       = 1,
        PERF_BR_PRIV_KERNEL    = 2,
        PERF_BR_PRIV_HV         = 3,
};

#define PERF_BR_ARM64_FIQ               PERF_BR_NEW_ARCH_1
#define PERF_BR_ARM64_DEBUG_HALT        PERF_BR_NEW_ARCH_2
#define PERF_BR_ARM64_DEBUG_EXIT        PERF_BR_NEW_ARCH_3
#define PERF_BR_ARM64_DEBUG_INST        PERF_BR_NEW_ARCH_4
#define PERF_BR_ARM64_DEBUG_DATA        PERF_BR_NEW_ARCH_5

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE       = 0,
        PERF_SAMPLE_REGS_ABI_32         = 1,
        PERF_SAMPLE_REGS_ABI_64         = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Abort is synchronous with the instruction */
        PERF_TXN_ASYNC          = (1 << 3), /* Abort is not related to the instruction */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* Bits 32..63 are reserved for the abort code. */

        PERF_TXN_ABORT_MASK     = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT    = 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *        { u64         lost;         } && PERF_FORMAT_LOST
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *          { u64       lost;         } && PERF_FORMAT_LOST
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED  = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING  = 1U << 1,
        PERF_FORMAT_ID                  = 1U << 2,
        PERF_FORMAT_GROUP               = 1U << 3,
        PERF_FORMAT_LOST                = 1U << 4,

        PERF_FORMAT_MAX                 = 1U << 5,      /* non-ABI */
};
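
/*
 * Illustrative sketch (not part of the ABI): reading one non-group counter
 * opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * and scaling the raw count for multiplexing, per the layout above.
 *
 *      struct { __u64 value, time_enabled, time_running; } rf;
 *      double scaled;
 *
 *      read(fd, &rf, sizeof(rf));
 *      scaled = rf.time_running ?
 *               (double)rf.value * rf.time_enabled / rf.time_running : 0.0;
 */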

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1     72      /* add: config2 */
#define PERF_ATTR_SIZE_VER2     80      /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3     96      /* add: sample_regs_user */
                                        /* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4     104     /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5     112     /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6     120     /* add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7     128     /* add: sig_data */
#define PERF_ATTR_SIZE_VER8     136     /* add: config3 */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *                    should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32   size;

        /*
         * Type specific configuration information.
         */
        __u64   config;

        union {
                __u64   sample_period;
                __u64   sample_freq;
        };

        __u64   sample_type;
        __u64   read_format;

        __u64   disabled        : 1, /* off by default         */
                inherit         : 1, /* children inherit it    */
                pinned          : 1, /* must always be on PMU  */
                exclusive       : 1, /* only group on PMU      */
                exclude_user    : 1, /* don't count user       */
                exclude_kernel  : 1, /* ditto kernel           */
                exclude_hv      : 1, /* ditto hypervisor       */
                exclude_idle    : 1, /* don't count when idle  */
                mmap            : 1, /* include mmap data      */
                comm            : 1, /* include comm data      */
                freq            : 1, /* use freq, not period   */
                inherit_stat    : 1, /* per task counts        */
                enable_on_exec  : 1, /* next exec enables      */
                task            : 1, /* trace fork/exit        */
                watermark       : 1, /* wakeup_watermark       */
                /*
                 * precise_ip:
                 *
                 *  0 - SAMPLE_IP can have arbitrary skid
                 *  1 - SAMPLE_IP must have constant skid
                 *  2 - SAMPLE_IP requested to have 0 skid
                 *  3 - SAMPLE_IP must have 0 skid
                 *
                 * See also PERF_RECORD_MISC_EXACT_IP
                 */
                precise_ip      : 2, /* skid constraint        */
                mmap_data       : 1, /* non-exec mmap data     */
                sample_id_all   : 1, /* sample_type all events */

                exclude_host    : 1, /* don't count in host    */
                exclude_guest   : 1, /* don't count in guest   */

                exclude_callchain_kernel : 1, /* exclude kernel callchains */
                exclude_callchain_user   : 1, /* exclude user callchains */
                mmap2           : 1, /* include mmap with inode data */
                comm_exec       : 1, /* flag comm events that are due to an exec */
                use_clockid     : 1, /* use @clockid for time fields */
                context_switch  : 1, /* context switch data */
                write_backward  : 1, /* write ring buffer from end to beginning */
                namespaces      : 1, /* include namespaces data */
                ksymbol         : 1, /* include ksymbol events */
                bpf_event       : 1, /* include bpf events */
                aux_output      : 1, /* generate AUX records instead of events */
                cgroup          : 1, /* include cgroup events */
                text_poke       : 1, /* include text poke events */
                build_id        : 1, /* use build id in mmap2 events */
                inherit_thread  : 1, /* children only inherit if cloned with CLONE_THREAD */
                remove_on_exec  : 1, /* event is removed from task on exec */
                sigtrap         : 1, /* send synchronous SIGTRAP on event */
                __reserved_1    : 26;

        union {
                __u32   wakeup_events;    /* wakeup every n events */
                __u32   wakeup_watermark; /* bytes before wakeup */
        };

        __u32   bp_type;
        union {
                __u64   bp_addr;
                __u64   kprobe_func;  /* for perf_kprobe */
                __u64   uprobe_path;  /* for perf_uprobe */
                __u64   config1;      /* extension of config */
        };
        union {
                __u64   bp_len;
                __u64   kprobe_addr;  /* when kprobe_func == NULL */
                __u64   probe_offset; /* for perf_[k,u]probe */
                __u64   config2;      /* extension of config1 */
        };
        __u64   branch_sample_type;   /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32   sample_stack_user;

        __s32   clockid;
        /*
         * Defines set of regs to dump for each sample
         * state captured on:
         *  - precise = 0: PMU interrupt
         *  - precise > 0: sampled instruction
         *
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_intr;

        /*
         * Wakeup watermark for AUX area
         */
        __u32   aux_watermark;
        __u16   sample_max_stack;
        __u16   __reserved_2;
        __u32   aux_sample_size;
        __u32   __reserved_3;

        /*
         * User provided data if sigtrap=1, passed back to user via
         * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
         * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
         * truncated accordingly on 32-bit architectures.
         */
        __u64   sig_data;

        __u64   config3; /* extension of config2 */
};
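
/*
 * Illustrative sketch (not part of the ABI): a typical sampling
 * configuration, asking for 4000 samples/second with IP, TID and
 * timestamp in each PERF_RECORD_SAMPLE.
 *
 *      struct perf_event_attr attr = { 0 };
 *
 *      attr.type        = PERF_TYPE_HARDWARE;
 *      attr.size        = sizeof(attr);        // fwd/bwd compat
 *      attr.config      = PERF_COUNT_HW_CPU_CYCLES;
 *      attr.freq        = 1;
 *      attr.sample_freq = 4000;
 *      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
 *      attr.disabled    = 1;                   // enable later via ioctl
 */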

/*
 * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
 * to query BPF programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
        /*
         * Length of the ids[] array below.
         */
        __u32   ids_len;
        /*
         * Set by the kernel to indicate the number of
         * available programs.
         */
        __u32   prog_cnt;
        /*
         * User provided buffer to store program ids.
         */
        __u32   ids[];
};
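
/*
 * Illustrative sketch (not part of the ABI): querying up to 16 attached BPF
 * program IDs. The flexible ids[] array is allocated by the caller and its
 * capacity reported in ids_len; the kernel fills prog_cnt.
 *
 *      struct perf_event_query_bpf *q;
 *
 *      q = calloc(1, sizeof(*q) + 16 * sizeof(__u32));
 *      q->ids_len = 16;
 *      if (ioctl(fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
 *              ...             // q->prog_cnt entries valid in q->ids[]
 */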

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE                   _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE                  _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH                  _IO ('$', 2)
#define PERF_EVENT_IOC_RESET                    _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD                   _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT               _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER               _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID                       _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF                  _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT             _IOW('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF                _IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES        _IOW('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
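
/*
 * Illustrative sketch (not part of the ABI): resetting and enabling a whole
 * event group through its leader fd; PERF_IOC_FLAG_GROUP makes the ioctl
 * apply to all group members instead of just the one event.
 *
 *      ioctl(group_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *      ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */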

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier();
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_user_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     index = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */
        union {
                __u64   capabilities;
                struct {
                        __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                                cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */

                                cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
                                cap_user_time           : 1, /* The time_{shift,mult,offset} fields are used */
                                cap_user_time_zero      : 1, /* The time_zero field is used */
                                cap_user_time_short     : 1, /* the time_{cycle,mask} fields are used */
                                cap_____res             : 58;
                };
        };

        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         *   count += pmc;
         */
        __u16   pmc_width;

        /*
         * If cap_user_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   u64 quot, rem;
         *   u64 delta;
         *
         *   quot  = (cyc >> time_shift);
         *   rem   = cyc & (((u64)1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *           ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly running (if index), improving the scaling:
         *
         *   enabled += delta;
         *   if (index)
         *     running += delta;
         *
         *   quot  = count / running;
         *   rem   = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16   time_shift;
        __u32   time_mult;
        __u64   time_offset;
        /*
         * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
         * calculated from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot      = cyc >> time_shift;
         *   rem       = cyc & (((u64)1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;

        __u32   size;                   /* Header size up to __reserved[] fields. */
        __u32   __reserved_1;

        /*
         * If cap_user_time_short, the hardware clock is less than 64 bits
         * wide and we must compute the 'cyc' value, as used by cap_user_time,
         * as:
         *
         *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
         *
         * NOTE: this form is explicitly chosen such that cap_user_time_short
         *       is a correction on top of cap_user_time, and code that doesn't
         *       know about cap_user_time_short still works under the
         *       assumption the counter doesn't wrap.
         */
        __u64   time_cycles;
        __u64   time_mask;

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u8    __reserved[116*8];      /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an smp_rmb(),
         * after reading this value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by user space to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not overwrite unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         *
         * data_{offset,size} indicate the location and size of the perf record
         * buffer within the mmapped area.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
        __u64   data_offset;            /* where the buffer starts */
        __u64   data_size;              /* data buffer size */

        /*
         * AUX area is defined by aux_{offset,size} fields that should be set
         * by user space, so that
         *
         *   aux_offset >= data_offset + data_size
         *
         * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
         *
         * Ring buffer pointers aux_{head,tail} have the same semantics as
         * data_{head,tail} and same ordering rules apply.
         */
        __u64   aux_head;
        __u64   aux_tail;
        __u64   aux_offset;
        __u64   aux_size;
};
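
/*
 * Illustrative sketch (not part of the ABI): the user-space side of the
 * data_{head,tail} protocol described above. GCC/Clang __atomic builtins
 * stand in for the smp_rmb()/smp_mb() barriers; "pc" (a pointer to the
 * mmap()ed perf_event_mmap_page) is an assumed variable.
 *
 *      __u64 head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
 *
 *      ...     // consume records in [pc->data_tail, head), see below
 *
 *      __atomic_store_n(&pc->data_tail, head, __ATOMIC_RELEASE);
 */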

/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 *  Where:
 *    0-2     CPUMODE_MASK
 *
 *    C       PROC_MAP_PARSE_TIMEOUT
 *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *    E       MMAP_BUILD_ID / EXACT_IP / SWITCH_OUT_PREEMPT
 *    F       (reserved)
 */

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
 * The following PERF_RECORD_MISC_* flags are used on different
 * events, so they can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA              (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC              (1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC              (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT             (1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that the thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that the mmap2 event carries build id data.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT     (1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID          (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};
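
/*
 * Illustrative sketch (not part of the ABI): walking the records between
 * data_tail and data_head. Each record starts with a perf_event_header
 * whose size field gives the total record length. "base" (the start of the
 * data buffer, i.e. mapping + data_offset) and "data_size" (a power of two)
 * are assumed variables; records wrapping past the buffer end are ignored
 * here for brevity.
 *
 *      while (tail < head) {
 *              struct perf_event_header *hdr =
 *                      (void *)(base + (tail & (data_size - 1)));
 *
 *              switch (hdr->type) {
 *              case PERF_RECORD_SAMPLE:
 *                      ...     // decode fields per attr.sample_type
 *                      break;
 *              }
 *              tail += hdr->size;
 *      }
 */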

struct perf_ns_link_info {
        __u64   dev;
        __u64   ino;
};

enum {
        NET_NS_INDEX            = 0,
        UTS_NS_INDEX            = 1,
        IPC_NS_INDEX            = 2,
        PID_NS_INDEX            = 3,
        USER_NS_INDEX           = 4,
        MNT_NS_INDEX            = 5,
        CGROUP_NS_INDEX         = 6,

        NR_NAMESPACES,          /* number of available namespaces */
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are stashed
         * just after the perf_event_header and the fields already present for
         * the record type, i.e. at the end of the payload. That way a newer
         * perf.data file will be supported by older perf tools, with these new
         * optional fields being ignored.
         *
         * struct sample_id {
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate user-space IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      #
         *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *      # is fixed relative to header.
         *      #
         *
         *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt to
         *      # the stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size];} && PERF_SAMPLE_RAW
         *
         *      { u64                   nr;
         *        { u64                 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
         *        { u64                 from, to, flags } lbr[nr];
         *        #
         *        # The format of the counters is decided by the
         *        # "branch_counter_nr" and "branch_counter_width",
         *        # which are defined in the ABI.
         *        #
         *        { u64                 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
         *      } && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *      { u64                   size;
         *        char                  data[size];
         *        u64                   dyn_size; } && PERF_SAMPLE_STACK_USER
         *
         *      { union perf_sample_weight
         *       {
         *              u64             full; && PERF_SAMPLE_WEIGHT
         *      #if defined(__LITTLE_ENDIAN_BITFIELD)
         *              struct {
         *                      u32     var1_dw;
         *                      u16     var2_w;
         *                      u16     var3_w;
         *              } && PERF_SAMPLE_WEIGHT_STRUCT
         *      #elif defined(__BIG_ENDIAN_BITFIELD)
         *              struct {
         *                      u16     var3_w;
         *                      u16     var2_w;
         *                      u32     var1_dw;
         *              } && PERF_SAMPLE_WEIGHT_STRUCT
         *      #endif
         *       }
         *      }
         *      { u64                   data_src;    } && PERF_SAMPLE_DATA_SRC
         *      { u64                   transaction; } && PERF_SAMPLE_TRANSACTION
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
         *      { u64                   phys_addr;} && PERF_SAMPLE_PHYS_ADDR
         *      { u64                   size;
         *        char                  data[size]; } && PERF_SAMPLE_AUX
         *      { u64                   data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
         *      { u64                   code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        /*
         * The MMAP2 records are an augmented version of MMAP; they add
         * maj, min, ino numbers to be used to uniquely identify each mapping.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      union {
         *              struct {
         *                      u32             maj;
         *                      u32             min;
         *                      u64             ino;
         *                      u64             ino_generation;
         *              };
         *              struct {
         *                      u8              build_id_size;
         *                      u8              __reserved_1;
         *                      u16             __reserved_2;
         *                      u8              build_id[20];
         *              };
         *      };
         *      u32                             prot, flags;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP2                       = 10,

        /*
         * Records that new data landed in the AUX buffer part.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u64                             aux_offset;
         *      u64                             aux_size;
         *      u64                             flags;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_AUX                         = 11,

        /*
         * Indicates that instruction trace has started.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid;
         *      u32                             tid;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_ITRACE_START                = 12,

        /*
         * Records the dropped/lost sample number.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u64                             lost;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_LOST_SAMPLES                = 13,

        /*
         * Records a context switch in or out (flagged by
         * PERF_RECORD_MISC_SWITCH_OUT). See also
         * PERF_RECORD_SWITCH_CPU_WIDE.
         *
         * struct {
         *      struct perf_event_header        header;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_SWITCH                      = 14,

        /*
         * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
         * next_prev_tid that are the next (switching out) or previous
         * (switching in) pid/tid.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u32                             next_prev_pid;
         *      u32                             next_prev_tid;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_SWITCH_CPU_WIDE             = 15,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid;
         *      u32                             tid;
         *      u64                             nr_namespaces;
         *      { u64                           dev, inode; } [nr_namespaces];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_NAMESPACES                  = 16,

        /*
         * Record ksymbol register/unregister events:
         *
         * struct {
         *      struct perf_event_header        header;
         *      u64                             addr;
         *      u32                             len;
         *      u16                             ksym_type;
         *      u16                             flags;
         *      char                            name[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_KSYMBOL                     = 17,

        /*
         * Record BPF events:
         *  enum perf_bpf_event_type {
         *      PERF_BPF_EVENT_UNKNOWN          = 0,
         *      PERF_BPF_EVENT_PROG_LOAD        = 1,
         *      PERF_BPF_EVENT_PROG_UNLOAD      = 2,
         *  };
         *
         * struct {
         *      struct perf_event_header        header;
         *      u16                             type;
         *      u16                             flags;
         *      u32                             id;
         *      u8                              tag[BPF_TAG_SIZE];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_BPF_EVENT                   = 18,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      char                            path[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_CGROUP                      = 19,

        /*
         * Records changes to kernel text, i.e. self-modifying code.
         * 'old_len' is the number of old bytes, 'new_len' is the number of
         * new bytes. Either 'old_len' or 'new_len' may be zero to indicate,
         * for example, the addition or removal of a trampoline. 'bytes'
         * contains the old bytes followed immediately by the new bytes.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u64                             addr;
         *      u16                             old_len;
         *      u16                             new_len;
         *      u8                              bytes[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_TEXT_POKE                   = 20,

        /*
         * Data written to the AUX area by hardware due to aux_output, may need
         * to be matched to the event by an architecture-specific hardware ID.
         * This records the hardware ID, but requires sample_id to provide the
         * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
         * records from multiple events.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u64                             hw_id;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_AUX_OUTPUT_HW_ID            = 21,

        PERF_RECORD_MAX,                        /* non-ABI */
};
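
/*
 * Illustrative sketch (not part of the ABI): the concrete layout of a
 * PERF_RECORD_SAMPLE produced with sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME. The fields appear in the bit order
 * documented above.
 *
 *      struct sample {
 *              struct perf_event_header header;
 *              __u64 ip;               // PERF_SAMPLE_IP
 *              __u32 pid, tid;         // PERF_SAMPLE_TID
 *              __u64 time;             // PERF_SAMPLE_TIME
 *      };
 */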

enum perf_record_ksymbol_type {
        PERF_RECORD_KSYMBOL_TYPE_UNKNOWN        = 0,
        PERF_RECORD_KSYMBOL_TYPE_BPF            = 1,
        /*
         * Out of line code such as kprobe-replaced instructions or optimized
         * kprobes or ftrace trampolines.
         */
        PERF_RECORD_KSYMBOL_TYPE_OOL            = 2,
        PERF_RECORD_KSYMBOL_TYPE_MAX            /* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER    (1 << 0)

enum perf_bpf_event_type {
        PERF_BPF_EVENT_UNKNOWN          = 0,
        PERF_BPF_EVENT_PROG_LOAD        = 1,
        PERF_BPF_EVENT_PROG_UNLOAD      = 2,
        PERF_BPF_EVENT_MAX,             /* non-ABI */
};

#define PERF_MAX_STACK_DEPTH            127
#define PERF_MAX_CONTEXTS_PER_STACK     8

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
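
/*
 * Illustrative sketch (not part of the ABI): walking a PERF_SAMPLE_CALLCHAIN
 * array. Entries at or above PERF_CONTEXT_MAX are context markers separating
 * e.g. kernel from user frames rather than return addresses; "nr" and "ips"
 * come from the PERF_RECORD_SAMPLE payload documented earlier.
 *
 *      for (__u64 i = 0; i < nr; i++) {
 *              if (ips[i] >= (__u64)PERF_CONTEXT_MAX)
 *                      ...     // context marker (KERNEL, USER, ...)
 *              else
 *                      ...     // instruction pointer in that context
 *      }
 */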

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED                 0x01    /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE                 0x02    /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL                   0x04    /* record contains gaps */
#define PERF_AUX_FLAG_COLLISION                 0x08    /* sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK      0xff00  /* PMU specific trace format type */

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT        0x0000 /* Default for backward compatibility */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW              0x0100 /* Raw format of the source */

#define PERF_FLAG_FD_NO_GROUP           (1UL << 0)
#define PERF_FLAG_FD_OUTPUT             (1UL << 1)
#define PERF_FLAG_PID_CGROUP            (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC            (1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_op      :  5, /* type of opcode */
                        mem_lvl     : 14, /* memory hierarchy level */
                        mem_snoop   :  5, /* snoop mode */
                        mem_lock    :  2, /* lock instr */
                        mem_dtlb    :  7, /* tlb access */
                        mem_lvl_num :  4, /* memory hierarchy level number */
                        mem_remote  :  1, /* remote */
                        mem_snoopx  :  2, /* snoop mode, ext */
                        mem_blk     :  3, /* access blocked */
                        mem_hops    :  3, /* hop level */
                        mem_rsvd    : 18;
        };
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_rsvd    : 18,
                        mem_hops    :  3, /* hop level */
                        mem_blk     :  3, /* access blocked */
                        mem_snoopx  :  2, /* snoop mode, ext */
                        mem_remote  :  1, /* remote */
                        mem_lvl_num :  4, /* memory hierarchy level number */
                        mem_dtlb    :  7, /* tlb access */
                        mem_lock    :  2, /* lock instr */
                        mem_snoop   :  5, /* snoop mode */
                        mem_lvl     : 14, /* memory hierarchy level */
                        mem_op      :  5; /* type of opcode */
        };
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA          0x01 /* not available */
#define PERF_MEM_OP_LOAD        0x02 /* load instruction */
#define PERF_MEM_OP_STORE       0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH      0x08 /* prefetch */
#define PERF_MEM_OP_EXEC        0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT       0

/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * This namespace remains supported in order not to break defined ABIs.
 *
 * Memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA         0x01   /* not available */
#define PERF_MEM_LVL_HIT        0x02   /* hit level */
#define PERF_MEM_LVL_MISS       0x04   /* miss level */
#define PERF_MEM_LVL_L1         0x08   /* L1 */
#define PERF_MEM_LVL_LFB        0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2         0x20   /* L2 */
#define PERF_MEM_LVL_L3         0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM    0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1   0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2   0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1   0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2   0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO         0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC        0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT      5

#define PERF_MEM_REMOTE_REMOTE  0x01 /* Remote */
#define PERF_MEM_REMOTE_SHIFT   37

#define PERF_MEM_LVLNUM_L1      0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2      0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3      0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4      0x04 /* L4 */
/* 0x5-0x7 available */
#define PERF_MEM_LVLNUM_UNC     0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL     0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO      0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB     0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM     0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM    0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA      0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT   33

/* snoop mode */
#define PERF_MEM_SNOOP_NA       0x01 /* not available */
#define PERF_MEM_SNOOP_NONE     0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT      0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS     0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM     0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT    19

#define PERF_MEM_SNOOPX_FWD     0x01 /* forward */
#define PERF_MEM_SNOOPX_PEER    0x02 /* xfer from peer */
#define PERF_MEM_SNOOPX_SHIFT   38

/* locked instruction */
#define PERF_MEM_LOCK_NA        0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED    0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT     24

/* TLB access */
#define PERF_MEM_TLB_NA         0x01 /* not available */
#define PERF_MEM_TLB_HIT        0x02 /* hit level */
#define PERF_MEM_TLB_MISS       0x04 /* miss level */
#define PERF_MEM_TLB_L1         0x08 /* L1 */
#define PERF_MEM_TLB_L2         0x10 /* L2 */
#define PERF_MEM_TLB_WK         0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS         0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT      26

/* Access blocked */
#define PERF_MEM_BLK_NA         0x01 /* not available */
#define PERF_MEM_BLK_DATA       0x02 /* data could not be forwarded */
#define PERF_MEM_BLK_ADDR       0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT      40

/* hop level */
#define PERF_MEM_HOPS_0         0x01 /* remote core, same node */
#define PERF_MEM_HOPS_1         0x02 /* remote node, same socket */
#define PERF_MEM_HOPS_2         0x03 /* remote socket, same board */
#define PERF_MEM_HOPS_3         0x04 /* remote board */
/* 0x5-0x7 available */
#define PERF_MEM_HOPS_SHIFT     43

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
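
/*
 * Illustrative sketch (not part of the ABI): PERF_MEM_S() pastes together a
 * field name and a value name and shifts the value into place, so a
 * PERF_SAMPLE_DATA_SRC word for a load that missed the L1 can be built and
 * tested like this:
 *
 *      __u64 src = PERF_MEM_S(OP, LOAD) |
 *                  PERF_MEM_S(LVL, MISS) | PERF_MEM_S(LVL, L1);
 *
 *      if ((src >> PERF_MEM_OP_SHIFT) & PERF_MEM_OP_LOAD)
 *              ...             // it was a load
 */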

/*
 * Single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 *      spec: branch speculation info (or 0 if not supported)
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred   :  1, /* target mispredicted */
                predicted :  1, /* target predicted */
                in_tx     :  1, /* in transaction */
                abort     :  1, /* transaction abort */
                cycles    : 16, /* cycle count to last branch */
                type      :  4, /* branch type */
                spec      :  2, /* branch speculation info */
                new_type  :  4, /* additional branch type */
                priv      :  3, /* privilege level */
                reserved  : 31;
};

/* Size of used info bits in struct perf_branch_entry */
#define PERF_BRANCH_ENTRY_INFO_BITS_MAX         33
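
/*
 * Illustrative sketch (not part of the ABI): consuming the branch stack of a
 * sample taken with PERF_SAMPLE_BRANCH_STACK; "nr" and "entries" are assumed
 * to come from the PERF_RECORD_SAMPLE payload documented earlier.
 *
 *      for (__u64 i = 0; i < nr; i++) {
 *              struct perf_branch_entry *br = &entries[i];
 *
 *              if (br->mispred)
 *                      ...     // branch at br->from was mispredicted
 *      }
 */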

union perf_sample_weight {
        __u64           full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        struct {
                __u32   var1_dw;
                __u16   var2_w;
                __u16   var3_w;
        };
#elif defined(__BIG_ENDIAN_BITFIELD)
        struct {
                __u16   var3_w;
                __u16   var2_w;
                __u32   var1_dw;
        };
#else
#error "Unknown endianness"
#endif
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */