/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>
/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,
	PERF_TYPE_HW_CACHE		= 3,
	PERF_TYPE_RAW			= 4,
	PERF_TYPE_BREAKPOINT		= 5,

	PERF_TYPE_MAX,			/* non-ABI */
};
/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:		0xEEEEEEEE000000AA
 *				AA: hardware event ID
 *				EEEEEEEE: PMU type ID
 * PERF_TYPE_HW_CACHE:		0xEEEEEEEE00DDCCBB
 *				BB: hardware cache ID
 *				CC: hardware cache op ID
 *				DD: hardware cache op result ID
 *				EEEEEEEE: PMU type ID
 * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
 */
#define PERF_PMU_TYPE_SHIFT	32
#define PERF_HW_EVENT_MASK	0xffffffff
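
/*
 * Example (illustrative sketch, not part of the ABI): composing
 * attr.config for a specific PMU using the layout above. "pmu_type" is
 * a hypothetical value read from
 * /sys/bus/event_source/devices/<pmu>/type.
 *
 *	__u64 config;
 *
 *	// PERF_TYPE_HARDWARE: CPU cycles on the given PMU
 *	config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
 *		 PERF_COUNT_HW_CPU_CYCLES;
 *
 *	// PERF_TYPE_HW_CACHE: L1D read misses on the given PMU
 *	config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		  PERF_COUNT_HW_CACHE_L1D;
 */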

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * software events of the kernel (and allow profiling them as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,
	PERF_COUNT_SW_BPF_OUTPUT		= 10,
	PERF_COUNT_SW_CGROUP_SWITCHES		= 11,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,
	PERF_SAMPLE_REGS_INTR			= 1U << 18,
	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
	PERF_SAMPLE_AUX				= 1U << 20,
	PERF_SAMPLE_CGROUP			= 1U << 21,
	PERF_SAMPLE_DATA_PAGE_SIZE		= 1U << 22,
	PERF_SAMPLE_CODE_PAGE_SIZE		= 1U << 23,
	PERF_SAMPLE_WEIGHT_STRUCT		= 1U << 24,

	PERF_SAMPLE_MAX = 1U << 25,		/* non-ABI */
};

#define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6, /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7, /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8, /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */

	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */

	PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT	= 18, /* save privilege mode */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_TYPE_SAVE	= 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

	PERF_SAMPLE_BRANCH_PRIV_SAVE	= 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

/*
 * Common flow change classification
 */
enum {
	PERF_BR_UNKNOWN		= 0,	/* unknown */
	PERF_BR_COND		= 1,	/* conditional */
	PERF_BR_UNCOND		= 2,	/* unconditional  */
	PERF_BR_IND		= 3,	/* indirect */
	PERF_BR_CALL		= 4,	/* function call */
	PERF_BR_IND_CALL	= 5,	/* indirect function call */
	PERF_BR_RET		= 6,	/* function return */
	PERF_BR_SYSCALL		= 7,	/* syscall */
	PERF_BR_SYSRET		= 8,	/* syscall return */
	PERF_BR_COND_CALL	= 9,	/* conditional function call */
	PERF_BR_COND_RET	= 10,	/* conditional function return */
	PERF_BR_ERET		= 11,	/* exception return */
	PERF_BR_IRQ		= 12,	/* irq */
	PERF_BR_SERROR		= 13,	/* system error */
	PERF_BR_NO_TX		= 14,	/* not in transaction */
	PERF_BR_EXTEND_ABI	= 15,	/* extend ABI */
	PERF_BR_MAX,
};

/*
 * Common branch speculation outcome classification
 */
enum {
	PERF_BR_SPEC_NA			= 0,	/* Not available */
	PERF_BR_SPEC_WRONG_PATH		= 1,	/* Speculative but on wrong path */
	PERF_BR_NON_SPEC_CORRECT_PATH	= 2,	/* Non-speculative but on correct path */
	PERF_BR_SPEC_CORRECT_PATH	= 3,	/* Speculative and on correct path */
	PERF_BR_SPEC_MAX,
};

enum {
	PERF_BR_NEW_FAULT_ALGN		= 0,    /* Alignment fault */
	PERF_BR_NEW_FAULT_DATA		= 1,    /* Data fault */
	PERF_BR_NEW_FAULT_INST		= 2,    /* Inst fault */
	PERF_BR_NEW_ARCH_1		= 3,    /* Architecture specific */
	PERF_BR_NEW_ARCH_2		= 4,    /* Architecture specific */
	PERF_BR_NEW_ARCH_3		= 5,    /* Architecture specific */
	PERF_BR_NEW_ARCH_4		= 6,    /* Architecture specific */
	PERF_BR_NEW_ARCH_5		= 7,    /* Architecture specific */
	PERF_BR_NEW_MAX,
};

enum {
	PERF_BR_PRIV_UNKNOWN	= 0,
	PERF_BR_PRIV_USER	= 1,
	PERF_BR_PRIV_KERNEL	= 2,
	PERF_BR_PRIV_HV		= 3,
};

#define PERF_BR_ARM64_FIQ		PERF_BR_NEW_ARCH_1
#define PERF_BR_ARM64_DEBUG_HALT	PERF_BR_NEW_ARCH_2
#define PERF_BR_ARM64_DEBUG_EXIT	PERF_BR_NEW_ARCH_3
#define PERF_BR_ARM64_DEBUG_INST	PERF_BR_NEW_ARCH_4
#define PERF_BR_ARM64_DEBUG_DATA	PERF_BR_NEW_ARCH_5

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION	= (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION	= (1 << 1), /* From transaction */
	PERF_TXN_SYNC		= (1 << 2), /* Instruction is related (synchronous abort) */
	PERF_TXN_ASYNC		= (1 << 3), /* Instruction is not related (asynchronous abort) */
	PERF_TXN_RETRY		= (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT	= (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE	= (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ	= (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX		= (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK	= (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT	= 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	  { u64		lost;         } && PERF_FORMAT_LOST
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	    { u64	lost;         } && PERF_FORMAT_LOST
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,
	PERF_FORMAT_LOST		= 1U << 4,

	PERF_FORMAT_MAX = 1U << 5,	/* non-ABI */
};
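
/*
 * Example (illustrative sketch, not part of the ABI): reading a group
 * leader opened with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID.
 * "group_fd" is a hypothetical fd returned by perf_event_open(); the
 * local struct mirrors the layout documented above for that format.
 *
 *	struct read_group {
 *		__u64 nr;
 *		struct { __u64 value, id; } cntr[];
 *	};
 *
 *	char buf[4096];
 *	struct read_group *rg = (struct read_group *)buf;
 *	__u64 i;
 *
 *	if (read(group_fd, buf, sizeof(buf)) > 0)
 *		for (i = 0; i < rg->nr; i++)
 *			printf("id %llu: %llu\n",
 *			       (unsigned long long)rg->cntr[i].id,
 *			       (unsigned long long)rg->cntr[i].value);
 */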

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6	120	/* add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7	128	/* add: sig_data */
#define PERF_ATTR_SIZE_VER8	136	/* add: config3 */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *		      should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2          :  1, /* include mmap with inode data     */
				comm_exec      :  1, /* flag comm events that are due to an exec */
				use_clockid    :  1, /* use @clockid for time fields */
				context_switch :  1, /* context switch data */
				write_backward :  1, /* Write ring buffer from end to beginning */
				namespaces     :  1, /* include namespaces data */
				ksymbol        :  1, /* include ksymbol events */
				bpf_event      :  1, /* include bpf events */
				aux_output     :  1, /* generate AUX records instead of events */
				cgroup         :  1, /* include cgroup events */
				text_poke      :  1, /* include text poke events */
				build_id       :  1, /* use build id in mmap2 events */
				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
				remove_on_exec :  1, /* event is removed from task on exec */
				sigtrap        :  1, /* send synchronous SIGTRAP on event */
				__reserved_1   : 26;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		kprobe_func; /* for perf_kprobe */
		__u64		uprobe_path; /* for perf_uprobe */
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		kprobe_addr; /* when kprobe_func == NULL */
		__u64		probe_offset; /* for perf_[k,u]probe */
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	__s32	clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32	aux_watermark;
	__u16	sample_max_stack;
	__u16	__reserved_2;
	__u32	aux_sample_size;
	__u32	__reserved_3;

	/*
	 * User provided data if sigtrap=1, passed back to user via
	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
	 * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
	 * truncated accordingly on 32 bit architectures.
	 */
	__u64	sig_data;

	__u64	config3; /* extension of config2 */
};
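
/*
 * Example (illustrative sketch): counting user-space instructions for
 * the calling thread using the structure above. Error handling is
 * omitted; there is no glibc wrapper, so the raw syscall(2) interface
 * is used. The ioctls referenced here are defined further below.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type	    = PERF_TYPE_HARDWARE;
 *	attr.size	    = sizeof(attr);
 *	attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled	    = 1;
 *	attr.exclude_kernel = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload to measure ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */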

/*
 * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
 * to query bpf programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
	/*
	 * Length of the ids array below
	 */
	__u32	ids_len;
	/*
	 * Set by the kernel to indicate the number of
	 * available programs
	 */
	__u32	prog_cnt;
	/*
	 * User provided buffer to store program ids
	 */
	__u32	ids[];
};
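
/*
 * Example (illustrative sketch): querying the ids of bpf programs
 * attached to a tracepoint event. "fd" is a hypothetical perf event fd;
 * if more programs are available than ids_len, the ioctl is expected to
 * fail with ENOSPC while still reporting the total in prog_cnt.
 *
 *	__u32 n = 4;
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + n * sizeof(__u32));
 *	query->ids_len = n;
 *	if (ioctl(fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		// query->prog_cnt ids were stored in query->ids[]
 *		;
 */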

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
#define PERF_EVENT_IOC_RESET			_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD			_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER		_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID			_IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF			_IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
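
/*
 * Example (illustrative): by default these ioctls act on the single
 * event fd; passing PERF_IOC_FLAG_GROUP as the argument applies them to
 * the whole group led by that event. "group_fd" is a hypothetical group
 * leader fd.
 *
 *	ioctl(group_fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	// ... measured region ...
 *	ioctl(group_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */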

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier();
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_user_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
				cap_____res		: 58;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_user_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot  = (cyc >> time_shift);
	 *   rem   = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot  = count / running;
	 *   rem   = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be calculated
	 * from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot      = cyc >> time_shift;
	 *   rem       = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;

	__u32	size;			/* Header size up to __reserved[] fields. */
	__u32	__reserved_1;

	/*
	 * If cap_user_time_short, the hardware clock is less than 64bit wide
	 * and we must compute the 'cyc' value, as used by cap_user_time, as:
	 *
	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
	 *
	 * NOTE: this form is explicitly chosen such that cap_user_time_short
	 *       is a correction on top of cap_user_time, and code that doesn't
	 *       know about cap_user_time_short still works under the assumption
	 *       the counter doesn't wrap.
	 */
	__u64	time_cycles;
	__u64	time_mask;

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8	__reserved[116*8];	/* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 * (See the example that follows this structure.)
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
	__u64	data_offset;		/* where the buffer starts */
	__u64	data_size;		/* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by the userspace, so that
	 *
	 *   aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and same ordering rules apply.
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};
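
/*
 * Example (illustrative sketch): consuming records from the mmap()ed
 * data buffer as described above. "page" is assumed to point at the
 * perf_event_mmap_page of a PROT_WRITE mapping, and smp_rmb()/smp_mb()
 * stand for the architecture's read/full memory barriers.
 *
 *	__u64 head, tail;
 *	char *base = (char *)page + page->data_offset;
 *
 *	head = page->data_head;
 *	smp_rmb();	// order the data reads after reading data_head
 *	tail = page->data_tail;
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (struct perf_event_header *)
 *			(base + (tail % page->data_size));
 *		// ... process hdr->size bytes, minding buffer wrap ...
 *		tail += hdr->size;
 *	}
 *
 *	smp_mb();	// order the reads before the data_tail store
 *	page->data_tail = tail;
 */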

/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 *  Where:
 *  0-2	CPUMODE_MASK
 *
 *  C	PROC_MAP_PARSE_TIMEOUT
 *  D	MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *  E	MMAP_BUILD_ID / EXACT_IP / SWITCH_OUT_PREEMPT
 *  F	(reserved)
 */

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * Following PERF_RECORD_MISC_* are used on different
 * events, so can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that mmap2 event carries build id data.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

enum {
	NET_NS_INDEX		= 0,
	UTS_NS_INDEX		= 1,
	IPC_NS_INDEX		= 2,
	PID_NS_INDEX		= 3,
	USER_NS_INDEX		= 4,
	MNT_NS_INDEX		= 5,
	CGROUP_NS_INDEX		= 6,

	NR_NAMESPACES,		/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are
	 * stashed just after the perf_event_header and the fields already
	 * present for the existing record types, i.e. at the end of the
	 * payload. That way a newer perf.data file will be supported by
	 * older perf tools, with these new optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			id;       } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */
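
	/*
	 * Example (illustrative sketch): because PERF_SAMPLE_IDENTIFIER
	 * sits at a fixed position, the event id of any non-sample record
	 * carrying sample_id can be recovered without knowing that record's
	 * layout. "hdr" is a hypothetical pointer to such a record:
	 *
	 *	__u64 id = *(__u64 *)((char *)hdr + hdr->size - sizeof(__u64));
	 */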

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt the
	 *	# stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
	 *	  { u64 from, to, flags } lbr[nr];
	 *	} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ union perf_sample_weight
	 *	 {
	 *		u64		full; && PERF_SAMPLE_WEIGHT
	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
	 *		struct {
	 *			u32	var1_dw;
	 *			u16	var2_w;
	 *			u16	var3_w;
	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
	 *		struct {
	 *			u16	var3_w;
	 *			u16	var2_w;
	 *			u32	var1_dw;
	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#endif
	 *	 }
	 *	}
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
	 *	{ u64			size;
	 *	  char			data[size]; } && PERF_SAMPLE_AUX
	 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
	 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	union {
	 *		struct {
	 *			u32		maj;
	 *			u32		min;
	 *			u64		ino;
	 *			u64		ino_generation;
	 *		};
	 *		struct {
	 *			u8		build_id_size;
	 *			u8		__reserved_1;
	 *			u16		__reserved_2;
	 *			u8		build_id[20];
	 *		};
	 *	};
	 *	u32				prot, flags;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				aux_offset;
	 *	u64				aux_size;
	 *	u64				flags;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				next_prev_pid;
	 *	u32				next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	u64				nr_namespaces;
	 *	{ u64				dev, inode; } [nr_namespaces];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES			= 16,

	/*
	 * Record ksymbol register/unregister events:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u32				len;
	 *	u16				ksym_type;
	 *	u16				flags;
	 *	char				name[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_KSYMBOL			= 17,

	/*
	 * Record bpf events:
	 *  enum perf_bpf_event_type {
	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	 *  };
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u16				type;
	 *	u16				flags;
	 *	u32				id;
	 *	u8				tag[BPF_TAG_SIZE];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_BPF_EVENT			= 18,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	char				path[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_CGROUP			= 19,

	/*
	 * Records changes to kernel text i.e. self-modified code. 'old_len' is
	 * the number of old bytes, 'new_len' is the number of new bytes. Either
	 * 'old_len' or 'new_len' may be zero to indicate, for example, the
	 * addition or removal of a trampoline. 'bytes' contains the old bytes
	 * followed immediately by the new bytes.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u16				old_len;
	 *	u16				new_len;
	 *	u8				bytes[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_TEXT_POKE			= 20,

	/*
	 * Data written to the AUX area by hardware due to aux_output, may need
	 * to be matched to the event by an architecture-specific hardware ID.
	 * This records the hardware ID, but requires sample_id to provide the
	 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
	 * records from multiple events.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				hw_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,

	PERF_RECORD_MAX,			/* non-ABI */
};

enum perf_record_ksymbol_type {
	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
	/*
	 * Out of line code such as kprobe-replaced instructions or optimized
	 * kprobes or ftrace trampolines.
	 */
	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)

enum perf_bpf_event_type {
	PERF_BPF_EVENT_UNKNOWN		= 0,
	PERF_BPF_EVENT_PROG_LOAD	= 1,
	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	PERF_BPF_EVENT_MAX,		/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	  8

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED			0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE			0x02	/* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL			0x04	/* record contains gaps */
#define PERF_AUX_FLAG_COLLISION			0x08	/* sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00	/* PMU specific trace format type */

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64   mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_remote:1,	/* remote */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_blk:3,	/* access blocked */
			mem_hops:3,	/* hop level */
			mem_rsvd:18;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd:18,
			mem_hops:3,	/* hop level */
			mem_blk:3,	/* access blocked */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_remote:1,	/* remote */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_dtlb:7,	/* tlb access */
			mem_lock:2,	/* lock instr */
			mem_snoop:5,	/* snoop mode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_op:5;	/* type of opcode */
	};
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch/code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * This namespace is still supported in order to not break defined ABIs.
 *
 * memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA		0x01  /* not available */
#define PERF_MEM_LVL_HIT	0x02  /* hit level */
#define PERF_MEM_LVL_MISS	0x04  /* miss level */
#define PERF_MEM_LVL_L1		0x08  /* L1 */
#define PERF_MEM_LVL_LFB	0x10  /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20  /* L2 */
#define PERF_MEM_LVL_L3		0x40  /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80  /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

#define PERF_MEM_REMOTE_REMOTE	0x01  /* Remote */
#define PERF_MEM_REMOTE_SHIFT	37

#define PERF_MEM_LVLNUM_L1	0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2	0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3	0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4	0x04 /* L4 */
/* 0x05-0x07 available */
#define PERF_MEM_LVLNUM_UNC	0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL	0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO	0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB	0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM	0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM	0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA	0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT	33

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

#define PERF_MEM_SNOOPX_FWD	0x01 /* forward */
#define PERF_MEM_SNOOPX_PEER	0x02 /* xfer from peer */
#define PERF_MEM_SNOOPX_SHIFT	38

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

/* Access blocked */
#define PERF_MEM_BLK_NA		0x01 /* not available */
#define PERF_MEM_BLK_DATA	0x02 /* data could not be forwarded */
#define PERF_MEM_BLK_ADDR	0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT	40

/* hop level */
#define PERF_MEM_HOPS_0		0x01 /* remote core, same node */
#define PERF_MEM_HOPS_1		0x02 /* remote node, same socket */
#define PERF_MEM_HOPS_2		0x03 /* remote socket, same board */
#define PERF_MEM_HOPS_3		0x04 /* remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT	43

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
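
/*
 * Example (illustrative): composing a perf_mem_data_src value with
 * PERF_MEM_S(), here describing a load that hit in the local L2:
 *
 *	union perf_mem_data_src dsrc;
 *
 *	dsrc.val = PERF_MEM_S(OP, LOAD)   |
 *		   PERF_MEM_S(LVL, HIT)   |
 *		   PERF_MEM_S(LVLNUM, L2) |
 *		   PERF_MEM_S(SNOOP, NONE)|
 *		   PERF_MEM_S(LOCK, NA)   |
 *		   PERF_MEM_S(TLB, HIT);
 */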

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional; if not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 *      spec: branch speculation info (or 0 if not supported)
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		cycles:16,  /* cycle count to last branch */
		type:4,     /* branch type */
		spec:2,     /* branch speculation info */
		new_type:4, /* additional branch type */
		priv:3,     /* privilege level */
		reserved:31;
};
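
/*
 * Example (illustrative sketch): walking the branch stack portion of a
 * PERF_RECORD_SAMPLE. "nr" and "entries" are hypothetical values parsed
 * out of the record as documented in enum perf_event_type above.
 *
 *	struct perf_branch_entry *br = entries;
 *	__u64 i;
 *
 *	for (i = 0; i < nr; i++, br++)
 *		printf("%llx -> %llx %s\n",
 *		       (unsigned long long)br->from,
 *		       (unsigned long long)br->to,
 *		       br->mispred ? "mispredicted" : "predicted");
 */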

union perf_sample_weight {
	__u64		full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	struct {
		__u32	var1_dw;
		__u16	var2_w;
		__u16	var3_w;
	};
#elif defined(__BIG_ENDIAN_BITFIELD)
	struct {
		__u16	var3_w;
		__u16	var2_w;
		__u32	var1_dw;
	};
#else
#error "Unknown endianness"
#endif
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */