1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
3 | * |
4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of version 2 of the GNU General Public |
6 | * License as published by the Free Software Foundation. |
7 | */ |
8 | #ifndef __LINUX_BPF_H__ |
9 | #define __LINUX_BPF_H__ |
10 | |
11 | #include <linux/types.h> |
12 | #include <linux/bpf_common.h> |
13 | |
14 | /* Extended instruction set based on top of classic BPF */ |
15 | |
16 | /* instruction classes */ |
17 | #define BPF_JMP32 0x06 /* jmp mode in word width */ |
18 | #define BPF_ALU64 0x07 /* alu mode in double word width */ |
19 | |
20 | /* ld/ldx fields */ |
21 | #define BPF_DW 0x18 /* double word (64-bit) */ |
22 | #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ |
23 | #define BPF_XADD 0xc0 /* exclusive add - legacy name */ |
24 | |
25 | /* alu/jmp fields */ |
26 | #define BPF_MOV 0xb0 /* mov reg to reg */ |
27 | #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ |
28 | |
29 | /* change endianness of a register */ |
30 | #define BPF_END 0xd0 /* flags for endianness conversion: */ |
31 | #define BPF_TO_LE 0x00 /* convert to little-endian */ |
32 | #define BPF_TO_BE 0x08 /* convert to big-endian */ |
33 | #define BPF_FROM_LE BPF_TO_LE |
34 | #define BPF_FROM_BE BPF_TO_BE |
35 | |
36 | /* jmp encodings */ |
37 | #define BPF_JNE 0x50 /* jump != */ |
38 | #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ |
39 | #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ |
40 | #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ |
41 | #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ |
42 | #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ |
43 | #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ |
44 | #define BPF_CALL 0x80 /* function call */ |
45 | #define BPF_EXIT 0x90 /* function return */ |
46 | |
47 | /* atomic op type fields (stored in immediate) */ |
48 | #define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */ |
49 | #define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ |
50 | #define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ |
51 | |
52 | /* Register numbers */ |
53 | enum { |
54 | BPF_REG_0 = 0, |
55 | BPF_REG_1, |
56 | BPF_REG_2, |
57 | BPF_REG_3, |
58 | BPF_REG_4, |
59 | BPF_REG_5, |
60 | BPF_REG_6, |
61 | BPF_REG_7, |
62 | BPF_REG_8, |
63 | BPF_REG_9, |
64 | BPF_REG_10, |
65 | __MAX_BPF_REG, |
66 | }; |
67 | |
/* BPF has 10 general purpose 64-bit registers (R0-R9) plus R10, a read-only
 * frame pointer used to access the stack frame.
 */
69 | #define MAX_BPF_REG __MAX_BPF_REG |
70 | |
71 | struct bpf_insn { |
72 | __u8 code; /* opcode */ |
73 | __u8 dst_reg:4; /* dest register */ |
74 | __u8 src_reg:4; /* source register */ |
75 | __s16 off; /* signed offset */ |
76 | __s32 imm; /* signed immediate constant */ |
77 | }; |
78 | |
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
80 | struct bpf_lpm_trie_key { |
81 | __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ |
82 | __u8 data[0]; /* Arbitrary size */ |
83 | }; |
84 | |
85 | struct bpf_cgroup_storage_key { |
86 | __u64 cgroup_inode_id; /* cgroup inode id */ |
87 | __u32 attach_type; /* program attach type (enum bpf_attach_type) */ |
88 | }; |
89 | |
90 | union bpf_iter_link_info { |
91 | struct { |
92 | __u32 map_fd; |
93 | } map; |
94 | }; |
95 | |
96 | /* BPF syscall commands, see bpf(2) man-page for more details. */ |
97 | /** |
98 | * DOC: eBPF Syscall Preamble |
99 | * |
100 | * The operation to be performed by the **bpf**\ () system call is determined |
101 | * by the *cmd* argument. Each operation takes an accompanying argument, |
102 | * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see |
 * below). The *size* argument is the size of the union pointed to by *attr*.
104 | */ |
105 | /** |
106 | * DOC: eBPF Syscall Commands |
107 | * |
108 | * BPF_MAP_CREATE |
109 | * Description |
110 | * Create a map and return a file descriptor that refers to the |
111 | * map. The close-on-exec file descriptor flag (see **fcntl**\ (2)) |
112 | * is automatically enabled for the new file descriptor. |
113 | * |
114 | * Applying **close**\ (2) to the file descriptor returned by |
115 | * **BPF_MAP_CREATE** will delete the map (but see NOTES). |
116 | * |
117 | * Return |
118 | * A new file descriptor (a nonnegative integer), or -1 if an |
119 | * error occurred (in which case, *errno* is set appropriately). |
120 | * |
121 | * BPF_MAP_LOOKUP_ELEM |
122 | * Description |
123 | * Look up an element with a given *key* in the map referred to |
124 | * by the file descriptor *map_fd*. |
125 | * |
126 | * The *flags* argument may be specified as one of the |
127 | * following: |
128 | * |
129 | * **BPF_F_LOCK** |
130 | * Look up the value of a spin-locked map without |
131 | * returning the lock. This must be specified if the |
132 | * elements contain a spinlock. |
133 | * |
134 | * Return |
135 | * Returns zero on success. On error, -1 is returned and *errno* |
136 | * is set appropriately. |
137 | * |
138 | * BPF_MAP_UPDATE_ELEM |
139 | * Description |
140 | * Create or update an element (key/value pair) in a specified map. |
141 | * |
142 | * The *flags* argument should be specified as one of the |
143 | * following: |
144 | * |
145 | * **BPF_ANY** |
146 | * Create a new element or update an existing element. |
147 | * **BPF_NOEXIST** |
148 | * Create a new element only if it did not exist. |
149 | * **BPF_EXIST** |
150 | * Update an existing element. |
151 | * **BPF_F_LOCK** |
152 | * Update a spin_lock-ed map element. |
153 | * |
154 | * Return |
155 | * Returns zero on success. On error, -1 is returned and *errno* |
156 | * is set appropriately. |
157 | * |
158 | * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, |
159 | * **E2BIG**, **EEXIST**, or **ENOENT**. |
160 | * |
161 | * **E2BIG** |
162 | * The number of elements in the map reached the |
163 | * *max_entries* limit specified at map creation time. |
164 | * **EEXIST** |
165 | * If *flags* specifies **BPF_NOEXIST** and the element |
166 | * with *key* already exists in the map. |
167 | * **ENOENT** |
168 | * If *flags* specifies **BPF_EXIST** and the element with |
169 | * *key* does not exist in the map. |
170 | * |
171 | * BPF_MAP_DELETE_ELEM |
172 | * Description |
173 | * Look up and delete an element by key in a specified map. |
174 | * |
175 | * Return |
176 | * Returns zero on success. On error, -1 is returned and *errno* |
177 | * is set appropriately. |
178 | * |
179 | * BPF_MAP_GET_NEXT_KEY |
180 | * Description |
181 | * Look up an element by key in a specified map and return the key |
182 | * of the next element. Can be used to iterate over all elements |
183 | * in the map. |
184 | * |
185 | * Return |
186 | * Returns zero on success. On error, -1 is returned and *errno* |
187 | * is set appropriately. |
188 | * |
189 | * The following cases can be used to iterate over all elements of |
190 | * the map: |
191 | * |
192 | * * If *key* is not found, the operation returns zero and sets |
193 | * the *next_key* pointer to the key of the first element. |
194 | * * If *key* is found, the operation returns zero and sets the |
195 | * *next_key* pointer to the key of the next element. |
196 | * * If *key* is the last element, returns -1 and *errno* is set |
197 | * to **ENOENT**. |
198 | * |
199 | * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or |
200 | * **EINVAL** on error. |
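 *
 *		An illustrative iteration sketch (for a map with __u32 keys;
 *		passing a NULL *key* pointer to fetch the first key is
 *		supported on recent kernels; error handling omitted)::
 *
 *			__u32 key, next_key;
 *			union bpf_attr attr = {
 *				.map_fd   = map_fd,
 *				.key      = 0,
 *				.next_key = (__u64)(unsigned long)&next_key,
 *			};
 *			while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY,
 *					&attr, sizeof(attr))) {
 *				key = next_key;
 *				attr.key = (__u64)(unsigned long)&key;
 *			}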
201 | * |
202 | * BPF_PROG_LOAD |
203 | * Description |
204 | * Verify and load an eBPF program, returning a new file |
205 | * descriptor associated with the program. |
206 | * |
207 | * Applying **close**\ (2) to the file descriptor returned by |
208 | * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES). |
209 | * |
210 | * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is |
211 | * automatically enabled for the new file descriptor. |
212 | * |
213 | * Return |
214 | * A new file descriptor (a nonnegative integer), or -1 if an |
215 | * error occurred (in which case, *errno* is set appropriately). |
216 | * |
217 | * BPF_OBJ_PIN |
218 | * Description |
 *		Pin an eBPF program or map referred to by the specified
 *		*bpf_fd* to the provided *pathname* on the filesystem.
221 | * |
222 | * The *pathname* argument must not contain a dot ("."). |
223 | * |
224 | * On success, *pathname* retains a reference to the eBPF object, |
225 | * preventing deallocation of the object when the original |
 *		*bpf_fd* is closed. This allows the eBPF object to live beyond
227 | * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent |
228 | * process. |
229 | * |
230 | * Applying **unlink**\ (2) or similar calls to the *pathname* |
231 | * unpins the object from the filesystem, removing the reference. |
232 | * If no other file descriptors or filesystem nodes refer to the |
233 | * same object, it will be deallocated (see NOTES). |
234 | * |
235 | * The filesystem type for the parent directory of *pathname* must |
236 | * be **BPF_FS_MAGIC**. |
237 | * |
238 | * Return |
239 | * Returns zero on success. On error, -1 is returned and *errno* |
240 | * is set appropriately. |
241 | * |
242 | * BPF_OBJ_GET |
243 | * Description |
244 | * Open a file descriptor for the eBPF object pinned to the |
245 | * specified *pathname*. |
246 | * |
247 | * Return |
248 | * A new file descriptor (a nonnegative integer), or -1 if an |
249 | * error occurred (in which case, *errno* is set appropriately). |
250 | * |
251 | * BPF_PROG_ATTACH |
252 | * Description |
253 | * Attach an eBPF program to a *target_fd* at the specified |
254 | * *attach_type* hook. |
255 | * |
256 | * The *attach_type* specifies the eBPF attachment point to |
257 | * attach the program to, and must be one of *bpf_attach_type* |
258 | * (see below). |
259 | * |
260 | * The *attach_bpf_fd* must be a valid file descriptor for a |
261 | * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap |
262 | * or sock_ops type corresponding to the specified *attach_type*. |
263 | * |
264 | * The *target_fd* must be a valid file descriptor for a kernel |
265 | * object which depends on the attach type of *attach_bpf_fd*: |
266 | * |
267 | * **BPF_PROG_TYPE_CGROUP_DEVICE**, |
268 | * **BPF_PROG_TYPE_CGROUP_SKB**, |
269 | * **BPF_PROG_TYPE_CGROUP_SOCK**, |
270 | * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, |
271 | * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, |
272 | * **BPF_PROG_TYPE_CGROUP_SYSCTL**, |
273 | * **BPF_PROG_TYPE_SOCK_OPS** |
274 | * |
275 | * Control Group v2 hierarchy with the eBPF controller |
276 | * enabled. Requires the kernel to be compiled with |
277 | * **CONFIG_CGROUP_BPF**. |
278 | * |
279 | * **BPF_PROG_TYPE_FLOW_DISSECTOR** |
280 | * |
281 | * Network namespace (eg /proc/self/ns/net). |
282 | * |
283 | * **BPF_PROG_TYPE_LIRC_MODE2** |
284 | * |
285 | * LIRC device path (eg /dev/lircN). Requires the kernel |
286 | * to be compiled with **CONFIG_BPF_LIRC_MODE2**. |
287 | * |
288 | * **BPF_PROG_TYPE_SK_SKB**, |
289 | * **BPF_PROG_TYPE_SK_MSG** |
290 | * |
291 | * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**). |
292 | * |
293 | * Return |
294 | * Returns zero on success. On error, -1 is returned and *errno* |
295 | * is set appropriately. |
296 | * |
297 | * BPF_PROG_DETACH |
298 | * Description |
299 | * Detach the eBPF program associated with the *target_fd* at the |
300 | * hook specified by *attach_type*. The program must have been |
301 | * previously attached using **BPF_PROG_ATTACH**. |
302 | * |
303 | * Return |
304 | * Returns zero on success. On error, -1 is returned and *errno* |
305 | * is set appropriately. |
306 | * |
307 | * BPF_PROG_TEST_RUN |
308 | * Description |
309 | * Run the eBPF program associated with the *prog_fd* a *repeat* |
310 | * number of times against a provided program context *ctx_in* and |
311 | * data *data_in*, and return the modified program context |
312 | * *ctx_out*, *data_out* (for example, packet data), result of the |
313 | * execution *retval*, and *duration* of the test run. |
314 | * |
315 | * The sizes of the buffers provided as input and output |
316 | * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must |
317 | * be provided in the corresponding variables *ctx_size_in*, |
318 | * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any |
319 | * of these parameters are not provided (ie set to NULL), the |
320 | * corresponding size field must be zero. |
321 | * |
322 | * Some program types have particular requirements: |
323 | * |
324 | * **BPF_PROG_TYPE_SK_LOOKUP** |
325 | * *data_in* and *data_out* must be NULL. |
326 | * |
327 | * **BPF_PROG_TYPE_RAW_TRACEPOINT**, |
328 | * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE** |
329 | * |
330 | * *ctx_out*, *data_in* and *data_out* must be NULL. |
331 | * *repeat* must be zero. |
332 | * |
333 | * Return |
334 | * Returns zero on success. On error, -1 is returned and *errno* |
335 | * is set appropriately. |
336 | * |
337 | * **ENOSPC** |
338 | * Either *data_size_out* or *ctx_size_out* is too small. |
339 | * **ENOTSUPP** |
340 | * This command is not supported by the program type of |
341 | * the program referred to by *prog_fd*. |
342 | * |
343 | * BPF_PROG_GET_NEXT_ID |
344 | * Description |
345 | * Fetch the next eBPF program currently loaded into the kernel. |
346 | * |
347 | * Looks for the eBPF program with an id greater than *start_id* |
348 | * and updates *next_id* on success. If no other eBPF programs |
349 | * remain with ids higher than *start_id*, returns -1 and sets |
350 | * *errno* to **ENOENT**. |
351 | * |
352 | * Return |
353 | * Returns zero on success. On error, or when no id remains, -1 |
354 | * is returned and *errno* is set appropriately. |
355 | * |
356 | * BPF_MAP_GET_NEXT_ID |
357 | * Description |
358 | * Fetch the next eBPF map currently loaded into the kernel. |
359 | * |
360 | * Looks for the eBPF map with an id greater than *start_id* |
361 | * and updates *next_id* on success. If no other eBPF maps |
362 | * remain with ids higher than *start_id*, returns -1 and sets |
363 | * *errno* to **ENOENT**. |
364 | * |
365 | * Return |
366 | * Returns zero on success. On error, or when no id remains, -1 |
367 | * is returned and *errno* is set appropriately. |
368 | * |
369 | * BPF_PROG_GET_FD_BY_ID |
370 | * Description |
371 | * Open a file descriptor for the eBPF program corresponding to |
372 | * *prog_id*. |
373 | * |
374 | * Return |
375 | * A new file descriptor (a nonnegative integer), or -1 if an |
376 | * error occurred (in which case, *errno* is set appropriately). |
377 | * |
378 | * BPF_MAP_GET_FD_BY_ID |
379 | * Description |
380 | * Open a file descriptor for the eBPF map corresponding to |
381 | * *map_id*. |
382 | * |
383 | * Return |
384 | * A new file descriptor (a nonnegative integer), or -1 if an |
385 | * error occurred (in which case, *errno* is set appropriately). |
386 | * |
387 | * BPF_OBJ_GET_INFO_BY_FD |
388 | * Description |
389 | * Obtain information about the eBPF object corresponding to |
390 | * *bpf_fd*. |
391 | * |
392 | * Populates up to *info_len* bytes of *info*, which will be in |
393 | * one of the following formats depending on the eBPF object type |
394 | * of *bpf_fd*: |
395 | * |
396 | * * **struct bpf_prog_info** |
397 | * * **struct bpf_map_info** |
398 | * * **struct bpf_btf_info** |
399 | * * **struct bpf_link_info** |
400 | * |
401 | * Return |
402 | * Returns zero on success. On error, -1 is returned and *errno* |
403 | * is set appropriately. |
404 | * |
405 | * BPF_PROG_QUERY |
406 | * Description |
407 | * Obtain information about eBPF programs associated with the |
408 | * specified *attach_type* hook. |
409 | * |
410 | * The *target_fd* must be a valid file descriptor for a kernel |
411 | * object which depends on the attach type of *attach_bpf_fd*: |
412 | * |
413 | * **BPF_PROG_TYPE_CGROUP_DEVICE**, |
414 | * **BPF_PROG_TYPE_CGROUP_SKB**, |
415 | * **BPF_PROG_TYPE_CGROUP_SOCK**, |
416 | * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, |
417 | * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, |
418 | * **BPF_PROG_TYPE_CGROUP_SYSCTL**, |
419 | * **BPF_PROG_TYPE_SOCK_OPS** |
420 | * |
421 | * Control Group v2 hierarchy with the eBPF controller |
422 | * enabled. Requires the kernel to be compiled with |
423 | * **CONFIG_CGROUP_BPF**. |
424 | * |
425 | * **BPF_PROG_TYPE_FLOW_DISSECTOR** |
426 | * |
427 | * Network namespace (eg /proc/self/ns/net). |
428 | * |
429 | * **BPF_PROG_TYPE_LIRC_MODE2** |
430 | * |
431 | * LIRC device path (eg /dev/lircN). Requires the kernel |
432 | * to be compiled with **CONFIG_BPF_LIRC_MODE2**. |
433 | * |
434 | * **BPF_PROG_QUERY** always fetches the number of programs |
435 | * attached and the *attach_flags* which were used to attach those |
436 | * programs. Additionally, if *prog_ids* is nonzero and the number |
437 | * of attached programs is less than *prog_cnt*, populates |
438 | * *prog_ids* with the eBPF program ids of the programs attached |
439 | * at *target_fd*. |
440 | * |
441 | * The following flags may alter the result: |
442 | * |
443 | * **BPF_F_QUERY_EFFECTIVE** |
444 | * Only return information regarding programs which are |
445 | * currently effective at the specified *target_fd*. |
446 | * |
447 | * Return |
448 | * Returns zero on success. On error, -1 is returned and *errno* |
449 | * is set appropriately. |
450 | * |
451 | * BPF_RAW_TRACEPOINT_OPEN |
452 | * Description |
453 | * Attach an eBPF program to a tracepoint *name* to access kernel |
454 | * internal arguments of the tracepoint in their raw form. |
455 | * |
456 | * The *prog_fd* must be a valid file descriptor associated with |
457 | * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**. |
458 | * |
459 | * No ABI guarantees are made about the content of tracepoint |
460 | * arguments exposed to the corresponding eBPF program. |
461 | * |
462 | * Applying **close**\ (2) to the file descriptor returned by |
 *		**BPF_RAW_TRACEPOINT_OPEN** will detach the eBPF program from
 *		the tracepoint (but see NOTES).
464 | * |
465 | * Return |
466 | * A new file descriptor (a nonnegative integer), or -1 if an |
467 | * error occurred (in which case, *errno* is set appropriately). |
468 | * |
469 | * BPF_BTF_LOAD |
470 | * Description |
471 | * Verify and load BPF Type Format (BTF) metadata into the kernel, |
472 | * returning a new file descriptor associated with the metadata. |
473 | * BTF is described in more detail at |
474 | * https://www.kernel.org/doc/html/latest/bpf/btf.html. |
475 | * |
476 | * The *btf* parameter must point to valid memory providing |
477 | * *btf_size* bytes of BTF binary metadata. |
478 | * |
479 | * The returned file descriptor can be passed to other **bpf**\ () |
480 | * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to |
481 | * associate the BTF with those objects. |
482 | * |
483 | * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional |
484 | * parameters to specify a *btf_log_buf*, *btf_log_size* and |
485 | * *btf_log_level* which allow the kernel to return freeform log |
486 | * output regarding the BTF verification process. |
487 | * |
488 | * Return |
489 | * A new file descriptor (a nonnegative integer), or -1 if an |
490 | * error occurred (in which case, *errno* is set appropriately). |
491 | * |
492 | * BPF_BTF_GET_FD_BY_ID |
493 | * Description |
494 | * Open a file descriptor for the BPF Type Format (BTF) |
495 | * corresponding to *btf_id*. |
496 | * |
497 | * Return |
498 | * A new file descriptor (a nonnegative integer), or -1 if an |
499 | * error occurred (in which case, *errno* is set appropriately). |
500 | * |
501 | * BPF_TASK_FD_QUERY |
502 | * Description |
503 | * Obtain information about eBPF programs associated with the |
504 | * target process identified by *pid* and *fd*. |
505 | * |
506 | * If the *pid* and *fd* are associated with a tracepoint, kprobe |
507 | * or uprobe perf event, then the *prog_id* and *fd_type* will |
508 | * be populated with the eBPF program id and file descriptor type |
509 | * of type **bpf_task_fd_type**. If associated with a kprobe or |
510 | * uprobe, the *probe_offset* and *probe_addr* will also be |
511 | * populated. Optionally, if *buf* is provided, then up to |
512 | * *buf_len* bytes of *buf* will be populated with the name of |
513 | * the tracepoint, kprobe or uprobe. |
514 | * |
515 | * The resulting *prog_id* may be introspected in deeper detail |
516 | * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**. |
517 | * |
518 | * Return |
519 | * Returns zero on success. On error, -1 is returned and *errno* |
520 | * is set appropriately. |
521 | * |
522 | * BPF_MAP_LOOKUP_AND_DELETE_ELEM |
523 | * Description |
524 | * Look up an element with the given *key* in the map referred to |
525 | * by the file descriptor *fd*, and if found, delete the element. |
526 | * |
527 | * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map |
528 | * types, the *flags* argument needs to be set to 0, but for other |
529 | * map types, it may be specified as: |
530 | * |
531 | * **BPF_F_LOCK** |
532 | * Look up and delete the value of a spin-locked map |
533 | * without returning the lock. This must be specified if |
534 | * the elements contain a spinlock. |
535 | * |
536 | * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types |
537 | * implement this command as a "pop" operation, deleting the top |
538 | * element rather than one corresponding to *key*. |
 *		The *key* parameter should be zeroed when issuing this
 *		operation for these map types.
541 | * |
542 | * This command is only valid for the following map types: |
543 | * * **BPF_MAP_TYPE_QUEUE** |
544 | * * **BPF_MAP_TYPE_STACK** |
545 | * * **BPF_MAP_TYPE_HASH** |
546 | * * **BPF_MAP_TYPE_PERCPU_HASH** |
547 | * * **BPF_MAP_TYPE_LRU_HASH** |
548 | * * **BPF_MAP_TYPE_LRU_PERCPU_HASH** |
549 | * |
550 | * Return |
551 | * Returns zero on success. On error, -1 is returned and *errno* |
552 | * is set appropriately. |
553 | * |
554 | * BPF_MAP_FREEZE |
555 | * Description |
556 | * Freeze the permissions of the specified map. |
557 | * |
558 | * Write permissions may be frozen by passing zero *flags*. |
559 | * Upon success, no future syscall invocations may alter the |
560 | * map state of *map_fd*. Write operations from eBPF programs |
561 | * are still possible for a frozen map. |
562 | * |
563 | * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**. |
564 | * |
565 | * Return |
566 | * Returns zero on success. On error, -1 is returned and *errno* |
567 | * is set appropriately. |
568 | * |
569 | * BPF_BTF_GET_NEXT_ID |
570 | * Description |
571 | * Fetch the next BPF Type Format (BTF) object currently loaded |
572 | * into the kernel. |
573 | * |
574 | * Looks for the BTF object with an id greater than *start_id* |
575 | * and updates *next_id* on success. If no other BTF objects |
576 | * remain with ids higher than *start_id*, returns -1 and sets |
577 | * *errno* to **ENOENT**. |
578 | * |
579 | * Return |
580 | * Returns zero on success. On error, or when no id remains, -1 |
581 | * is returned and *errno* is set appropriately. |
582 | * |
583 | * BPF_MAP_LOOKUP_BATCH |
584 | * Description |
585 | * Iterate and fetch multiple elements in a map. |
586 | * |
587 | * Two opaque values are used to manage batch operations, |
588 | * *in_batch* and *out_batch*. Initially, *in_batch* must be set |
589 | * to NULL to begin the batched operation. After each subsequent |
590 | * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant |
591 | * *out_batch* as the *in_batch* for the next operation to |
592 | * continue iteration from the current point. |
593 | * |
594 | * The *keys* and *values* are output parameters which must point |
595 | * to memory large enough to hold *count* items based on the key |
596 | * and value size of the map *map_fd*. The *keys* buffer must be |
597 | * of *key_size* * *count*. The *values* buffer must be of |
598 | * *value_size* * *count*. |
599 | * |
600 | * The *elem_flags* argument may be specified as one of the |
601 | * following: |
602 | * |
603 | * **BPF_F_LOCK** |
604 | * Look up the value of a spin-locked map without |
605 | * returning the lock. This must be specified if the |
606 | * elements contain a spinlock. |
607 | * |
608 | * On success, *count* elements from the map are copied into the |
609 | * user buffer, with the keys copied into *keys* and the values |
610 | * copied into the corresponding indices in *values*. |
611 | * |
612 | * If an error is returned and *errno* is not **EFAULT**, *count* |
613 | * is set to the number of successfully processed elements. |
614 | * |
615 | * Return |
616 | * Returns zero on success. On error, -1 is returned and *errno* |
617 | * is set appropriately. |
618 | * |
619 | * May set *errno* to **ENOSPC** to indicate that *keys* or |
620 | * *values* is too small to dump an entire bucket during |
621 | * iteration of a hash-based map type. |
622 | * |
623 | * BPF_MAP_LOOKUP_AND_DELETE_BATCH |
624 | * Description |
625 | * Iterate and delete all elements in a map. |
626 | * |
627 | * This operation has the same behavior as |
628 | * **BPF_MAP_LOOKUP_BATCH** with two exceptions: |
629 | * |
630 | * * Every element that is successfully returned is also deleted |
 *		  from the map. This is at most *count* elements. Note that
632 | * *count* is both an input and an output parameter. |
633 | * * Upon returning with *errno* set to **EFAULT**, up to |
634 | * *count* elements may be deleted without returning the keys |
635 | * and values of the deleted elements. |
636 | * |
637 | * Return |
638 | * Returns zero on success. On error, -1 is returned and *errno* |
639 | * is set appropriately. |
640 | * |
641 | * BPF_MAP_UPDATE_BATCH |
642 | * Description |
643 | * Update multiple elements in a map by *key*. |
644 | * |
645 | * The *keys* and *values* are input parameters which must point |
646 | * to memory large enough to hold *count* items based on the key |
647 | * and value size of the map *map_fd*. The *keys* buffer must be |
648 | * of *key_size* * *count*. The *values* buffer must be of |
649 | * *value_size* * *count*. |
650 | * |
651 | * Each element specified in *keys* is sequentially updated to the |
652 | * value in the corresponding index in *values*. The *in_batch* |
653 | * and *out_batch* parameters are ignored and should be zeroed. |
654 | * |
655 | * The *elem_flags* argument should be specified as one of the |
656 | * following: |
657 | * |
658 | * **BPF_ANY** |
 *			Create new elements or update existing elements.
660 | * **BPF_NOEXIST** |
661 | * Create new elements only if they do not exist. |
662 | * **BPF_EXIST** |
663 | * Update existing elements. |
664 | * **BPF_F_LOCK** |
665 | * Update spin_lock-ed map elements. This must be |
666 | * specified if the map value contains a spinlock. |
667 | * |
668 | * On success, *count* elements from the map are updated. |
669 | * |
670 | * If an error is returned and *errno* is not **EFAULT**, *count* |
671 | * is set to the number of successfully processed elements. |
672 | * |
673 | * Return |
674 | * Returns zero on success. On error, -1 is returned and *errno* |
675 | * is set appropriately. |
676 | * |
677 | * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or |
678 | * **E2BIG**. **E2BIG** indicates that the number of elements in |
679 | * the map reached the *max_entries* limit specified at map |
680 | * creation time. |
681 | * |
682 | * May set *errno* to one of the following error codes under |
683 | * specific circumstances: |
684 | * |
685 | * **EEXIST** |
686 | * If *flags* specifies **BPF_NOEXIST** and the element |
687 | * with *key* already exists in the map. |
688 | * **ENOENT** |
689 | * If *flags* specifies **BPF_EXIST** and the element with |
690 | * *key* does not exist in the map. |
691 | * |
692 | * BPF_MAP_DELETE_BATCH |
693 | * Description |
694 | * Delete multiple elements in a map by *key*. |
695 | * |
696 | * The *keys* parameter is an input parameter which must point |
697 | * to memory large enough to hold *count* items based on the key |
698 | * size of the map *map_fd*, that is, *key_size* * *count*. |
699 | * |
700 | * Each element specified in *keys* is sequentially deleted. The |
701 | * *in_batch*, *out_batch*, and *values* parameters are ignored |
702 | * and should be zeroed. |
703 | * |
704 | * The *elem_flags* argument may be specified as one of the |
705 | * following: |
706 | * |
707 | * **BPF_F_LOCK** |
708 | * Look up the value of a spin-locked map without |
709 | * returning the lock. This must be specified if the |
710 | * elements contain a spinlock. |
711 | * |
 *		On success, *count* elements from the map are deleted.
713 | * |
714 | * If an error is returned and *errno* is not **EFAULT**, *count* |
715 | * is set to the number of successfully processed elements. If |
 *		*errno* is **EFAULT**, up to *count* elements may have been
717 | * deleted. |
718 | * |
719 | * Return |
720 | * Returns zero on success. On error, -1 is returned and *errno* |
721 | * is set appropriately. |
722 | * |
723 | * BPF_LINK_CREATE |
724 | * Description |
725 | * Attach an eBPF program to a *target_fd* at the specified |
726 | * *attach_type* hook and return a file descriptor handle for |
727 | * managing the link. |
728 | * |
729 | * Return |
730 | * A new file descriptor (a nonnegative integer), or -1 if an |
731 | * error occurred (in which case, *errno* is set appropriately). |
732 | * |
733 | * BPF_LINK_UPDATE |
734 | * Description |
735 | * Update the eBPF program in the specified *link_fd* to |
736 | * *new_prog_fd*. |
737 | * |
738 | * Return |
739 | * Returns zero on success. On error, -1 is returned and *errno* |
740 | * is set appropriately. |
741 | * |
742 | * BPF_LINK_GET_FD_BY_ID |
743 | * Description |
744 | * Open a file descriptor for the eBPF Link corresponding to |
745 | * *link_id*. |
746 | * |
747 | * Return |
748 | * A new file descriptor (a nonnegative integer), or -1 if an |
749 | * error occurred (in which case, *errno* is set appropriately). |
750 | * |
751 | * BPF_LINK_GET_NEXT_ID |
752 | * Description |
753 | * Fetch the next eBPF link currently loaded into the kernel. |
754 | * |
755 | * Looks for the eBPF link with an id greater than *start_id* |
756 | * and updates *next_id* on success. If no other eBPF links |
757 | * remain with ids higher than *start_id*, returns -1 and sets |
758 | * *errno* to **ENOENT**. |
759 | * |
760 | * Return |
761 | * Returns zero on success. On error, or when no id remains, -1 |
762 | * is returned and *errno* is set appropriately. |
763 | * |
764 | * BPF_ENABLE_STATS |
765 | * Description |
766 | * Enable eBPF runtime statistics gathering. |
767 | * |
768 | * Runtime statistics gathering for the eBPF runtime is disabled |
769 | * by default to minimize the corresponding performance overhead. |
770 | * This command enables statistics globally. |
771 | * |
772 | * Multiple programs may independently enable statistics. |
773 | * After gathering the desired statistics, eBPF runtime statistics |
774 | * may be disabled again by calling **close**\ (2) for the file |
775 | * descriptor returned by this function. Statistics will only be |
776 | * disabled system-wide when all outstanding file descriptors |
777 | * returned by prior calls for this subcommand are closed. |
778 | * |
779 | * Return |
780 | * A new file descriptor (a nonnegative integer), or -1 if an |
781 | * error occurred (in which case, *errno* is set appropriately). |
782 | * |
783 | * BPF_ITER_CREATE |
784 | * Description |
785 | * Create an iterator on top of the specified *link_fd* (as |
786 | * previously created using **BPF_LINK_CREATE**) and return a |
787 | * file descriptor that can be used to trigger the iteration. |
788 | * |
789 | * If the resulting file descriptor is pinned to the filesystem |
790 | * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls |
791 | * for that path will trigger the iterator to read kernel state |
792 | * using the eBPF program attached to *link_fd*. |
793 | * |
794 | * Return |
795 | * A new file descriptor (a nonnegative integer), or -1 if an |
796 | * error occurred (in which case, *errno* is set appropriately). |
797 | * |
798 | * BPF_LINK_DETACH |
799 | * Description |
800 | * Forcefully detach the specified *link_fd* from its |
801 | * corresponding attachment point. |
802 | * |
803 | * Return |
804 | * Returns zero on success. On error, -1 is returned and *errno* |
805 | * is set appropriately. |
806 | * |
807 | * BPF_PROG_BIND_MAP |
808 | * Description |
809 | * Bind a map to the lifetime of an eBPF program. |
810 | * |
811 | * The map identified by *map_fd* is bound to the program |
812 | * identified by *prog_fd* and only released when *prog_fd* is |
813 | * released. This may be used in cases where metadata should be |
814 | * associated with a program which otherwise does not contain any |
815 | * references to the map (for example, embedded in the eBPF |
816 | * program instructions). |
817 | * |
818 | * Return |
819 | * Returns zero on success. On error, -1 is returned and *errno* |
820 | * is set appropriately. |
821 | * |
822 | * NOTES |
823 | * eBPF objects (maps and programs) can be shared between processes. |
824 | * |
825 | * * After **fork**\ (2), the child inherits file descriptors |
826 | * referring to the same eBPF objects. |
827 | * * File descriptors referring to eBPF objects can be transferred over |
828 | * **unix**\ (7) domain sockets. |
829 | * * File descriptors referring to eBPF objects can be duplicated in the |
830 | * usual way, using **dup**\ (2) and similar calls. |
831 | * * File descriptors referring to eBPF objects can be pinned to the |
832 | * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2). |
833 | * |
834 | * An eBPF object is deallocated only after all file descriptors referring |
835 | * to the object have been closed and no references remain pinned to the |
836 | * filesystem or attached (for example, bound to a program or device). |
837 | */ |
838 | enum bpf_cmd { |
839 | BPF_MAP_CREATE, |
840 | BPF_MAP_LOOKUP_ELEM, |
841 | BPF_MAP_UPDATE_ELEM, |
842 | BPF_MAP_DELETE_ELEM, |
843 | BPF_MAP_GET_NEXT_KEY, |
844 | BPF_PROG_LOAD, |
845 | BPF_OBJ_PIN, |
846 | BPF_OBJ_GET, |
847 | BPF_PROG_ATTACH, |
848 | BPF_PROG_DETACH, |
849 | BPF_PROG_TEST_RUN, |
850 | BPF_PROG_RUN = BPF_PROG_TEST_RUN, |
851 | BPF_PROG_GET_NEXT_ID, |
852 | BPF_MAP_GET_NEXT_ID, |
853 | BPF_PROG_GET_FD_BY_ID, |
854 | BPF_MAP_GET_FD_BY_ID, |
855 | BPF_OBJ_GET_INFO_BY_FD, |
856 | BPF_PROG_QUERY, |
857 | BPF_RAW_TRACEPOINT_OPEN, |
858 | BPF_BTF_LOAD, |
859 | BPF_BTF_GET_FD_BY_ID, |
860 | BPF_TASK_FD_QUERY, |
861 | BPF_MAP_LOOKUP_AND_DELETE_ELEM, |
862 | BPF_MAP_FREEZE, |
863 | BPF_BTF_GET_NEXT_ID, |
864 | BPF_MAP_LOOKUP_BATCH, |
865 | BPF_MAP_LOOKUP_AND_DELETE_BATCH, |
866 | BPF_MAP_UPDATE_BATCH, |
867 | BPF_MAP_DELETE_BATCH, |
868 | BPF_LINK_CREATE, |
869 | BPF_LINK_UPDATE, |
870 | BPF_LINK_GET_FD_BY_ID, |
871 | BPF_LINK_GET_NEXT_ID, |
872 | BPF_ENABLE_STATS, |
873 | BPF_ITER_CREATE, |
874 | BPF_LINK_DETACH, |
875 | BPF_PROG_BIND_MAP, |
876 | }; |
877 | |
878 | enum bpf_map_type { |
879 | BPF_MAP_TYPE_UNSPEC, |
880 | BPF_MAP_TYPE_HASH, |
881 | BPF_MAP_TYPE_ARRAY, |
882 | BPF_MAP_TYPE_PROG_ARRAY, |
883 | BPF_MAP_TYPE_PERF_EVENT_ARRAY, |
884 | BPF_MAP_TYPE_PERCPU_HASH, |
885 | BPF_MAP_TYPE_PERCPU_ARRAY, |
886 | BPF_MAP_TYPE_STACK_TRACE, |
887 | BPF_MAP_TYPE_CGROUP_ARRAY, |
888 | BPF_MAP_TYPE_LRU_HASH, |
889 | BPF_MAP_TYPE_LRU_PERCPU_HASH, |
890 | BPF_MAP_TYPE_LPM_TRIE, |
891 | BPF_MAP_TYPE_ARRAY_OF_MAPS, |
892 | BPF_MAP_TYPE_HASH_OF_MAPS, |
893 | BPF_MAP_TYPE_DEVMAP, |
894 | BPF_MAP_TYPE_SOCKMAP, |
895 | BPF_MAP_TYPE_CPUMAP, |
896 | BPF_MAP_TYPE_XSKMAP, |
897 | BPF_MAP_TYPE_SOCKHASH, |
898 | BPF_MAP_TYPE_CGROUP_STORAGE, |
899 | BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, |
900 | BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, |
901 | BPF_MAP_TYPE_QUEUE, |
902 | BPF_MAP_TYPE_STACK, |
903 | BPF_MAP_TYPE_SK_STORAGE, |
904 | BPF_MAP_TYPE_DEVMAP_HASH, |
905 | BPF_MAP_TYPE_STRUCT_OPS, |
906 | BPF_MAP_TYPE_RINGBUF, |
907 | BPF_MAP_TYPE_INODE_STORAGE, |
908 | BPF_MAP_TYPE_TASK_STORAGE, |
909 | }; |
910 | |
911 | /* Note that tracing related programs such as |
912 | * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} |
913 | * are not subject to a stable API since kernel internal data |
914 | * structures can change from release to release and may |
915 | * therefore break existing tracing BPF programs. Tracing BPF |
916 | * programs correspond to /a/ specific kernel which is to be |
917 | * analyzed, and not /a/ specific kernel /and/ all future ones. |
918 | */ |
919 | enum bpf_prog_type { |
920 | BPF_PROG_TYPE_UNSPEC, |
921 | BPF_PROG_TYPE_SOCKET_FILTER, |
922 | BPF_PROG_TYPE_KPROBE, |
923 | BPF_PROG_TYPE_SCHED_CLS, |
924 | BPF_PROG_TYPE_SCHED_ACT, |
925 | BPF_PROG_TYPE_TRACEPOINT, |
926 | BPF_PROG_TYPE_XDP, |
927 | BPF_PROG_TYPE_PERF_EVENT, |
928 | BPF_PROG_TYPE_CGROUP_SKB, |
929 | BPF_PROG_TYPE_CGROUP_SOCK, |
930 | BPF_PROG_TYPE_LWT_IN, |
931 | BPF_PROG_TYPE_LWT_OUT, |
932 | BPF_PROG_TYPE_LWT_XMIT, |
933 | BPF_PROG_TYPE_SOCK_OPS, |
934 | BPF_PROG_TYPE_SK_SKB, |
935 | BPF_PROG_TYPE_CGROUP_DEVICE, |
936 | BPF_PROG_TYPE_SK_MSG, |
937 | BPF_PROG_TYPE_RAW_TRACEPOINT, |
938 | BPF_PROG_TYPE_CGROUP_SOCK_ADDR, |
939 | BPF_PROG_TYPE_LWT_SEG6LOCAL, |
940 | BPF_PROG_TYPE_LIRC_MODE2, |
941 | BPF_PROG_TYPE_SK_REUSEPORT, |
942 | BPF_PROG_TYPE_FLOW_DISSECTOR, |
943 | BPF_PROG_TYPE_CGROUP_SYSCTL, |
944 | BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, |
945 | BPF_PROG_TYPE_CGROUP_SOCKOPT, |
946 | BPF_PROG_TYPE_TRACING, |
947 | BPF_PROG_TYPE_STRUCT_OPS, |
948 | BPF_PROG_TYPE_EXT, |
949 | BPF_PROG_TYPE_LSM, |
950 | BPF_PROG_TYPE_SK_LOOKUP, |
951 | BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ |
952 | }; |
953 | |
954 | enum bpf_attach_type { |
955 | BPF_CGROUP_INET_INGRESS, |
956 | BPF_CGROUP_INET_EGRESS, |
957 | BPF_CGROUP_INET_SOCK_CREATE, |
958 | BPF_CGROUP_SOCK_OPS, |
959 | BPF_SK_SKB_STREAM_PARSER, |
960 | BPF_SK_SKB_STREAM_VERDICT, |
961 | BPF_CGROUP_DEVICE, |
962 | BPF_SK_MSG_VERDICT, |
963 | BPF_CGROUP_INET4_BIND, |
964 | BPF_CGROUP_INET6_BIND, |
965 | BPF_CGROUP_INET4_CONNECT, |
966 | BPF_CGROUP_INET6_CONNECT, |
967 | BPF_CGROUP_INET4_POST_BIND, |
968 | BPF_CGROUP_INET6_POST_BIND, |
969 | BPF_CGROUP_UDP4_SENDMSG, |
970 | BPF_CGROUP_UDP6_SENDMSG, |
971 | BPF_LIRC_MODE2, |
972 | BPF_FLOW_DISSECTOR, |
973 | BPF_CGROUP_SYSCTL, |
974 | BPF_CGROUP_UDP4_RECVMSG, |
975 | BPF_CGROUP_UDP6_RECVMSG, |
976 | BPF_CGROUP_GETSOCKOPT, |
977 | BPF_CGROUP_SETSOCKOPT, |
978 | BPF_TRACE_RAW_TP, |
979 | BPF_TRACE_FENTRY, |
980 | BPF_TRACE_FEXIT, |
981 | BPF_MODIFY_RETURN, |
982 | BPF_LSM_MAC, |
983 | BPF_TRACE_ITER, |
984 | BPF_CGROUP_INET4_GETPEERNAME, |
985 | BPF_CGROUP_INET6_GETPEERNAME, |
986 | BPF_CGROUP_INET4_GETSOCKNAME, |
987 | BPF_CGROUP_INET6_GETSOCKNAME, |
988 | BPF_XDP_DEVMAP, |
989 | BPF_CGROUP_INET_SOCK_RELEASE, |
990 | BPF_XDP_CPUMAP, |
991 | BPF_SK_LOOKUP, |
992 | BPF_XDP, |
993 | BPF_SK_SKB_VERDICT, |
994 | BPF_SK_REUSEPORT_SELECT, |
995 | BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, |
996 | BPF_PERF_EVENT, |
997 | __MAX_BPF_ATTACH_TYPE |
998 | }; |
999 | |
1000 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE |
1001 | |
1002 | enum bpf_link_type { |
1003 | BPF_LINK_TYPE_UNSPEC = 0, |
1004 | BPF_LINK_TYPE_RAW_TRACEPOINT = 1, |
1005 | BPF_LINK_TYPE_TRACING = 2, |
1006 | BPF_LINK_TYPE_CGROUP = 3, |
1007 | BPF_LINK_TYPE_ITER = 4, |
1008 | BPF_LINK_TYPE_NETNS = 5, |
1009 | BPF_LINK_TYPE_XDP = 6, |
1010 | BPF_LINK_TYPE_PERF_EVENT = 7, |
1011 | |
1012 | MAX_BPF_LINK_TYPE, |
1013 | }; |
1014 | |
1015 | /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command |
1016 | * |
1017 | * NONE(default): No further bpf programs allowed in the subtree. |
1018 | * |
1019 | * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, |
1020 | * the program in this cgroup yields to sub-cgroup program. |
1021 | * |
1022 | * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, |
1023 | * that cgroup program gets run in addition to the program in this cgroup. |
1024 | * |
1025 | * Only one program is allowed to be attached to a cgroup with |
1026 | * NONE or BPF_F_ALLOW_OVERRIDE flag. |
1027 | * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will |
 * release the old program and attach the new one. Attach flags have to match.
1029 | * |
1030 | * Multiple programs are allowed to be attached to a cgroup with |
1031 | * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order |
 * (those that were attached first, run first).
 * The programs of the sub-cgroup are executed first, then the programs of
 * this cgroup, and then the programs of the parent cgroup.
 * When a child program makes a decision (like picking a TCP CA or sock bind),
 * the parent program has a chance to override it.
1037 | * |
 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
 * programs for a cgroup. It is also possible to replace an old program at
 * any position by additionally specifying the BPF_F_REPLACE flag and the fd
 * of the program to be replaced in the replace_bpf_fd attribute. The old
 * program at this position will be released.
1042 | * |
1043 | * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. |
1044 | * A cgroup with NONE doesn't allow any programs in sub-cgroups. |
1045 | * Ex1: |
1046 | * cgrp1 (MULTI progs A, B) -> |
1047 | * cgrp2 (OVERRIDE prog C) -> |
1048 | * cgrp3 (MULTI prog D) -> |
1049 | * cgrp4 (OVERRIDE prog E) -> |
1050 | * cgrp5 (NONE prog F) |
1051 | * the event in cgrp5 triggers execution of F,D,A,B in that order. |
1052 | * if prog F is detached, the execution is E,D,A,B |
1053 | * if prog F and D are detached, the execution is E,A,B |
1054 | * if prog F, E and D are detached, the execution is C,A,B |
1055 | * |
1056 | * All eligible programs are executed regardless of return code from |
1057 | * earlier programs. |
1058 | */ |
1059 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) |
1060 | #define BPF_F_ALLOW_MULTI (1U << 1) |
1061 | #define BPF_F_REPLACE (1U << 2) |
1062 | |
1063 | /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the |
1064 | * verifier will perform strict alignment checking as if the kernel |
1065 | * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, |
1066 | * and NET_IP_ALIGN defined to 2. |
1067 | */ |
1068 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) |
1069 | |
/* If BPF_F_ANY_ALIGNMENT is used in the BPF_PROG_LOAD command, the
1071 | * verifier will allow any alignment whatsoever. On platforms |
 * with strict alignment requirements for loads and stores (such
1073 | * as sparc and mips) the verifier validates that all loads and |
1074 | * stores provably follow this requirement. This flag turns that |
1075 | * checking and enforcement off. |
1076 | * |
1077 | * It is mostly used for testing when we want to validate the |
1078 | * context and memory access aspects of the verifier, but because |
1079 | * of an unaligned access the alignment check would trigger before |
1080 | * the one we are interested in. |
1081 | */ |
1082 | #define BPF_F_ANY_ALIGNMENT (1U << 1) |
1083 | |
/* BPF_F_TEST_RND_HI32 is used in the BPF_PROG_LOAD command for testing
 * purposes. The verifier does sub-register def/use analysis and identifies
 * instructions whose def only matters for the low 32 bits and whose high
 * 32 bits are never referenced later through implicit zero extension. The
 * verifier notifies JIT back-ends that it is safe to skip clearing the high
 * 32 bits for these instructions, which saves some back-ends a lot of
 * code-gen. However, such an optimization is not necessary on some arches,
 * for example x86_64 and arm64, whose JIT back-ends therefore do not use the
 * verifier's analysis result. But we still want a way to verify the
 * correctness of the described optimization on x86_64, where testsuites are
 * frequently exercised.
 *
 * Hence this flag. Once it is set, the verifier randomizes the high 32 bits
 * of those instructions that have been identified as safe to leave uncleared.
 * If the verifier's analysis is wrong, such randomization will regress tests
 * and expose the bug.
 */
1100 | #define BPF_F_TEST_RND_HI32 (1U << 2) |
1101 | |
1102 | /* The verifier internal test flag. Behavior is undefined */ |
1103 | #define BPF_F_TEST_STATE_FREQ (1U << 3) |
1104 | |
1105 | /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will |
1106 | * restrict map and helper usage for such programs. Sleepable BPF programs can |
1107 | * only be attached to hooks where kernel execution context allows sleeping. |
1108 | * Such programs are allowed to use helpers that may sleep like |
1109 | * bpf_copy_from_user(). |
1110 | */ |
1111 | #define BPF_F_SLEEPABLE (1U << 4) |
1112 | |
/* When BPF ldimm64's insn[0].src_reg != 0, the instruction can have
 * the following extensions:
1115 | * |
1116 | * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX] |
1117 | * insn[0].imm: map fd or fd_idx |
1118 | * insn[1].imm: 0 |
1119 | * insn[0].off: 0 |
1120 | * insn[1].off: 0 |
1121 | * ldimm64 rewrite: address of map |
1122 | * verifier type: CONST_PTR_TO_MAP |
1123 | */ |
1124 | #define BPF_PSEUDO_MAP_FD 1 |
1125 | #define BPF_PSEUDO_MAP_IDX 5 |
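
/* Example (illustrative only): such an ldimm64 is encoded as a pair of
 * instructions; e.g. loading the address of the map behind a *map_fd* file
 * descriptor into r1 (the second instruction carries the upper 32 bits of
 * the immediate, zero here):
 *
 *	struct bpf_insn ld_map[2] = {
 *		{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
 *		  .src_reg = BPF_PSEUDO_MAP_FD, .imm = map_fd },
 *		{ .imm = 0 },
 *	};
 */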
1126 | |
1127 | /* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE |
1128 | * insn[0].imm: map fd or fd_idx |
1129 | * insn[1].imm: offset into value |
1130 | * insn[0].off: 0 |
1131 | * insn[1].off: 0 |
1132 | * ldimm64 rewrite: address of map[0]+offset |
1133 | * verifier type: PTR_TO_MAP_VALUE |
1134 | */ |
1135 | #define BPF_PSEUDO_MAP_VALUE 2 |
1136 | #define BPF_PSEUDO_MAP_IDX_VALUE 6 |
1137 | |
1138 | /* insn[0].src_reg: BPF_PSEUDO_BTF_ID |
 * insn[0].imm:      kernel btf id of VAR
1140 | * insn[1].imm: 0 |
1141 | * insn[0].off: 0 |
1142 | * insn[1].off: 0 |
1143 | * ldimm64 rewrite: address of the kernel variable |
1144 | * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var |
1145 | * is struct/union. |
1146 | */ |
1147 | #define BPF_PSEUDO_BTF_ID 3 |
1148 | /* insn[0].src_reg: BPF_PSEUDO_FUNC |
1149 | * insn[0].imm: insn offset to the func |
1150 | * insn[1].imm: 0 |
1151 | * insn[0].off: 0 |
1152 | * insn[1].off: 0 |
1153 | * ldimm64 rewrite: address of the function |
1154 | * verifier type: PTR_TO_FUNC. |
1155 | */ |
1156 | #define BPF_PSEUDO_FUNC 4 |
1157 | |
1158 | /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative |
1159 | * offset to another bpf function |
1160 | */ |
1161 | #define BPF_PSEUDO_CALL 1 |
1162 | /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL, |
1163 | * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel |
1164 | */ |
1165 | #define BPF_PSEUDO_KFUNC_CALL 2 |
1166 | |
1167 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
1168 | enum { |
1169 | BPF_ANY = 0, /* create new element or update existing */ |
1170 | BPF_NOEXIST = 1, /* create new element if it didn't exist */ |
1171 | BPF_EXIST = 2, /* update existing element */ |
1172 | BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ |
1173 | }; |
1174 | |
1175 | /* flags for BPF_MAP_CREATE command */ |
1176 | enum { |
1177 | BPF_F_NO_PREALLOC = (1U << 0), |
1178 | /* Instead of having one common LRU list in the |
1179 | * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list |
1180 | * which can scale and perform better. |
1181 | * Note, the LRU nodes (including free nodes) cannot be moved |
1182 | * across different LRU lists. |
1183 | */ |
1184 | BPF_F_NO_COMMON_LRU = (1U << 1), |
1185 | /* Specify numa node during map creation */ |
1186 | BPF_F_NUMA_NODE = (1U << 2), |
1187 | |
1188 | /* Flags for accessing BPF object from syscall side. */ |
1189 | BPF_F_RDONLY = (1U << 3), |
1190 | BPF_F_WRONLY = (1U << 4), |
1191 | |
1192 | /* Flag for stack_map, store build_id+offset instead of pointer */ |
1193 | BPF_F_STACK_BUILD_ID = (1U << 5), |
1194 | |
1195 | /* Zero-initialize hash function seed. This should only be used for testing. */ |
1196 | BPF_F_ZERO_SEED = (1U << 6), |
1197 | |
1198 | /* Flags for accessing BPF object from program side. */ |
1199 | BPF_F_RDONLY_PROG = (1U << 7), |
1200 | BPF_F_WRONLY_PROG = (1U << 8), |
1201 | |
1202 | /* Clone map from listener for newly accepted socket */ |
1203 | BPF_F_CLONE = (1U << 9), |
1204 | |
1205 | /* Enable memory-mapping BPF map */ |
1206 | BPF_F_MMAPABLE = (1U << 10), |
1207 | |
1208 | /* Share perf_event among processes */ |
1209 | BPF_F_PRESERVE_ELEMS = (1U << 11), |
1210 | |
1211 | /* Create a map that is suitable to be an inner map with dynamic max entries */ |
1212 | BPF_F_INNER_MAP = (1U << 12), |
1213 | }; |
1214 | |
1215 | /* Flags for BPF_PROG_QUERY. */ |
1216 | |
1217 | /* Query effective (directly attached + inherited from ancestor cgroups) |
1218 | * programs that will be executed for events within a cgroup. |
 * With this flag, attach_flags are returned only for directly attached programs.
1220 | */ |
1221 | #define BPF_F_QUERY_EFFECTIVE (1U << 0) |
1222 | |
1223 | /* Flags for BPF_PROG_TEST_RUN */ |
1224 | |
1225 | /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ |
1226 | #define BPF_F_TEST_RUN_ON_CPU (1U << 0) |
1227 | |
1228 | /* type for BPF_ENABLE_STATS */ |
1229 | enum bpf_stats_type { |
	/* enable run_time_ns and run_cnt */
1231 | BPF_STATS_RUN_TIME = 0, |
1232 | }; |
1233 | |
1234 | enum bpf_stack_build_id_status { |
	/* user space needs an empty entry to identify the end of a trace */
1236 | BPF_STACK_BUILD_ID_EMPTY = 0, |
1237 | /* with valid build_id and offset */ |
1238 | BPF_STACK_BUILD_ID_VALID = 1, |
1239 | /* couldn't get build_id, fallback to ip */ |
1240 | BPF_STACK_BUILD_ID_IP = 2, |
1241 | }; |
1242 | |
1243 | #define BPF_BUILD_ID_SIZE 20 |
1244 | struct bpf_stack_build_id { |
1245 | __s32 status; |
1246 | unsigned char build_id[BPF_BUILD_ID_SIZE]; |
1247 | union { |
1248 | __u64 offset; |
1249 | __u64 ip; |
1250 | }; |
1251 | }; |
1252 | |
1253 | #define BPF_OBJ_NAME_LEN 16U |
1254 | |
1255 | union bpf_attr { |
1256 | struct { /* anonymous struct used by BPF_MAP_CREATE command */ |
1257 | __u32 map_type; /* one of enum bpf_map_type */ |
1258 | __u32 key_size; /* size of key in bytes */ |
1259 | __u32 value_size; /* size of value in bytes */ |
1260 | __u32 max_entries; /* max number of entries in a map */ |
1261 | __u32 map_flags; /* BPF_MAP_CREATE related |
1262 | * flags defined above. |
1263 | */ |
1264 | __u32 inner_map_fd; /* fd pointing to the inner map */ |
1265 | __u32 numa_node; /* numa node (effective only if |
1266 | * BPF_F_NUMA_NODE is set). |
1267 | */ |
1268 | char map_name[BPF_OBJ_NAME_LEN]; |
1269 | __u32 map_ifindex; /* ifindex of netdev to create on */ |
1270 | __u32 btf_fd; /* fd pointing to a BTF type data */ |
1271 | __u32 btf_key_type_id; /* BTF type_id of the key */ |
1272 | __u32 btf_value_type_id; /* BTF type_id of the value */ |
1273 | __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- |
1274 | * struct stored as the |
1275 | * map value |
1276 | */ |
1277 | }; |
1278 | |
1279 | struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ |
1280 | __u32 map_fd; |
1281 | __aligned_u64 key; |
1282 | union { |
1283 | __aligned_u64 value; |
1284 | __aligned_u64 next_key; |
1285 | }; |
1286 | __u64 flags; |
1287 | }; |
1288 | |
1289 | struct { /* struct used by BPF_MAP_*_BATCH commands */ |
1290 | __aligned_u64 in_batch; /* start batch, |
1291 | * NULL to start from beginning |
1292 | */ |
1293 | __aligned_u64 out_batch; /* output: next start batch */ |
1294 | __aligned_u64 keys; |
1295 | __aligned_u64 values; |
1296 | __u32 count; /* input/output: |
1297 | * input: # of key/value |
1298 | * elements |
1299 | * output: # of filled elements |
1300 | */ |
1301 | __u32 map_fd; |
1302 | __u64 elem_flags; |
1303 | __u64 flags; |
1304 | } batch; |
1305 | |
1306 | struct { /* anonymous struct used by BPF_PROG_LOAD command */ |
1307 | __u32 prog_type; /* one of enum bpf_prog_type */ |
1308 | __u32 insn_cnt; |
1309 | __aligned_u64 insns; |
1310 | __aligned_u64 license; |
1311 | __u32 log_level; /* verbosity level of verifier */ |
1312 | __u32 log_size; /* size of user buffer */ |
1313 | __aligned_u64 log_buf; /* user supplied buffer */ |
1314 | __u32 kern_version; /* not used */ |
1315 | __u32 prog_flags; |
1316 | char prog_name[BPF_OBJ_NAME_LEN]; |
1317 | __u32 prog_ifindex; /* ifindex of netdev to prep for */ |
1318 | /* For some prog types expected attach type must be known at |
1319 | * load time to verify attach type specific parts of prog |
1320 | * (context accesses, allowed helpers, etc). |
1321 | */ |
1322 | __u32 expected_attach_type; |
1323 | __u32 prog_btf_fd; /* fd pointing to BTF type data */ |
1324 | __u32 func_info_rec_size; /* userspace bpf_func_info size */ |
1325 | __aligned_u64 func_info; /* func info */ |
1326 | __u32 func_info_cnt; /* number of bpf_func_info records */ |
1327 | __u32 line_info_rec_size; /* userspace bpf_line_info size */ |
1328 | __aligned_u64 line_info; /* line info */ |
1329 | __u32 line_info_cnt; /* number of bpf_line_info records */ |
1330 | __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ |
1331 | union { |
1332 | /* valid prog_fd to attach to bpf prog */ |
1333 | __u32 attach_prog_fd; |
1334 | /* or valid module BTF object fd or 0 to attach to vmlinux */ |
1335 | __u32 attach_btf_obj_fd; |
1336 | }; |
1337 | __u32 :32; /* pad */ |
1338 | __aligned_u64 fd_array; /* array of FDs */ |
1339 | }; |
1340 | |
1341 | struct { /* anonymous struct used by BPF_OBJ_* commands */ |
1342 | __aligned_u64 pathname; |
1343 | __u32 bpf_fd; |
1344 | __u32 file_flags; |
1345 | }; |
1346 | |
1347 | struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ |
1348 | __u32 target_fd; /* container object to attach to */ |
1349 | __u32 attach_bpf_fd; /* eBPF program to attach */ |
1350 | __u32 attach_type; |
1351 | __u32 attach_flags; |
1352 | __u32 replace_bpf_fd; /* previously attached eBPF |
1353 | * program to replace if |
1354 | * BPF_F_REPLACE is used |
1355 | */ |
1356 | }; |
1357 | |
1358 | struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ |
1359 | __u32 prog_fd; |
1360 | __u32 retval; |
1361 | __u32 data_size_in; /* input: len of data_in */ |
1362 | __u32 data_size_out; /* input/output: len of data_out |
1363 | * returns ENOSPC if data_out |
1364 | * is too small. |
1365 | */ |
1366 | __aligned_u64 data_in; |
1367 | __aligned_u64 data_out; |
1368 | __u32 repeat; |
1369 | __u32 duration; |
1370 | __u32 ctx_size_in; /* input: len of ctx_in */ |
1371 | __u32 ctx_size_out; /* input/output: len of ctx_out |
1372 | * returns ENOSPC if ctx_out |
1373 | * is too small. |
1374 | */ |
1375 | __aligned_u64 ctx_in; |
1376 | __aligned_u64 ctx_out; |
1377 | __u32 flags; |
1378 | __u32 cpu; |
1379 | } test; |
1380 | |
1381 | struct { /* anonymous struct used by BPF_*_GET_*_ID */ |
1382 | union { |
1383 | __u32 start_id; |
1384 | __u32 prog_id; |
1385 | __u32 map_id; |
1386 | __u32 btf_id; |
1387 | __u32 link_id; |
1388 | }; |
1389 | __u32 next_id; |
1390 | __u32 open_flags; |
1391 | }; |
1392 | |
1393 | struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ |
1394 | __u32 bpf_fd; |
1395 | __u32 info_len; |
1396 | __aligned_u64 info; |
1397 | } info; |
1398 | |
1399 | struct { /* anonymous struct used by BPF_PROG_QUERY command */ |
1400 | __u32 target_fd; /* container object to query */ |
1401 | __u32 attach_type; |
1402 | __u32 query_flags; |
1403 | __u32 attach_flags; |
1404 | __aligned_u64 prog_ids; |
1405 | __u32 prog_cnt; |
1406 | } query; |
1407 | |
1408 | struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ |
1409 | __u64 name; |
1410 | __u32 prog_fd; |
1411 | } raw_tracepoint; |
1412 | |
1413 | struct { /* anonymous struct for BPF_BTF_LOAD */ |
1414 | __aligned_u64 btf; |
1415 | __aligned_u64 btf_log_buf; |
1416 | __u32 btf_size; |
1417 | __u32 btf_log_size; |
1418 | __u32 btf_log_level; |
1419 | }; |
1420 | |
1421 | struct { |
1422 | __u32 pid; /* input: pid */ |
1423 | __u32 fd; /* input: fd */ |
1424 | __u32 flags; /* input: flags */ |
1425 | __u32 buf_len; /* input/output: buf len */ |
1426 | __aligned_u64 buf; /* input/output: |
1427 | * tp_name for tracepoint |
1428 | * symbol for kprobe |
1429 | * filename for uprobe |
1430 | */ |
		__u32		prog_id;	/* output: prog_id */
1432 | __u32 fd_type; /* output: BPF_FD_TYPE_* */ |
1433 | __u64 probe_offset; /* output: probe_offset */ |
1434 | __u64 probe_addr; /* output: probe_addr */ |
1435 | } task_fd_query; |
1436 | |
1437 | struct { /* struct used by BPF_LINK_CREATE command */ |
1438 | __u32 prog_fd; /* eBPF program to attach */ |
1439 | union { |
1440 | __u32 target_fd; /* object to attach to */ |
1441 | __u32 target_ifindex; /* target ifindex */ |
1442 | }; |
1443 | __u32 attach_type; /* attach type */ |
1444 | __u32 flags; /* extra flags */ |
1445 | union { |
1446 | __u32 target_btf_id; /* btf_id of target to attach to */ |
1447 | struct { |
1448 | __aligned_u64 iter_info; /* extra bpf_iter_link_info */ |
1449 | __u32 iter_info_len; /* iter_info length */ |
1450 | }; |
1451 | struct { |
1452 | /* black box user-provided value passed through |
1453 | * to BPF program at the execution time and |
1454 | * accessible through bpf_get_attach_cookie() BPF helper |
1455 | */ |
1456 | __u64 bpf_cookie; |
1457 | } perf_event; |
1458 | }; |
1459 | } link_create; |
1460 | |
1461 | struct { /* struct used by BPF_LINK_UPDATE command */ |
1462 | __u32 link_fd; /* link fd */ |
1463 | /* new program fd to update link with */ |
1464 | __u32 new_prog_fd; |
1465 | __u32 flags; /* extra flags */ |
1466 | /* expected link's program fd; is specified only if |
1467 | * BPF_F_REPLACE flag is set in flags */ |
1468 | __u32 old_prog_fd; |
1469 | } link_update; |
1470 | |
1471 | struct { |
1472 | __u32 link_fd; |
1473 | } link_detach; |
1474 | |
1475 | struct { /* struct used by BPF_ENABLE_STATS command */ |
1476 | __u32 type; |
1477 | } enable_stats; |
1478 | |
1479 | struct { /* struct used by BPF_ITER_CREATE command */ |
1480 | __u32 link_fd; |
1481 | __u32 flags; |
1482 | } iter_create; |
1483 | |
1484 | struct { /* struct used by BPF_PROG_BIND_MAP command */ |
1485 | __u32 prog_fd; |
1486 | __u32 map_fd; |
1487 | __u32 flags; /* extra flags */ |
1488 | } prog_bind_map; |
1489 | |
1490 | } __attribute__((aligned(8))); |
1491 | |
1492 | /* The description below is an attempt at providing documentation to eBPF |
1493 | * developers about the multiple available eBPF helper functions. It can be |
1494 | * parsed and used to produce a manual page. The workflow is the following, |
1495 | * and requires the rst2man utility: |
1496 | * |
1497 | * $ ./scripts/bpf_doc.py \ |
1498 | * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst |
1499 | * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 |
1500 | * $ man /tmp/bpf-helpers.7 |
1501 | * |
1502 | * Note that in order to produce this external documentation, some RST |
1503 | * formatting is used in the descriptions to get "bold" and "italics" in |
1504 | * manual pages. Also note that the few trailing white spaces are |
1505 | * intentional, removing them would break paragraphs for rst2man. |
1506 | * |
1507 | * Start of BPF helper function descriptions: |
1508 | * |
1509 | * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) |
1510 | * Description |
1511 | * Perform a lookup in *map* for an entry associated to *key*. |
1512 | * Return |
1513 | * Map value associated to *key*, or **NULL** if no entry was |
1514 | * found. |
1515 | * |
1516 | * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) |
1517 | * Description |
1518 | * Add or update the value of the entry associated to *key* in |
1519 | * *map* with *value*. *flags* is one of: |
1520 | * |
1521 | * **BPF_NOEXIST** |
1522 | * The entry for *key* must not exist in the map. |
1523 | * **BPF_EXIST** |
1524 | * The entry for *key* must already exist in the map. |
1525 | * **BPF_ANY** |
1526 | * No condition on the existence of the entry for *key*. |
1527 | * |
 * Flag value **BPF_NOEXIST** cannot be used for maps of types
 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 * elements always exist); in that case, the helper returns an
 * error.
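 *
 *		As an illustration only, a minimal sketch of the usual
 *		"lookup, else create" pattern, assuming a hypothetical hash
 *		map *counters* holding **u64** values keyed by **u32**:
 *
 *		::
 *
 *			u32 key = 0;
 *			u64 init_val = 1, *count;
 *
 *			count = bpf_map_lookup_elem(&counters, &key);
 *			if (count)
 *				__sync_fetch_and_add(count, 1);
 *			else
 *				bpf_map_update_elem(&counters, &key,
 *						    &init_val, BPF_NOEXIST);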
1531 | * Return |
1532 | * 0 on success, or a negative error in case of failure. |
1533 | * |
1534 | * long bpf_map_delete_elem(struct bpf_map *map, const void *key) |
1535 | * Description |
1536 | * Delete entry with *key* from *map*. |
1537 | * Return |
1538 | * 0 on success, or a negative error in case of failure. |
1539 | * |
1540 | * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) |
1541 | * Description |
1542 | * For tracing programs, safely attempt to read *size* bytes from |
1543 | * kernel space address *unsafe_ptr* and store the data in *dst*. |
1544 | * |
1545 | * Generally, use **bpf_probe_read_user**\ () or |
1546 | * **bpf_probe_read_kernel**\ () instead. |
1547 | * Return |
1548 | * 0 on success, or a negative error in case of failure. |
1549 | * |
1550 | * u64 bpf_ktime_get_ns(void) |
1551 | * Description |
1552 | * Return the time elapsed since system boot, in nanoseconds. |
1553 | * Does not include time the system was suspended. |
1554 | * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) |
1555 | * Return |
1556 | * Current *ktime*. |
1557 | * |
1558 | * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) |
1559 | * Description |
1560 | * This helper is a "printk()-like" facility for debugging. It |
1561 | * prints a message defined by format *fmt* (of size *fmt_size*) |
1562 | * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if |
1563 | * available. It can take up to three additional **u64** |
 * arguments (as for all eBPF helpers, the total number of
 * arguments is limited to five).
1566 | * |
1567 | * Each time the helper is called, it appends a line to the trace. |
 * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
 * open; use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
1570 | * The format of the trace is customizable, and the exact output |
1571 | * one will get depends on the options set in |
1572 | * *\/sys/kernel/debug/tracing/trace_options* (see also the |
1573 | * *README* file under the same directory). However, it usually |
1574 | * defaults to something like: |
1575 | * |
1576 | * :: |
1577 | * |
1578 | * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> |
1579 | * |
1580 | * In the above: |
1581 | * |
1582 | * * ``telnet`` is the name of the current task. |
1583 | * * ``470`` is the PID of the current task. |
1584 | * * ``001`` is the CPU number on which the task is |
1585 | * running. |
1586 | * * In ``.N..``, each character refers to a set of |
1587 | * options (whether irqs are enabled, scheduling |
1588 | * options, whether hard/softirqs are running, level of |
1589 | * preempt_disabled respectively). **N** means that |
1590 | * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** |
1591 | * are set. |
1592 | * * ``419421.045894`` is a timestamp. |
1593 | * * ``0x00000001`` is a fake value used by BPF for the |
1594 | * instruction pointer register. |
1595 | * * ``<formatted msg>`` is the message formatted with |
1596 | * *fmt*. |
1597 | * |
1598 | * The conversion specifiers supported by *fmt* are similar, but |
1599 | * more limited than for printk(). They are **%d**, **%i**, |
1600 | * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, |
1601 | * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size |
1602 | * of field, padding with zeroes, etc.) is available, and the |
1603 | * helper will return **-EINVAL** (but print nothing) if it |
1604 | * encounters an unknown specifier. |
1605 | * |
1606 | * Also, note that **bpf_trace_printk**\ () is slow, and should |
1607 | * only be used for debugging purposes. For this reason, a notice |
1608 | * block (spanning several lines) is printed to kernel logs and |
1609 | * states that the helper should not be used "for production use" |
1610 | * the first time this helper is used (or more precisely, when |
1611 | * **trace_printk**\ () buffers are allocated). For passing values |
1612 | * to user space, perf events should be preferred. |
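 *
 *		As an illustration only, a minimal sketch from a tracing
 *		program (the format string must live in program-accessible
 *		memory, hence the local array):
 *
 *		::
 *
 *			char fmt[] = "tgid %u hit the probe\n";
 *			u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *			bpf_trace_printk(fmt, sizeof(fmt), tgid);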
1613 | * Return |
1614 | * The number of bytes written to the buffer, or a negative error |
1615 | * in case of failure. |
1616 | * |
1617 | * u32 bpf_get_prandom_u32(void) |
1618 | * Description |
1619 | * Get a pseudo-random number. |
1620 | * |
1621 | * From a security point of view, this helper uses its own |
1622 | * pseudo-random internal state, and cannot be used to infer the |
1623 | * seed of other random functions in the kernel. However, it is |
1624 | * essential to note that the generator used by the helper is not |
1625 | * cryptographically secure. |
1626 | * Return |
1627 | * A random 32-bit unsigned value. |
1628 | * |
1629 | * u32 bpf_get_smp_processor_id(void) |
1630 | * Description |
1631 | * Get the SMP (symmetric multiprocessing) processor id. Note that |
1632 | * all programs run with preemption disabled, which means that the |
1633 | * SMP processor id is stable during all the execution of the |
1634 | * program. |
1635 | * Return |
1636 | * The SMP id of the processor running the program. |
1637 | * |
1638 | * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) |
1639 | * Description |
1640 | * Store *len* bytes from address *from* into the packet |
1641 | * associated to *skb*, at *offset*. *flags* are a combination of |
1642 | * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the |
1643 | * checksum for the packet after storing the bytes) and |
1644 | * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ |
1645 | * **->swhash** and *skb*\ **->l4hash** to 0). |
1646 | * |
1647 | * A call to this helper is susceptible to change the underlying |
1648 | * packet buffer. Therefore, at load time, all checks on pointers |
1649 | * previously done by the verifier are invalidated and must be |
1650 | * performed again, if the helper is used in combination with |
1651 | * direct packet access. |
1652 | * Return |
1653 | * 0 on success, or a negative error in case of failure. |
1654 | * |
1655 | * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) |
1656 | * Description |
1657 | * Recompute the layer 3 (e.g. IP) checksum for the packet |
1658 | * associated to *skb*. Computation is incremental, so the helper |
1659 | * must know the former value of the header field that was |
1660 | * modified (*from*), the new value of this field (*to*), and the |
1661 | * number of bytes (2 or 4) for this field, stored in *size*. |
1662 | * Alternatively, it is possible to store the difference between |
1663 | * the previous and the new values of the header field in *to*, by |
1664 | * setting *from* and *size* to 0. For both methods, *offset* |
1665 | * indicates the location of the IP checksum within the packet. |
1666 | * |
1667 | * This helper works in combination with **bpf_csum_diff**\ (), |
1668 | * which does not update the checksum in-place, but offers more |
1669 | * flexibility and can handle sizes larger than 2 or 4 for the |
1670 | * checksum to update. |
1671 | * |
1672 | * A call to this helper is susceptible to change the underlying |
1673 | * packet buffer. Therefore, at load time, all checks on pointers |
1674 | * previously done by the verifier are invalidated and must be |
1675 | * performed again, if the helper is used in combination with |
1676 | * direct packet access. |
1677 | * Return |
1678 | * 0 on success, or a negative error in case of failure. |
1679 | * |
1680 | * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) |
1681 | * Description |
1682 | * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the |
1683 | * packet associated to *skb*. Computation is incremental, so the |
1684 | * helper must know the former value of the header field that was |
1685 | * modified (*from*), the new value of this field (*to*), and the |
1686 | * number of bytes (2 or 4) for this field, stored on the lowest |
1687 | * four bits of *flags*. Alternatively, it is possible to store |
1688 | * the difference between the previous and the new values of the |
1689 | * header field in *to*, by setting *from* and the four lowest |
 * bits of *flags* to 0. For both methods, *offset* indicates the
 * location of the checksum within the packet. In addition to the
 * size of the field, actual flags can be added to *flags* (with a
 * bitwise OR). With **BPF_F_MARK_MANGLED_0**, a null checksum is
 * left
1694 | * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and |
1695 | * for updates resulting in a null checksum the value is set to |
1696 | * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates |
1697 | * the checksum is to be computed against a pseudo-header. |
1698 | * |
1699 | * This helper works in combination with **bpf_csum_diff**\ (), |
1700 | * which does not update the checksum in-place, but offers more |
1701 | * flexibility and can handle sizes larger than 2 or 4 for the |
1702 | * checksum to update. |
1703 | * |
1704 | * A call to this helper is susceptible to change the underlying |
1705 | * packet buffer. Therefore, at load time, all checks on pointers |
1706 | * previously done by the verifier are invalidated and must be |
1707 | * performed again, if the helper is used in combination with |
1708 | * direct packet access. |
1709 | * Return |
1710 | * 0 on success, or a negative error in case of failure. |
1711 | * |
1712 | * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) |
1713 | * Description |
1714 | * This special helper is used to trigger a "tail call", or in |
1715 | * other words, to jump into another eBPF program. The same stack |
1716 | * frame is used (but values on stack and in registers for the |
1717 | * caller are not accessible to the callee). This mechanism allows |
1718 | * for program chaining, either for raising the maximum number of |
1719 | * available eBPF instructions, or to execute given programs in |
1720 | * conditional blocks. For security reasons, there is an upper |
1721 | * limit to the number of successive tail calls that can be |
1722 | * performed. |
1723 | * |
1724 | * Upon call of this helper, the program attempts to jump into a |
1725 | * program referenced at index *index* in *prog_array_map*, a |
1726 | * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes |
1727 | * *ctx*, a pointer to the context. |
1728 | * |
1729 | * If the call succeeds, the kernel immediately runs the first |
1730 | * instruction of the new program. This is not a function call, |
1731 | * and it never returns to the previous program. If the call |
1732 | * fails, then the helper has no effect, and the caller continues |
1733 | * to run its subsequent instructions. A call can fail if the |
1734 | * destination program for the jump does not exist (i.e. *index* |
1735 | * is superior to the number of entries in *prog_array_map*), or |
1736 | * if the maximum number of tail calls has been reached for this |
1737 | * chain of programs. This limit is defined in the kernel by the |
1738 | * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), |
1739 | * which is currently set to 32. |
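 *
 *		As an illustration only, a sketch assuming a hypothetical
 *		**BPF_MAP_TYPE_PROG_ARRAY** map named *progs*, populated from
 *		user space, and a context pointer *ctx* suitable for the
 *		program type:
 *
 *		::
 *
 *			bpf_tail_call(ctx, &progs, 1);
 *			// only reached if the tail call failed
 *			return 0;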
1740 | * Return |
1741 | * 0 on success, or a negative error in case of failure. |
1742 | * |
1743 | * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) |
1744 | * Description |
1745 | * Clone and redirect the packet associated to *skb* to another |
1746 | * net device of index *ifindex*. Both ingress and egress |
1747 | * interfaces can be used for redirection. The **BPF_F_INGRESS** |
1748 | * value in *flags* is used to make the distinction (ingress path |
1749 | * is selected if the flag is present, egress path otherwise). |
1750 | * This is the only flag supported for now. |
1751 | * |
1752 | * In comparison with **bpf_redirect**\ () helper, |
1753 | * **bpf_clone_redirect**\ () has the associated cost of |
1754 | * duplicating the packet buffer, but this can be executed out of |
1755 | * the eBPF program. Conversely, **bpf_redirect**\ () is more |
1756 | * efficient, but it is handled through an action code where the |
1757 | * redirection happens only after the eBPF program has returned. |
1758 | * |
1759 | * A call to this helper is susceptible to change the underlying |
1760 | * packet buffer. Therefore, at load time, all checks on pointers |
1761 | * previously done by the verifier are invalidated and must be |
1762 | * performed again, if the helper is used in combination with |
1763 | * direct packet access. |
1764 | * Return |
1765 | * 0 on success, or a negative error in case of failure. |
1766 | * |
1767 | * u64 bpf_get_current_pid_tgid(void) |
1768 | * Return |
1769 | * A 64-bit integer containing the current tgid and pid, and |
1770 | * created as such: |
1771 | * *current_task*\ **->tgid << 32 \|** |
1772 | * *current_task*\ **->pid**. |
1773 | * |
1774 | * u64 bpf_get_current_uid_gid(void) |
1775 | * Return |
1776 | * A 64-bit integer containing the current GID and UID, and |
1777 | * created as such: *current_gid* **<< 32 \|** *current_uid*. |
1778 | * |
1779 | * long bpf_get_current_comm(void *buf, u32 size_of_buf) |
1780 | * Description |
1781 | * Copy the **comm** attribute of the current task into *buf* of |
1782 | * *size_of_buf*. The **comm** attribute contains the name of |
1783 | * the executable (excluding the path) for the current task. The |
1784 | * *size_of_buf* must be strictly positive. On success, the |
1785 | * helper makes sure that the *buf* is NUL-terminated. On failure, |
1786 | * it is filled with zeroes. |
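 *
 *		A short sketch (16 bytes matches the kernel's
 *		**TASK_COMM_LEN**):
 *
 *		::
 *
 *			char comm[16];
 *
 *			if (bpf_get_current_comm(comm, sizeof(comm)) < 0)
 *				return 0;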
1787 | * Return |
1788 | * 0 on success, or a negative error in case of failure. |
1789 | * |
1790 | * u32 bpf_get_cgroup_classid(struct sk_buff *skb) |
1791 | * Description |
1792 | * Retrieve the classid for the current task, i.e. for the net_cls |
1793 | * cgroup to which *skb* belongs. |
1794 | * |
1795 | * This helper can be used on TC egress path, but not on ingress. |
1796 | * |
1797 | * The net_cls cgroup provides an interface to tag network packets |
1798 | * based on a user-provided identifier for all traffic coming from |
1799 | * the tasks belonging to the related cgroup. See also the related |
1800 | * kernel documentation, available from the Linux sources in file |
1801 | * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. |
1802 | * |
1803 | * The Linux kernel has two versions for cgroups: there are |
1804 | * cgroups v1 and cgroups v2. Both are available to users, who can |
1805 | * use a mixture of them, but note that the net_cls cgroup is for |
1806 | * cgroup v1 only. This makes it incompatible with BPF programs |
1807 | * run on cgroups, which is a cgroup-v2-only feature (a socket can |
1808 | * only hold data for one version of cgroups at a time). |
1809 | * |
 * This helper is only available if the kernel was compiled with
1811 | * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to |
1812 | * "**y**" or to "**m**". |
1813 | * Return |
1814 | * The classid, or 0 for the default unconfigured classid. |
1815 | * |
1816 | * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) |
1817 | * Description |
1818 | * Push a *vlan_tci* (VLAN tag control information) of protocol |
1819 | * *vlan_proto* to the packet associated to *skb*, then update |
1820 | * the checksum. Note that if *vlan_proto* is different from |
1821 | * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to |
1822 | * be **ETH_P_8021Q**. |
1823 | * |
1824 | * A call to this helper is susceptible to change the underlying |
1825 | * packet buffer. Therefore, at load time, all checks on pointers |
1826 | * previously done by the verifier are invalidated and must be |
1827 | * performed again, if the helper is used in combination with |
1828 | * direct packet access. |
1829 | * Return |
1830 | * 0 on success, or a negative error in case of failure. |
1831 | * |
1832 | * long bpf_skb_vlan_pop(struct sk_buff *skb) |
1833 | * Description |
1834 | * Pop a VLAN header from the packet associated to *skb*. |
1835 | * |
1836 | * A call to this helper is susceptible to change the underlying |
1837 | * packet buffer. Therefore, at load time, all checks on pointers |
1838 | * previously done by the verifier are invalidated and must be |
1839 | * performed again, if the helper is used in combination with |
1840 | * direct packet access. |
1841 | * Return |
1842 | * 0 on success, or a negative error in case of failure. |
1843 | * |
1844 | * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) |
1845 | * Description |
1846 | * Get tunnel metadata. This helper takes a pointer *key* to an |
1847 | * empty **struct bpf_tunnel_key** of **size**, that will be |
1848 | * filled with tunnel metadata for the packet associated to *skb*. |
1849 | * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which |
1850 | * indicates that the tunnel is based on IPv6 protocol instead of |
1851 | * IPv4. |
1852 | * |
1853 | * The **struct bpf_tunnel_key** is an object that generalizes the |
1854 | * principal parameters used by various tunneling protocols into a |
1855 | * single struct. This way, it can be used to easily make a |
1856 | * decision based on the contents of the encapsulation header, |
1857 | * "summarized" in this struct. In particular, it holds the IP |
1858 | * address of the remote end (IPv4 or IPv6, depending on the case) |
1859 | * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, |
1860 | * this struct exposes the *key*\ **->tunnel_id**, which is |
1861 | * generally mapped to a VNI (Virtual Network Identifier), making |
1862 | * it programmable together with the **bpf_skb_set_tunnel_key**\ |
1863 | * () helper. |
1864 | * |
1865 | * Let's imagine that the following code is part of a program |
1866 | * attached to the TC ingress interface, on one end of a GRE |
1867 | * tunnel, and is supposed to filter out all messages coming from |
1868 | * remote ends with IPv4 address other than 10.0.0.1: |
1869 | * |
1870 | * :: |
1871 | * |
1872 | * int ret; |
1873 | * struct bpf_tunnel_key key = {}; |
1874 | * |
1875 | * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); |
1876 | * if (ret < 0) |
1877 | * return TC_ACT_SHOT; // drop packet |
1878 | * |
1879 | * if (key.remote_ipv4 != 0x0a000001) |
1880 | * return TC_ACT_SHOT; // drop packet |
1881 | * |
1882 | * return TC_ACT_OK; // accept packet |
1883 | * |
1884 | * This interface can also be used with all encapsulation devices |
1885 | * that can operate in "collect metadata" mode: instead of having |
1886 | * one network device per specific configuration, the "collect |
1887 | * metadata" mode only requires a single device where the |
1888 | * configuration can be extracted from this helper. |
1889 | * |
1890 | * This can be used together with various tunnels such as VXLan, |
1891 | * Geneve, GRE or IP in IP (IPIP). |
1892 | * Return |
1893 | * 0 on success, or a negative error in case of failure. |
1894 | * |
1895 | * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) |
1896 | * Description |
 * Populate tunnel metadata for the packet associated to *skb*. The
1898 | * tunnel metadata is set to the contents of *key*, of *size*. The |
1899 | * *flags* can be set to a combination of the following values: |
1900 | * |
1901 | * **BPF_F_TUNINFO_IPV6** |
1902 | * Indicate that the tunnel is based on IPv6 protocol |
1903 | * instead of IPv4. |
1904 | * **BPF_F_ZERO_CSUM_TX** |
1905 | * For IPv4 packets, add a flag to tunnel metadata |
1906 | * indicating that checksum computation should be skipped |
1907 | * and checksum set to zeroes. |
1908 | * **BPF_F_DONT_FRAGMENT** |
1909 | * Add a flag to tunnel metadata indicating that the |
1910 | * packet should not be fragmented. |
1911 | * **BPF_F_SEQ_NUMBER** |
1912 | * Add a flag to tunnel metadata indicating that a |
1913 | * sequence number should be added to tunnel header before |
1914 | * sending the packet. This flag was added for GRE |
1915 | * encapsulation, but might be used with other protocols |
1916 | * as well in the future. |
1917 | * |
1918 | * Here is a typical usage on the transmit path: |
1919 | * |
1920 | * :: |
1921 | * |
1922 | * struct bpf_tunnel_key key; |
1923 | * populate key ... |
1924 | * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); |
1925 | * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); |
1926 | * |
1927 | * See also the description of the **bpf_skb_get_tunnel_key**\ () |
1928 | * helper for additional information. |
1929 | * Return |
1930 | * 0 on success, or a negative error in case of failure. |
1931 | * |
1932 | * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) |
1933 | * Description |
1934 | * Read the value of a perf event counter. This helper relies on a |
1935 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of |
1936 | * the perf event counter is selected when *map* is updated with |
1937 | * perf event file descriptors. The *map* is an array whose size |
1938 | * is the number of available CPUs, and each cell contains a value |
1939 | * relative to one CPU. The value to retrieve is indicated by |
1940 | * *flags*, that contains the index of the CPU to look up, masked |
1941 | * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to |
1942 | * **BPF_F_CURRENT_CPU** to indicate that the value for the |
1943 | * current CPU should be retrieved. |
1944 | * |
 * Note that before Linux 4.13, only hardware perf events can be
1946 | * retrieved. |
1947 | * |
1948 | * Also, be aware that the newer helper |
1949 | * **bpf_perf_event_read_value**\ () is recommended over |
1950 | * **bpf_perf_event_read**\ () in general. The latter has some ABI |
1951 | * quirks where error and counter value are used as a return code |
1952 | * (which is wrong to do since ranges may overlap). This issue is |
1953 | * fixed with **bpf_perf_event_read_value**\ (), which at the same |
1954 | * time provides more features over the **bpf_perf_event_read**\ |
1955 | * () interface. Please refer to the description of |
1956 | * **bpf_perf_event_read_value**\ () for details. |
1957 | * Return |
1958 | * The value of the perf event counter read from the map, or a |
1959 | * negative error code in case of failure. |
1960 | * |
1961 | * long bpf_redirect(u32 ifindex, u64 flags) |
1962 | * Description |
1963 | * Redirect the packet to another net device of index *ifindex*. |
1964 | * This helper is somewhat similar to **bpf_clone_redirect**\ |
1965 | * (), except that the packet is not cloned, which provides |
1966 | * increased performance. |
1967 | * |
1968 | * Except for XDP, both ingress and egress interfaces can be used |
1969 | * for redirection. The **BPF_F_INGRESS** value in *flags* is used |
1970 | * to make the distinction (ingress path is selected if the flag |
1971 | * is present, egress path otherwise). Currently, XDP only |
1972 | * supports redirection to the egress interface, and accepts no |
1973 | * flag at all. |
1974 | * |
1975 | * The same effect can also be attained with the more generic |
1976 | * **bpf_redirect_map**\ (), which uses a BPF map to store the |
1977 | * redirect target instead of providing it directly to the helper. |
1978 | * Return |
1979 | * For XDP, the helper returns **XDP_REDIRECT** on success or |
1980 | * **XDP_ABORTED** on error. For other program types, the values |
1981 | * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on |
1982 | * error. |
1983 | * |
1984 | * u32 bpf_get_route_realm(struct sk_buff *skb) |
1985 | * Description |
 * Retrieve the realm of the route, that is to say the
1987 | * **tclassid** field of the destination for the *skb*. The |
1988 | * identifier retrieved is a user-provided tag, similar to the |
1989 | * one used with the net_cls cgroup (see description for |
1990 | * **bpf_get_cgroup_classid**\ () helper), but here this tag is |
1991 | * held by a route (a destination entry), not by a task. |
1992 | * |
1993 | * Retrieving this identifier works with the clsact TC egress hook |
1994 | * (see also **tc-bpf(8)**), or alternatively on conventional |
1995 | * classful egress qdiscs, but not on TC ingress path. In case of |
1996 | * clsact TC egress hook, this has the advantage that, internally, |
1997 | * the destination entry has not been dropped yet in the transmit |
1998 | * path. Therefore, the destination entry does not need to be |
1999 | * artificially held via **netif_keep_dst**\ () for a classful |
2000 | * qdisc until the *skb* is freed. |
2001 | * |
2002 | * This helper is available only if the kernel was compiled with |
2003 | * **CONFIG_IP_ROUTE_CLASSID** configuration option. |
2004 | * Return |
2005 | * The realm of the route for the packet associated to *skb*, or 0 |
2006 | * if none was found. |
2007 | * |
2008 | * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) |
2009 | * Description |
2010 | * Write raw *data* blob into a special BPF perf event held by |
2011 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf |
2012 | * event must have the following attributes: **PERF_SAMPLE_RAW** |
2013 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and |
2014 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. |
2015 | * |
2016 | * The *flags* are used to indicate the index in *map* for which |
2017 | * the value must be put, masked with **BPF_F_INDEX_MASK**. |
2018 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** |
2019 | * to indicate that the index of the current CPU core should be |
2020 | * used. |
2021 | * |
2022 | * The value to write, of *size*, is passed through eBPF stack and |
2023 | * pointed by *data*. |
2024 | * |
 * The context of the program, *ctx*, also needs to be passed to
 * the helper.
2027 | * |
 * In user space, a program willing to read the values needs to
2029 | * call **perf_event_open**\ () on the perf event (either for |
2030 | * one or for all CPUs) and to store the file descriptor into the |
2031 | * *map*. This must be done before the eBPF program can send data |
2032 | * into it. An example is available in file |
2033 | * *samples/bpf/trace_output_user.c* in the Linux kernel source |
2034 | * tree (the eBPF program counterpart is in |
2035 | * *samples/bpf/trace_output_kern.c*). |
2036 | * |
2037 | * **bpf_perf_event_output**\ () achieves better performance |
2038 | * than **bpf_trace_printk**\ () for sharing data with user |
 * space, and is much better suited to streaming data from eBPF
2040 | * programs. |
2041 | * |
2042 | * Note that this helper is not restricted to tracing use cases |
2043 | * and can be used with programs attached to TC or XDP as well, |
2044 | * where it allows for passing data to user space listeners. Data |
2045 | * can be: |
2046 | * |
2047 | * * Only custom structs, |
2048 | * * Only the packet payload, or |
2049 | * * A combination of both. |
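 *
 *		As an illustration only, a sketch from a tracing program,
 *		assuming a hypothetical **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map
 *		named *events*:
 *
 *		::
 *
 *			struct event {
 *				u64 ts;
 *				u32 cpu;
 *			} e = {};
 *
 *			e.ts = bpf_ktime_get_ns();
 *			e.cpu = bpf_get_smp_processor_id();
 *			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *					      &e, sizeof(e));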
2050 | * Return |
2051 | * 0 on success, or a negative error in case of failure. |
2052 | * |
2053 | * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) |
2054 | * Description |
2055 | * This helper was provided as an easy way to load data from a |
2056 | * packet. It can be used to load *len* bytes from *offset* from |
2057 | * the packet associated to *skb*, into the buffer pointed by |
2058 | * *to*. |
2059 | * |
2060 | * Since Linux 4.7, usage of this helper has mostly been replaced |
2061 | * by "direct packet access", enabling packet data to be |
2062 | * manipulated with *skb*\ **->data** and *skb*\ **->data_end** |
2063 | * pointing respectively to the first byte of packet data and to |
2064 | * the byte after the last byte of packet data. However, it |
2065 | * remains useful if one wishes to read large quantities of data |
2066 | * at once from a packet into the eBPF stack. |
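 *
 *		As an illustration only, a sketch from a TC classifier that
 *		copies the first bytes of the packet into a stack buffer
 *		(offset and length are arbitrary here):
 *
 *		::
 *
 *			u8 buf[64];
 *
 *			if (bpf_skb_load_bytes(skb, 0, buf, sizeof(buf)) < 0)
 *				return TC_ACT_SHOT; // or handle the error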
2067 | * Return |
2068 | * 0 on success, or a negative error in case of failure. |
2069 | * |
2070 | * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) |
2071 | * Description |
2072 | * Walk a user or a kernel stack and return its id. To achieve |
2073 | * this, the helper needs *ctx*, which is a pointer to the context |
2074 | * on which the tracing program is executed, and a pointer to a |
2075 | * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. |
2076 | * |
2077 | * The last argument, *flags*, holds the number of stack frames to |
2078 | * skip (from 0 to 255), masked with |
2079 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set |
2080 | * a combination of the following flags: |
2081 | * |
2082 | * **BPF_F_USER_STACK** |
2083 | * Collect a user space stack instead of a kernel stack. |
2084 | * **BPF_F_FAST_STACK_CMP** |
2085 | * Compare stacks by hash only. |
2086 | * **BPF_F_REUSE_STACKID** |
2087 | * If two different stacks hash into the same *stackid*, |
2088 | * discard the old one. |
2089 | * |
 * The stack id retrieved is a 32-bit integer handle which
2091 | * can be further combined with other data (including other stack |
2092 | * ids) and used as a key into maps. This can be useful for |
2093 | * generating a variety of graphs (such as flame graphs or off-cpu |
2094 | * graphs). |
2095 | * |
2096 | * For walking a stack, this helper is an improvement over |
2097 | * **bpf_probe_read**\ (), which can be used with unrolled loops |
2098 | * but is not efficient and consumes a lot of eBPF instructions. |
2099 | * Instead, **bpf_get_stackid**\ () can collect up to |
2100 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that |
2101 | * this limit can be controlled with the **sysctl** program, and |
2102 | * that it should be manually increased in order to profile long |
2103 | * user stacks (such as stacks for Java programs). To do so, use: |
2104 | * |
2105 | * :: |
2106 | * |
2107 | * # sysctl kernel.perf_event_max_stack=<new value> |
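 *
 *		As an illustration only, a sketch assuming a hypothetical
 *		**BPF_MAP_TYPE_STACK_TRACE** map named *stack_traces*:
 *
 *		::
 *
 *			long id;
 *
 *			id = bpf_get_stackid(ctx, &stack_traces,
 *					     BPF_F_USER_STACK);
 *			if (id < 0)
 *				return 0;
 *			// use id, e.g. as part of a key into another map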
2108 | * Return |
2109 | * The positive or null stack id on success, or a negative error |
2110 | * in case of failure. |
2111 | * |
2112 | * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) |
2113 | * Description |
2114 | * Compute a checksum difference, from the raw buffer pointed by |
2115 | * *from*, of length *from_size* (that must be a multiple of 4), |
2116 | * towards the raw buffer pointed by *to*, of size *to_size* |
2117 | * (same remark). An optional *seed* can be added to the value |
2118 | * (this can be cascaded, the seed may come from a previous call |
2119 | * to the helper). |
2120 | * |
2121 | * This is flexible enough to be used in several ways: |
2122 | * |
2123 | * * With *from_size* == 0, *to_size* > 0 and *seed* set to |
2124 | * checksum, it can be used when pushing new data. |
2125 | * * With *from_size* > 0, *to_size* == 0 and *seed* set to |
2126 | * checksum, it can be used when removing data from a packet. |
2127 | * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it |
2128 | * can be used to compute a diff. Note that *from_size* and |
2129 | * *to_size* do not need to be equal. |
2130 | * |
2131 | * This helper can be used in combination with |
2132 | * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to |
2133 | * which one can feed in the difference computed with |
2134 | * **bpf_csum_diff**\ (). |
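 *
 *		As an illustration only, a sketch replacing a 4-byte field
 *		that is part of the TCP pseudo-header (the checksum offset
 *		*csum_off* is hypothetical and must be computed by the
 *		program):
 *
 *		::
 *
 *			__be32 old_addr, new_addr;
 *			s64 diff;
 *
 *			// old_addr read from the packet, new_addr chosen by
 *			// the program
 *			diff = bpf_csum_diff(&old_addr, 4, &new_addr, 4, 0);
 *			bpf_l4_csum_replace(skb, csum_off, 0, diff,
 *					    BPF_F_PSEUDO_HDR);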
2135 | * Return |
2136 | * The checksum result, or a negative error code in case of |
2137 | * failure. |
2138 | * |
2139 | * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) |
2140 | * Description |
2141 | * Retrieve tunnel options metadata for the packet associated to |
2142 | * *skb*, and store the raw tunnel option data to the buffer *opt* |
2143 | * of *size*. |
2144 | * |
2145 | * This helper can be used with encapsulation devices that can |
2146 | * operate in "collect metadata" mode (please refer to the related |
2147 | * note in the description of **bpf_skb_get_tunnel_key**\ () for |
2148 | * more details). A particular example where this can be used is |
2149 | * in combination with the Geneve encapsulation protocol, where it |
 * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ ()
 * helper) and retrieving (with this helper) arbitrary TLVs
 * (Type-Length-Value headers) from the eBPF program. This allows
 * for full customization of these headers.
2154 | * Return |
2155 | * The size of the option data retrieved. |
2156 | * |
2157 | * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) |
2158 | * Description |
2159 | * Set tunnel options metadata for the packet associated to *skb* |
2160 | * to the option data contained in the raw buffer *opt* of *size*. |
2161 | * |
2162 | * See also the description of the **bpf_skb_get_tunnel_opt**\ () |
2163 | * helper for additional information. |
2164 | * Return |
2165 | * 0 on success, or a negative error in case of failure. |
2166 | * |
2167 | * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) |
2168 | * Description |
2169 | * Change the protocol of the *skb* to *proto*. Currently |
2170 | * supported are transition from IPv4 to IPv6, and from IPv6 to |
2171 | * IPv4. The helper takes care of the groundwork for the |
2172 | * transition, including resizing the socket buffer. The eBPF |
2173 | * program is expected to fill the new headers, if any, via |
2174 | * **skb_store_bytes**\ () and to recompute the checksums with |
2175 | * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ |
2176 | * (). The main case for this helper is to perform NAT64 |
2177 | * operations out of an eBPF program. |
2178 | * |
2179 | * Internally, the GSO type is marked as dodgy so that headers are |
2180 | * checked and segments are recalculated by the GSO/GRO engine. |
2181 | * The size for GSO target is adapted as well. |
2182 | * |
2183 | * All values for *flags* are reserved for future usage, and must |
2184 | * be left at zero. |
2185 | * |
2186 | * A call to this helper is susceptible to change the underlying |
2187 | * packet buffer. Therefore, at load time, all checks on pointers |
2188 | * previously done by the verifier are invalidated and must be |
2189 | * performed again, if the helper is used in combination with |
2190 | * direct packet access. |
2191 | * Return |
2192 | * 0 on success, or a negative error in case of failure. |
2193 | * |
2194 | * long bpf_skb_change_type(struct sk_buff *skb, u32 type) |
2195 | * Description |
2196 | * Change the packet type for the packet associated to *skb*. This |
2197 | * comes down to setting *skb*\ **->pkt_type** to *type*, except |
 * the eBPF program does not have write access to *skb*\
 * **->pkt_type** other than through this helper. Using a helper
 * here allows
2200 | * for graceful handling of errors. |
2201 | * |
2202 | * The major use case is to change incoming *skb*s to |
2203 | * **PACKET_HOST** in a programmatic way instead of having to |
2204 | * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for |
2205 | * example. |
2206 | * |
2207 | * Note that *type* only allows certain values. At this time, they |
2208 | * are: |
2209 | * |
2210 | * **PACKET_HOST** |
2211 | * Packet is for us. |
2212 | * **PACKET_BROADCAST** |
2213 | * Send packet to all. |
2214 | * **PACKET_MULTICAST** |
2215 | * Send packet to group. |
2216 | * **PACKET_OTHERHOST** |
2217 | * Send packet to someone else. |
2218 | * Return |
2219 | * 0 on success, or a negative error in case of failure. |
2220 | * |
2221 | * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) |
2222 | * Description |
2223 | * Check whether *skb* is a descendant of the cgroup2 held by |
2224 | * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. |
2225 | * Return |
2226 | * The return value depends on the result of the test, and can be: |
2227 | * |
2228 | * * 0, if the *skb* failed the cgroup2 descendant test. |
2229 | * * 1, if the *skb* succeeded the cgroup2 descendant test. |
2230 | * * A negative error code, if an error occurred. |
2231 | * |
2232 | * u32 bpf_get_hash_recalc(struct sk_buff *skb) |
2233 | * Description |
2234 | * Retrieve the hash of the packet, *skb*\ **->hash**. If it is |
2235 | * not set, in particular if the hash was cleared due to mangling, |
2236 | * recompute this hash. Later accesses to the hash can be done |
2237 | * directly with *skb*\ **->hash**. |
2238 | * |
2239 | * Calling **bpf_set_hash_invalid**\ (), changing a packet |
2240 | * prototype with **bpf_skb_change_proto**\ (), or calling |
2241 | * **bpf_skb_store_bytes**\ () with the |
2242 | * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear |
2243 | * the hash and to trigger a new computation for the next call to |
2244 | * **bpf_get_hash_recalc**\ (). |
2245 | * Return |
2246 | * The 32-bit hash. |
2247 | * |
2248 | * u64 bpf_get_current_task(void) |
2249 | * Return |
2250 | * A pointer to the current task struct. |
2251 | * |
2252 | * long bpf_probe_write_user(void *dst, const void *src, u32 len) |
2253 | * Description |
2254 | * Attempt in a safe way to write *len* bytes from the buffer |
2255 | * *src* to *dst* in memory. It only works for threads that are in |
2256 | * user context, and *dst* must be a valid user space address. |
2257 | * |
2258 | * This helper should not be used to implement any kind of |
2259 | * security mechanism because of TOC-TOU attacks, but rather to |
2260 | * debug, divert, and manipulate execution of semi-cooperative |
2261 | * processes. |
2262 | * |
2263 | * Keep in mind that this feature is meant for experiments, and it |
2264 | * has a risk of crashing the system and running programs. |
2265 | * Therefore, when an eBPF program using this helper is attached, |
2266 | * a warning including PID and process name is printed to kernel |
2267 | * logs. |
2268 | * Return |
2269 | * 0 on success, or a negative error in case of failure. |
2270 | * |
2271 | * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) |
2272 | * Description |
 * Check whether the probe is being run in the context of a given
2274 | * subset of the cgroup2 hierarchy. The cgroup2 to test is held by |
2275 | * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. |
2276 | * Return |
2277 | * The return value depends on the result of the test, and can be: |
2278 | * |
2279 | * * 1, if current task belongs to the cgroup2. |
2280 | * * 0, if current task does not belong to the cgroup2. |
2281 | * * A negative error code, if an error occurred. |
2282 | * |
2283 | * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) |
2284 | * Description |
2285 | * Resize (trim or grow) the packet associated to *skb* to the |
2286 | * new *len*. The *flags* are reserved for future usage, and must |
2287 | * be left at zero. |
2288 | * |
2289 | * The basic idea is that the helper performs the needed work to |
2290 | * change the size of the packet, then the eBPF program rewrites |
2291 | * the rest via helpers like **bpf_skb_store_bytes**\ (), |
 * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
2293 | * and others. This helper is a slow path utility intended for |
2294 | * replies with control messages. And because it is targeted for |
2295 | * slow path, the helper itself can afford to be slow: it |
2296 | * implicitly linearizes, unclones and drops offloads from the |
2297 | * *skb*. |
2298 | * |
2299 | * A call to this helper is susceptible to change the underlying |
2300 | * packet buffer. Therefore, at load time, all checks on pointers |
2301 | * previously done by the verifier are invalidated and must be |
2302 | * performed again, if the helper is used in combination with |
2303 | * direct packet access. |
2304 | * Return |
2305 | * 0 on success, or a negative error in case of failure. |
2306 | * |
2307 | * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) |
2308 | * Description |
2309 | * Pull in non-linear data in case the *skb* is non-linear and not |
2310 | * all of *len* are part of the linear section. Make *len* bytes |
2311 | * from *skb* readable and writable. If a zero value is passed for |
2312 | * *len*, then the whole length of the *skb* is pulled. |
2313 | * |
2314 | * This helper is only needed for reading and writing with direct |
2315 | * packet access. |
2316 | * |
2317 | * For direct packet access, testing that offsets to access |
2318 | * are within packet boundaries (test on *skb*\ **->data_end**) is |
2319 | * susceptible to fail if offsets are invalid, or if the requested |
2320 | * data is in non-linear parts of the *skb*. On failure the |
2321 | * program can just bail out, or in the case of a non-linear |
2322 | * buffer, use a helper to make the data available. The |
2323 | * **bpf_skb_load_bytes**\ () helper is a first solution to access |
 * the data. Another one consists in using **bpf_skb_pull_data**\ ()
 * to pull in the non-linear parts once, then retesting and
 * eventually accessing the data.
2327 | * |
2328 | * At the same time, this also makes sure the *skb* is uncloned, |
2329 | * which is a necessary condition for direct write. As this needs |
2330 | * to be an invariant for the write part only, the verifier |
2331 | * detects writes and adds a prologue that is calling |
2332 | * **bpf_skb_pull_data()** to effectively unclone the *skb* from |
2333 | * the very beginning in case it is indeed cloned. |
2334 | * |
2335 | * A call to this helper is susceptible to change the underlying |
2336 | * packet buffer. Therefore, at load time, all checks on pointers |
2337 | * previously done by the verifier are invalidated and must be |
2338 | * performed again, if the helper is used in combination with |
2339 | * direct packet access. |
2340 | * Return |
2341 | * 0 on success, or a negative error in case of failure. |
2342 | * |
2343 | * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) |
2344 | * Description |
2345 | * Add the checksum *csum* into *skb*\ **->csum** in case the |
2346 | * driver has supplied a checksum for the entire packet into that |
2347 | * field. Return an error otherwise. This helper is intended to be |
2348 | * used in combination with **bpf_csum_diff**\ (), in particular |
2349 | * when the checksum needs to be updated after data has been |
2350 | * written into the packet through direct packet access. |
2351 | * Return |
2352 | * The checksum on success, or a negative error code in case of |
2353 | * failure. |
2354 | * |
2355 | * void bpf_set_hash_invalid(struct sk_buff *skb) |
2356 | * Description |
2357 | * Invalidate the current *skb*\ **->hash**. It can be used after |
2358 | * mangling on headers through direct packet access, in order to |
2359 | * indicate that the hash is outdated and to trigger a |
2360 | * recalculation the next time the kernel tries to access this |
2361 | * hash or when the **bpf_get_hash_recalc**\ () helper is called. |
2362 | * |
2363 | * long bpf_get_numa_node_id(void) |
2364 | * Description |
2365 | * Return the id of the current NUMA node. The primary use case |
2366 | * for this helper is the selection of sockets for the local NUMA |
2367 | * node, when the program is attached to sockets using the |
2368 | * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), |
2369 | * but the helper is also available to other eBPF program types, |
2370 | * similarly to **bpf_get_smp_processor_id**\ (). |
2371 | * Return |
2372 | * The id of current NUMA node. |
2373 | * |
2374 | * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) |
2375 | * Description |
 * Grow the headroom of the packet associated to *skb* and adjust
 * the offset of the MAC header accordingly, adding *len* bytes of
2378 | * space. It automatically extends and reallocates memory as |
2379 | * required. |
2380 | * |
2381 | * This helper can be used on a layer 3 *skb* to push a MAC header |
2382 | * for redirection into a layer 2 device. |
2383 | * |
2384 | * All values for *flags* are reserved for future usage, and must |
2385 | * be left at zero. |
2386 | * |
2387 | * A call to this helper is susceptible to change the underlying |
2388 | * packet buffer. Therefore, at load time, all checks on pointers |
2389 | * previously done by the verifier are invalidated and must be |
2390 | * performed again, if the helper is used in combination with |
2391 | * direct packet access. |
2392 | * Return |
2393 | * 0 on success, or a negative error in case of failure. |
2394 | * |
2395 | * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) |
2396 | * Description |
2397 | * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that |
2398 | * it is possible to use a negative value for *delta*. This helper |
2399 | * can be used to prepare the packet for pushing or popping |
2400 | * headers. |
2401 | * |
2402 | * A call to this helper is susceptible to change the underlying |
2403 | * packet buffer. Therefore, at load time, all checks on pointers |
2404 | * previously done by the verifier are invalidated and must be |
2405 | * performed again, if the helper is used in combination with |
2406 | * direct packet access. |
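 *
 *		As an illustration only, a sketch making room for a
 *		hypothetical 8-byte custom header and re-validating the
 *		packet pointers afterwards:
 *
 *		::
 *
 *			if (bpf_xdp_adjust_head(xdp_md, -8))
 *				return XDP_ABORTED;
 *
 *			void *data = (void *)(long)xdp_md->data;
 *			void *data_end = (void *)(long)xdp_md->data_end;
 *
 *			if (data + 8 > data_end)
 *				return XDP_ABORTED;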
2407 | * Return |
2408 | * 0 on success, or a negative error in case of failure. |
2409 | * |
2410 | * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) |
2411 | * Description |
2412 | * Copy a NUL terminated string from an unsafe kernel address |
2413 | * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for |
2414 | * more details. |
2415 | * |
2416 | * Generally, use **bpf_probe_read_user_str**\ () or |
2417 | * **bpf_probe_read_kernel_str**\ () instead. |
2418 | * Return |
2419 | * On success, the strictly positive length of the string, |
2420 | * including the trailing NUL character. On error, a negative |
2421 | * value. |
2422 | * |
2423 | * u64 bpf_get_socket_cookie(struct sk_buff *skb) |
2424 | * Description |
2425 | * If the **struct sk_buff** pointed by *skb* has a known socket, |
2426 | * retrieve the cookie (generated by the kernel) of this socket. |
2427 | * If no cookie has been set yet, generate a new cookie. Once |
2428 | * generated, the socket cookie remains stable for the life of the |
2429 | * socket. This helper can be useful for monitoring per socket |
2430 | * networking traffic statistics as it provides a global socket |
2431 | * identifier that can be assumed unique. |
2432 | * Return |
 * An 8-byte long unique number on success, or 0 if the socket
2434 | * field is missing inside *skb*. |
2435 | * |
2436 | * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) |
2437 | * Description |
 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2439 | * *skb*, but gets socket from **struct bpf_sock_addr** context. |
2440 | * Return |
 * An 8-byte long unique number.
2442 | * |
2443 | * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) |
2444 | * Description |
2445 | * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts |
2446 | * *skb*, but gets socket from **struct bpf_sock_ops** context. |
2447 | * Return |
 * An 8-byte long unique number.
2449 | * |
2450 | * u64 bpf_get_socket_cookie(struct sock *sk) |
2451 | * Description |
2452 | * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts |
2453 | * *sk*, but gets socket from a BTF **struct sock**. This helper |
2454 | * also works for sleepable programs. |
2455 | * Return |
 * An 8-byte long unique number or 0 if *sk* is NULL.
2457 | * |
2458 | * u32 bpf_get_socket_uid(struct sk_buff *skb) |
2459 | * Return |
2460 | * The owner UID of the socket associated to *skb*. If the socket |
2461 | * is **NULL**, or if it is not a full socket (i.e. if it is a |
2462 | * time-wait or a request socket instead), **overflowuid** value |
2463 | * is returned (note that **overflowuid** might also be the actual |
2464 | * UID value for the socket). |
2465 | * |
2466 | * long bpf_set_hash(struct sk_buff *skb, u32 hash) |
2467 | * Description |
2468 | * Set the full hash for *skb* (set the field *skb*\ **->hash**) |
2469 | * to value *hash*. |
2470 | * Return |
2471 | * 0 |
2472 | * |
2473 | * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) |
2474 | * Description |
2475 | * Emulate a call to **setsockopt()** on the socket associated to |
2476 | * *bpf_socket*, which must be a full socket. The *level* at |
2477 | * which the option resides and the name *optname* of the option |
2478 | * must be specified, see **setsockopt(2)** for more information. |
2479 | * The option value of length *optlen* is pointed by *optval*. |
2480 | * |
2481 | * *bpf_socket* should be one of the following: |
2482 | * |
2483 | * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. |
2484 | * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** |
2485 | * and **BPF_CGROUP_INET6_CONNECT**. |
2486 | * |
2487 | * This helper actually implements a subset of **setsockopt()**. |
2488 | * It supports the following *level*\ s: |
2489 | * |
2490 | * * **SOL_SOCKET**, which supports the following *optname*\ s: |
2491 | * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, |
2492 | * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, |
2493 | * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. |
2494 | * * **IPPROTO_TCP**, which supports the following *optname*\ s: |
2495 | * **TCP_CONGESTION**, **TCP_BPF_IW**, |
2496 | * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, |
2497 | * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, |
2498 | * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**. |
2499 | * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. |
2500 | * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. |
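 *
 *		As an illustration only, a sketch from a
 *		**BPF_PROG_TYPE_SOCK_OPS** program switching the congestion
 *		control algorithm (assuming the *cubic* module is available):
 *
 *		::
 *
 *			char cc[] = "cubic";
 *
 *			bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *				       cc, sizeof(cc));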
2501 | * Return |
2502 | * 0 on success, or a negative error in case of failure. |
2503 | * |
2504 | * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) |
2505 | * Description |
2506 | * Grow or shrink the room for data in the packet associated to |
2507 | * *skb* by *len_diff*, and according to the selected *mode*. |
2508 | * |
2509 | * By default, the helper will reset any offloaded checksum |
2510 | * indicator of the skb to CHECKSUM_NONE. This can be avoided |
2511 | * by the following flag: |
2512 | * |
2513 | * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded |
2514 | * checksum data of the skb to CHECKSUM_NONE. |
2515 | * |
2516 | * There are two supported modes at this time: |
2517 | * |
2518 | * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer |
2519 | * (room space is added or removed below the layer 2 header). |
2520 | * |
2521 | * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer |
2522 | * (room space is added or removed below the layer 3 header). |
2523 | * |
2524 | * The following flags are supported at this time: |
2525 | * |
2526 | * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. |
2527 | * Adjusting mss in this way is not allowed for datagrams. |
2528 | * |
2529 | * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, |
2530 | * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: |
2531 | * Any new space is reserved to hold a tunnel header. |
2532 | * Configure skb offsets and other fields accordingly. |
2533 | * |
2534 | * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, |
2535 | * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: |
2536 | * Use with ENCAP_L3 flags to further specify the tunnel type. |
2537 | * |
2538 | * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): |
2539 | * Use with ENCAP_L3/L4 flags to further specify the tunnel |
2540 | * type; *len* is the length of the inner MAC header. |
2541 | * |
2542 | * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: |
2543 | * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the |
2544 | * L2 type as Ethernet. |
2545 | * |
2546 | * A call to this helper is susceptible to change the underlying |
2547 | * packet buffer. Therefore, at load time, all checks on pointers |
2548 | * previously done by the verifier are invalidated and must be |
2549 | * performed again, if the helper is used in combination with |
2550 | * direct packet access. |
2551 | * Return |
2552 | * 0 on success, or a negative error in case of failure. |
2553 | * |
2554 | * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) |
2555 | * Description |
2556 | * Redirect the packet to the endpoint referenced by *map* at |
2557 | * index *key*. Depending on its type, this *map* can contain |
2558 | * references to net devices (for forwarding packets through other |
2559 | * ports), or to CPUs (for redirecting XDP frames to another CPU; |
2560 | * but this is only implemented for native XDP (with driver |
2561 | * support) as of this writing). |
2562 | * |
2563 | * The lower two bits of *flags* are used as the return code if |
2564 | * the map lookup fails. This is so that the return value can be |
2565 | * one of the XDP program return codes up to **XDP_TX**, as chosen |
2566 | * by the caller. The higher bits of *flags* can be set to |
2567 | * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. |
2568 | * |
 * With BPF_F_BROADCAST the packet will be broadcast to all the
 * interfaces in the map, and with BPF_F_EXCLUDE_INGRESS the
 * ingress interface will be excluded from the broadcast.
2572 | * |
2573 | * See also **bpf_redirect**\ (), which only supports redirecting |
2574 | * to an ifindex, but doesn't require a map to do so. |
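 *
 *		A minimal XDP sketch, assuming libbpf's BTF map declaration
 *		macros, that redirects every frame to the net device stored at
 *		index 0 of a **BPF_MAP_TYPE_DEVMAP**, falling back to
 *		**XDP_PASS** if the lookup fails:
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_DEVMAP);
 *				__uint(max_entries, 8);
 *				__type(key, __u32);
 *				__type(value, __u32);
 *			} tx_ports SEC(".maps");
 *
 *			SEC("xdp")
 *			int xdp_redirect_prog(struct xdp_md *ctx)
 *			{
 *				return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *			}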
2575 | * Return |
2576 | * **XDP_REDIRECT** on success, or the value of the two lower bits |
2577 | * of the *flags* argument on error. |
2578 | * |
2579 | * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) |
2580 | * Description |
2581 | * Redirect the packet to the socket referenced by *map* (of type |
2582 | * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and |
2583 | * egress interfaces can be used for redirection. The |
2584 | * **BPF_F_INGRESS** value in *flags* is used to make the |
2585 | * distinction (ingress path is selected if the flag is present, |
2586 | * egress path otherwise). This is the only flag supported for now. |
2587 | * Return |
2588 | * **SK_PASS** on success, or **SK_DROP** on error. |
2589 | * |
2590 | * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) |
2591 | * Description |
2592 | * Add an entry to, or update a *map* referencing sockets. The |
2593 | * *skops* is used as a new value for the entry associated to |
2594 | * *key*. *flags* is one of: |
2595 | * |
2596 | * **BPF_NOEXIST** |
2597 | * The entry for *key* must not exist in the map. |
2598 | * **BPF_EXIST** |
2599 | * The entry for *key* must already exist in the map. |
2600 | * **BPF_ANY** |
2601 | * No condition on the existence of the entry for *key*. |
2602 | * |
2603 | * If the *map* has eBPF programs (parser and verdict), those will |
2604 | * be inherited by the socket being added. If the socket is |
2605 | * already attached to eBPF programs, this results in an error. |
2606 | * Return |
2607 | * 0 on success, or a negative error in case of failure. |
2608 | * |
2609 | * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) |
2610 | * Description |
2611 | * Adjust the address pointed by *xdp_md*\ **->data_meta** by |
2612 | * *delta* (which can be positive or negative). Note that this |
2613 | * operation modifies the address stored in *xdp_md*\ **->data**, |
2614 | * so the latter must be loaded only after the helper has been |
2615 | * called. |
2616 | * |
2617 | * The use of *xdp_md*\ **->data_meta** is optional and programs |
2618 | * are not required to use it. The rationale is that when the |
 *		packet is processed with XDP (e.g. as a DoS filter), it is
2620 | * possible to push further meta data along with it before passing |
2621 | * to the stack, and to give the guarantee that an ingress eBPF |
2622 | * program attached as a TC classifier on the same device can pick |
2623 | * this up for further post-processing. Since TC works with socket |
2624 | * buffers, it remains possible to set from XDP the **mark** or |
2625 | * **priority** pointers, or other pointers for the socket buffer. |
2626 | * Having this scratch space generic and programmable allows for |
2627 | * more flexibility as the user is free to store whatever meta |
2628 | * data they need. |
2629 | * |
2630 | * A call to this helper is susceptible to change the underlying |
2631 | * packet buffer. Therefore, at load time, all checks on pointers |
2632 | * previously done by the verifier are invalidated and must be |
2633 | * performed again, if the helper is used in combination with |
2634 | * direct packet access. |
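 *
 *		A common pattern, sketched below, is to reserve a few bytes of
 *		metadata in an XDP program, store a value there, and have a TC
 *		program on the same device read it back later:
 *
 *		::
 *
 *			__u32 *meta;
 *			void *data;
 *
 *			if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
 *				return XDP_ABORTED;
 *			meta = (void *)(long)ctx->data_meta;
 *			data = (void *)(long)ctx->data;
 *			if ((void *)(meta + 1) > data)
 *				return XDP_ABORTED;
 *			*meta = 0x42;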
2635 | * Return |
2636 | * 0 on success, or a negative error in case of failure. |
2637 | * |
2638 | * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) |
2639 | * Description |
2640 | * Read the value of a perf event counter, and store it into *buf* |
2641 | * of size *buf_size*. This helper relies on a *map* of type |
2642 | * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event |
2643 | * counter is selected when *map* is updated with perf event file |
2644 | * descriptors. The *map* is an array whose size is the number of |
2645 | * available CPUs, and each cell contains a value relative to one |
2646 | * CPU. The value to retrieve is indicated by *flags*, that |
2647 | * contains the index of the CPU to look up, masked with |
2648 | * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to |
2649 | * **BPF_F_CURRENT_CPU** to indicate that the value for the |
2650 | * current CPU should be retrieved. |
2651 | * |
2652 | * This helper behaves in a way close to |
2653 | * **bpf_perf_event_read**\ () helper, save that instead of |
2654 | * just returning the value observed, it fills the *buf* |
2655 | * structure. This allows for additional data to be retrieved: in |
2656 | * particular, the enabled and running times (in *buf*\ |
2657 | * **->enabled** and *buf*\ **->running**, respectively) are |
2658 | * copied. In general, **bpf_perf_event_read_value**\ () is |
2659 | * recommended over **bpf_perf_event_read**\ (), which has some |
2660 | * ABI issues and provides fewer functionalities. |
2661 | * |
2662 | * These values are interesting, because hardware PMU (Performance |
2663 | * Monitoring Unit) counters are limited resources. When there are |
2664 | * more PMU based perf events opened than available counters, |
2665 | * kernel will multiplex these events so each event gets certain |
 *		percentage (but not all) of the PMU time. When multiplexing
 *		happens, the number of samples or the counter value will not
 *		reflect what it would have been without multiplexing. This
 *		makes comparison between different runs difficult.
2670 | * Typically, the counter value should be normalized before |
2671 | * comparing to other experiments. The usual normalization is done |
2672 | * as follows. |
2673 | * |
2674 | * :: |
2675 | * |
2676 | * normalized_counter = counter * t_enabled / t_running |
2677 | * |
 *		Where t_enabled is the time enabled for the event and
 *		t_running is the time running for the event since the last
 *		normalization. The enabled and running times are accumulated
 *		since the perf event open. To compute the scaling factor
 *		between two invocations of an eBPF program, users can use the
 *		CPU id as the key (which is typical for the perf array usage
 *		model) to remember the previous value and do the calculation
 *		inside the eBPF program.
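 *
 *		A sketch of reading and normalizing the counter for the
 *		current CPU, assuming a suitably populated
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY** named *counters*:
 *
 *		::
 *
 *			struct bpf_perf_event_value val = {};
 *			__u64 normalized = 0;
 *
 *			if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *						       &val, sizeof(val)) &&
 *			    val.running)
 *				normalized = val.counter * val.enabled / val.running;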
2685 | * Return |
2686 | * 0 on success, or a negative error in case of failure. |
2687 | * |
2688 | * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) |
2689 | * Description |
 *		For an eBPF program attached to a perf event, retrieve the
2691 | * value of the event counter associated to *ctx* and store it in |
2692 | * the structure pointed by *buf* and of size *buf_size*. Enabled |
2693 | * and running times are also stored in the structure (see |
2694 | * description of helper **bpf_perf_event_read_value**\ () for |
2695 | * more details). |
2696 | * Return |
2697 | * 0 on success, or a negative error in case of failure. |
2698 | * |
2699 | * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) |
2700 | * Description |
2701 | * Emulate a call to **getsockopt()** on the socket associated to |
2702 | * *bpf_socket*, which must be a full socket. The *level* at |
2703 | * which the option resides and the name *optname* of the option |
2704 | * must be specified, see **getsockopt(2)** for more information. |
2705 | * The retrieved value is stored in the structure pointed by |
 *		*optval* and of length *optlen*.
2707 | * |
2708 | * *bpf_socket* should be one of the following: |
2709 | * |
2710 | * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. |
2711 | * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** |
2712 | * and **BPF_CGROUP_INET6_CONNECT**. |
2713 | * |
2714 | * This helper actually implements a subset of **getsockopt()**. |
2715 | * It supports the following *level*\ s: |
2716 | * |
2717 | * * **IPPROTO_TCP**, which supports *optname* |
2718 | * **TCP_CONGESTION**. |
2719 | * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. |
2720 | * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. |
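 *
 *		For example, a **BPF_PROG_TYPE_SOCK_OPS** program could query
 *		the congestion control algorithm in use roughly as sketched
 *		below, where *skops* is the program's **struct bpf_sock_ops**
 *		context, the buffer size is an arbitrary choice and
 *		**bpf_printk**\ () comes from libbpf's **bpf_helpers.h**:
 *
 *		::
 *
 *			char cc[16] = {};
 *
 *			if (!bpf_getsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *					    cc, sizeof(cc)))
 *				bpf_printk("cc: %s", cc);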
2721 | * Return |
2722 | * 0 on success, or a negative error in case of failure. |
2723 | * |
2724 | * long bpf_override_return(struct pt_regs *regs, u64 rc) |
2725 | * Description |
2726 | * Used for error injection, this helper uses kprobes to override |
2727 | * the return value of the probed function, and to set it to *rc*. |
2728 | * The first argument is the context *regs* on which the kprobe |
2729 | * works. |
2730 | * |
2731 | * This helper works by setting the PC (program counter) |
2732 | * to an override function which is run in place of the original |
2733 | * probed function. This means the probed function is not run at |
2734 | * all. The replacement function just returns with the required |
2735 | * value. |
2736 | * |
2737 | * This helper has security implications, and thus is subject to |
2738 | * restrictions. It is only available if the kernel was compiled |
2739 | * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration |
2740 | * option, and in this case it only works on functions tagged with |
2741 | * **ALLOW_ERROR_INJECTION** in the kernel code. |
2742 | * |
2743 | * Also, the helper is only available for the architectures having |
2744 | * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, |
2745 | * x86 architecture is the only one to support this feature. |
2746 | * Return |
2747 | * 0 |
2748 | * |
2749 | * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) |
2750 | * Description |
2751 | * Attempt to set the value of the **bpf_sock_ops_cb_flags** field |
2752 | * for the full TCP socket associated to *bpf_sock_ops* to |
2753 | * *argval*. |
2754 | * |
2755 | * The primary use of this field is to determine if there should |
2756 | * be calls to eBPF programs of type |
2757 | * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP |
2758 | * code. A program of the same type can change its value, per |
2759 | * connection and as necessary, when the connection is |
2760 | * established. This field is directly accessible for reading, but |
2761 | * this helper must be used for updates in order to return an |
2762 | * error if an eBPF program tries to set a callback that is not |
2763 | * supported in the current kernel. |
2764 | * |
 *		*argval* is a bit mask that can combine any of the following
 *		flags:
2766 | * |
2767 | * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) |
2768 | * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) |
2769 | * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) |
2770 | * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) |
2771 | * |
 *		Therefore, this function can also be used to clear a callback
 *		flag by setting the appropriate bit to zero. For example, to
 *		disable the RTO callback:
2775 | * |
2776 | * **bpf_sock_ops_cb_flags_set(bpf_sock,** |
2777 | * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** |
2778 | * |
2779 | * Here are some examples of where one could call such eBPF |
2780 | * program: |
2781 | * |
2782 | * * When RTO fires. |
2783 | * * When a packet is retransmitted. |
2784 | * * When the connection terminates. |
2785 | * * When a packet is sent. |
2786 | * * When a packet is received. |
2787 | * Return |
2788 | * Code **-EINVAL** if the socket is not a full TCP socket; |
2789 | * otherwise, a positive number containing the bits that could not |
2790 | * be set is returned (which comes down to 0 if all bits were set |
2791 | * as required). |
2792 | * |
2793 | * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) |
2794 | * Description |
2795 | * This helper is used in programs implementing policies at the |
2796 | * socket level. If the message *msg* is allowed to pass (i.e. if |
2797 | * the verdict eBPF program returns **SK_PASS**), redirect it to |
2798 | * the socket referenced by *map* (of type |
2799 | * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and |
2800 | * egress interfaces can be used for redirection. The |
2801 | * **BPF_F_INGRESS** value in *flags* is used to make the |
2802 | * distinction (ingress path is selected if the flag is present, |
2803 | * egress path otherwise). This is the only flag supported for now. |
2804 | * Return |
2805 | * **SK_PASS** on success, or **SK_DROP** on error. |
2806 | * |
2807 | * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) |
2808 | * Description |
2809 | * For socket policies, apply the verdict of the eBPF program to |
2810 | * the next *bytes* (number of bytes) of message *msg*. |
2811 | * |
2812 | * For example, this helper can be used in the following cases: |
2813 | * |
2814 | * * A single **sendmsg**\ () or **sendfile**\ () system call |
2815 | * contains multiple logical messages that the eBPF program is |
2816 | * supposed to read and for which it should apply a verdict. |
2817 | * * An eBPF program only cares to read the first *bytes* of a |
2818 | * *msg*. If the message has a large payload, then setting up |
2819 | * and calling the eBPF program repeatedly for all bytes, even |
2820 | * though the verdict is already known, would create unnecessary |
2821 | * overhead. |
2822 | * |
2823 | * When called from within an eBPF program, the helper sets a |
2824 | * counter internal to the BPF infrastructure, that is used to |
2825 | * apply the last verdict to the next *bytes*. If *bytes* is |
2826 | * smaller than the current data being processed from a |
2827 | * **sendmsg**\ () or **sendfile**\ () system call, the first |
2828 | * *bytes* will be sent and the eBPF program will be re-run with |
2829 | * the pointer for start of data pointing to byte number *bytes* |
2830 | * **+ 1**. If *bytes* is larger than the current data being |
2831 | * processed, then the eBPF verdict will be applied to multiple |
2832 | * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are |
2833 | * consumed. |
2834 | * |
2835 | * Note that if a socket closes with the internal counter holding |
2836 | * a non-zero value, this is not a problem because data is not |
2837 | * being buffered for *bytes* and is sent as it is received. |
2838 | * Return |
2839 | * 0 |
2840 | * |
2841 | * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) |
2842 | * Description |
2843 | * For socket policies, prevent the execution of the verdict eBPF |
2844 | * program for message *msg* until *bytes* (byte number) have been |
2845 | * accumulated. |
2846 | * |
2847 | * This can be used when one needs a specific number of bytes |
2848 | * before a verdict can be assigned, even if the data spans |
2849 | * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme |
2850 | * case would be a user calling **sendmsg**\ () repeatedly with |
2851 | * 1-byte long message segments. Obviously, this is bad for |
2852 | * performance, but it is still valid. If the eBPF program needs |
2853 | * *bytes* bytes to validate a header, this helper can be used to |
 *		prevent the eBPF program from being called again until *bytes*
 *		have been accumulated.
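 *
 *		For instance, near the top of a **BPF_PROG_TYPE_SK_MSG**
 *		verdict program, one could wait for a hypothetical 8-byte
 *		application header before deciding anything:
 *
 *		::
 *
 *			if (msg->size < 8) {
 *				bpf_msg_cork_bytes(msg, 8);
 *				return SK_PASS;
 *			}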
2856 | * Return |
2857 | * 0 |
2858 | * |
2859 | * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) |
2860 | * Description |
2861 | * For socket policies, pull in non-linear data from user space |
2862 | * for *msg* and set pointers *msg*\ **->data** and *msg*\ |
2863 | * **->data_end** to *start* and *end* bytes offsets into *msg*, |
2864 | * respectively. |
2865 | * |
2866 | * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a |
2867 | * *msg* it can only parse data that the (**data**, **data_end**) |
2868 | * pointers have already consumed. For **sendmsg**\ () hooks this |
2869 | * is likely the first scatterlist element. But for calls relying |
2870 | * on the **sendpage** handler (e.g. **sendfile**\ ()) this will |
2871 | * be the range (**0**, **0**) because the data is shared with |
2872 | * user space and by default the objective is to avoid allowing |
2873 | * user space to modify data while (or after) eBPF verdict is |
2874 | * being decided. This helper can be used to pull in data and to |
2875 | * set the start and end pointer to given values. Data will be |
2876 | * copied if necessary (i.e. if data was not linear and if start |
2877 | * and end pointers do not point to the same chunk). |
2878 | * |
2879 | * A call to this helper is susceptible to change the underlying |
2880 | * packet buffer. Therefore, at load time, all checks on pointers |
2881 | * previously done by the verifier are invalidated and must be |
2882 | * performed again, if the helper is used in combination with |
2883 | * direct packet access. |
2884 | * |
2885 | * All values for *flags* are reserved for future usage, and must |
2886 | * be left at zero. |
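 *
 *		A sketch of making the first 8 bytes of the payload readable
 *		before parsing them (the length is an arbitrary example):
 *
 *		::
 *
 *			void *data, *data_end;
 *
 *			if (bpf_msg_pull_data(msg, 0, 8, 0))
 *				return SK_DROP;
 *			data = msg->data;
 *			data_end = msg->data_end;
 *			if (data + 8 > data_end)
 *				return SK_DROP;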
2887 | * Return |
2888 | * 0 on success, or a negative error in case of failure. |
2889 | * |
2890 | * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) |
2891 | * Description |
2892 | * Bind the socket associated to *ctx* to the address pointed by |
 *		*addr*, of length *addr_len*. This allows for making outgoing
 *		connections from the desired IP address, which can be useful
 *		for example when all processes inside a cgroup should use one
 *		single IP address on a host that has multiple IP addresses
 *		configured.
2897 | * |
2898 | * This helper works for IPv4 and IPv6, TCP and UDP sockets. The |
2899 | * domain (*addr*\ **->sa_family**) must be **AF_INET** (or |
2900 | * **AF_INET6**). It's advised to pass zero port (**sin_port** |
2901 | * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like |
 *		behavior and lets the kernel efficiently pick an unused port,
 *		as long as the 4-tuple is unique. Passing a non-zero port might
2904 | * lead to degraded performance. |
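 *
 *		A **BPF_CGROUP_INET4_CONNECT** program could, for instance,
 *		force a source address roughly as sketched below (the address
 *		is a placeholder, and **struct sockaddr_in** plus
 *		**bpf_htonl**\ () come from the usual headers):
 *
 *		::
 *
 *			struct sockaddr_in sa = {
 *				.sin_family = AF_INET,
 *			};
 *
 *			sa.sin_addr.s_addr = bpf_htonl(0x0a000001); // 10.0.0.1
 *			if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *				return 0; // reject the connection attempt
 *			return 1;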
2905 | * Return |
2906 | * 0 on success, or a negative error in case of failure. |
2907 | * |
2908 | * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) |
2909 | * Description |
2910 | * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is |
2911 | * possible to both shrink and grow the packet tail. |
 *		Shrinking is done by passing a negative integer in *delta*.
2913 | * |
2914 | * A call to this helper is susceptible to change the underlying |
2915 | * packet buffer. Therefore, at load time, all checks on pointers |
2916 | * previously done by the verifier are invalidated and must be |
2917 | * performed again, if the helper is used in combination with |
2918 | * direct packet access. |
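 *
 *		For example, an XDP program could trim every frame down to at
 *		most 256 bytes (an arbitrary limit) as follows:
 *
 *		::
 *
 *			int len = ctx->data_end - ctx->data;
 *
 *			if (len > 256 && bpf_xdp_adjust_tail(ctx, 256 - len))
 *				return XDP_ABORTED;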
2919 | * Return |
2920 | * 0 on success, or a negative error in case of failure. |
2921 | * |
2922 | * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) |
2923 | * Description |
2924 | * Retrieve the XFRM state (IP transform framework, see also |
2925 | * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. |
2926 | * |
2927 | * The retrieved value is stored in the **struct bpf_xfrm_state** |
2928 | * pointed by *xfrm_state* and of length *size*. |
2929 | * |
2930 | * All values for *flags* are reserved for future usage, and must |
2931 | * be left at zero. |
2932 | * |
2933 | * This helper is available only if the kernel was compiled with |
2934 | * **CONFIG_XFRM** configuration option. |
2935 | * Return |
2936 | * 0 on success, or a negative error in case of failure. |
2937 | * |
2938 | * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) |
2939 | * Description |
 *		Return a user or a kernel stack in a buffer provided by the
 *		BPF program.
2941 | * To achieve this, the helper needs *ctx*, which is a pointer |
2942 | * to the context on which the tracing program is executed. |
2943 | * To store the stacktrace, the bpf program provides *buf* with |
2944 | * a nonnegative *size*. |
2945 | * |
2946 | * The last argument, *flags*, holds the number of stack frames to |
2947 | * skip (from 0 to 255), masked with |
2948 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set |
2949 | * the following flags: |
2950 | * |
2951 | * **BPF_F_USER_STACK** |
2952 | * Collect a user space stack instead of a kernel stack. |
2953 | * **BPF_F_USER_BUILD_ID** |
2954 | * Collect buildid+offset instead of ips for user stack, |
2955 | * only valid if **BPF_F_USER_STACK** is also specified. |
2956 | * |
2957 | * **bpf_get_stack**\ () can collect up to |
2958 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject |
 *		to a sufficiently large buffer size. Note that
2960 | * this limit can be controlled with the **sysctl** program, and |
2961 | * that it should be manually increased in order to profile long |
2962 | * user stacks (such as stacks for Java programs). To do so, use: |
2963 | * |
2964 | * :: |
2965 | * |
2966 | * # sysctl kernel.perf_event_max_stack=<new value> |
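 *
 *		A tracing program could collect the current user-space stack
 *		into a fixed buffer as sketched below (a depth of 16 frames is
 *		an arbitrary choice):
 *
 *		::
 *
 *			__u64 ips[16] = {};
 *			long len;
 *
 *			len = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *			if (len < 0)
 *				return 0;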
2967 | * Return |
2968 | * The non-negative copied *buf* length equal to or less than |
2969 | * *size* on success, or a negative error in case of failure. |
2970 | * |
2971 | * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) |
2972 | * Description |
2973 | * This helper is similar to **bpf_skb_load_bytes**\ () in that |
2974 | * it provides an easy way to load *len* bytes from *offset* |
2975 | * from the packet associated to *skb*, into the buffer pointed |
2976 | * by *to*. The difference to **bpf_skb_load_bytes**\ () is that |
2977 | * a fifth argument *start_header* exists in order to select a |
2978 | * base offset to start from. *start_header* can be one of: |
2979 | * |
2980 | * **BPF_HDR_START_MAC** |
2981 | * Base offset to load data from is *skb*'s mac header. |
2982 | * **BPF_HDR_START_NET** |
2983 | * Base offset to load data from is *skb*'s network header. |
2984 | * |
2985 | * In general, "direct packet access" is the preferred method to |
2986 | * access packet data, however, this helper is in particular useful |
2987 | * in socket filters where *skb*\ **->data** does not always point |
2988 | * to the start of the mac header and where "direct packet access" |
2989 | * is not available. |
2990 | * Return |
2991 | * 0 on success, or a negative error in case of failure. |
2992 | * |
2993 | * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) |
2994 | * Description |
2995 | * Do FIB lookup in kernel tables using parameters in *params*. |
2996 | * If lookup is successful and result shows packet is to be |
2997 | * forwarded, the neighbor tables are searched for the nexthop. |
2998 | * If successful (ie., FIB lookup shows forwarding and nexthop |
2999 | * is resolved), the nexthop address is returned in ipv4_dst |
3000 | * or ipv6_dst based on family, smac is set to mac address of |
3001 | * egress device, dmac is set to nexthop mac address, rt_metric |
3002 | * is set to metric from route (IPv4/IPv6 only), and ifindex |
3003 | * is set to the device index of the nexthop from the FIB lookup. |
3004 | * |
 *		The *plen* argument is the size of the passed-in struct.
3006 | * *flags* argument can be a combination of one or more of the |
3007 | * following values: |
3008 | * |
3009 | * **BPF_FIB_LOOKUP_DIRECT** |
3010 | * Do a direct table lookup vs full lookup using FIB |
3011 | * rules. |
3012 | * **BPF_FIB_LOOKUP_OUTPUT** |
3013 | * Perform lookup from an egress perspective (default is |
3014 | * ingress). |
3015 | * |
3016 | * *ctx* is either **struct xdp_md** for XDP programs or |
 *		**struct sk_buff** for tc cls_act programs.
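 *
 *		A condensed XDP forwarding sketch (IPv4 only; parsing and
 *		bounds-checking of *eth* and *iph*, and the usual network
 *		headers, are assumed):
 *
 *		::
 *
 *			struct bpf_fib_lookup fib = {};
 *			int rc;
 *
 *			fib.family   = AF_INET;
 *			fib.ipv4_src = iph->saddr;
 *			fib.ipv4_dst = iph->daddr;
 *			fib.ifindex  = ctx->ingress_ifindex;
 *
 *			rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
 *			if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
 *				__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
 *				__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
 *				return bpf_redirect(fib.ifindex, 0);
 *			}
 *			return XDP_PASS;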
3018 | * Return |
3019 | * * < 0 if any input argument is invalid |
3020 | * * 0 on success (packet is forwarded, nexthop neighbor exists) |
3021 | * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the |
3022 | * packet is not forwarded or needs assist from full stack |
3023 | * |
3024 | * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU |
3025 | * was exceeded and output params->mtu_result contains the MTU. |
3026 | * |
3027 | * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) |
3028 | * Description |
3029 | * Add an entry to, or update a sockhash *map* referencing sockets. |
3030 | * The *skops* is used as a new value for the entry associated to |
3031 | * *key*. *flags* is one of: |
3032 | * |
3033 | * **BPF_NOEXIST** |
3034 | * The entry for *key* must not exist in the map. |
3035 | * **BPF_EXIST** |
3036 | * The entry for *key* must already exist in the map. |
3037 | * **BPF_ANY** |
3038 | * No condition on the existence of the entry for *key*. |
3039 | * |
3040 | * If the *map* has eBPF programs (parser and verdict), those will |
3041 | * be inherited by the socket being added. If the socket is |
3042 | * already attached to eBPF programs, this results in an error. |
3043 | * Return |
3044 | * 0 on success, or a negative error in case of failure. |
3045 | * |
3046 | * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) |
3047 | * Description |
3048 | * This helper is used in programs implementing policies at the |
3049 | * socket level. If the message *msg* is allowed to pass (i.e. if |
3050 | * the verdict eBPF program returns **SK_PASS**), redirect it to |
3051 | * the socket referenced by *map* (of type |
3052 | * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and |
3053 | * egress interfaces can be used for redirection. The |
3054 | * **BPF_F_INGRESS** value in *flags* is used to make the |
3055 | * distinction (ingress path is selected if the flag is present, |
3056 | * egress path otherwise). This is the only flag supported for now. |
3057 | * Return |
3058 | * **SK_PASS** on success, or **SK_DROP** on error. |
3059 | * |
3060 | * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) |
3061 | * Description |
3062 | * This helper is used in programs implementing policies at the |
3063 | * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. |
3064 | * if the verdict eBPF program returns **SK_PASS**), redirect it |
3065 | * to the socket referenced by *map* (of type |
3066 | * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and |
3067 | * egress interfaces can be used for redirection. The |
3068 | * **BPF_F_INGRESS** value in *flags* is used to make the |
3069 | * distinction (ingress path is selected if the flag is present, |
3070 | * egress otherwise). This is the only flag supported for now. |
3071 | * Return |
3072 | * **SK_PASS** on success, or **SK_DROP** on error. |
3073 | * |
3074 | * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) |
3075 | * Description |
3076 | * Encapsulate the packet associated to *skb* within a Layer 3 |
3077 | * protocol header. This header is provided in the buffer at |
3078 | * address *hdr*, with *len* its size in bytes. *type* indicates |
3079 | * the protocol of the header and can be one of: |
3080 | * |
3081 | * **BPF_LWT_ENCAP_SEG6** |
3082 | * IPv6 encapsulation with Segment Routing Header |
3083 | * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, |
3084 | * the IPv6 header is computed by the kernel. |
3085 | * **BPF_LWT_ENCAP_SEG6_INLINE** |
3086 | * Only works if *skb* contains an IPv6 packet. Insert a |
3087 | * Segment Routing Header (**struct ipv6_sr_hdr**) inside |
3088 | * the IPv6 header. |
3089 | * **BPF_LWT_ENCAP_IP** |
3090 | * IP encapsulation (GRE/GUE/IPIP/etc). The outer header |
3091 | * must be IPv4 or IPv6, followed by zero or more |
3092 | * additional headers, up to **LWT_BPF_MAX_HEADROOM** |
3093 | * total bytes in all prepended headers. Please note that |
3094 | * if **skb_is_gso**\ (*skb*) is true, no more than two |
3095 | * headers can be prepended, and the inner header, if |
3096 | * present, should be either GRE or UDP/GUE. |
3097 | * |
3098 | * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs |
3099 | * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can |
3100 | * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and |
3101 | * **BPF_PROG_TYPE_LWT_XMIT**. |
3102 | * |
3103 | * A call to this helper is susceptible to change the underlying |
3104 | * packet buffer. Therefore, at load time, all checks on pointers |
3105 | * previously done by the verifier are invalidated and must be |
3106 | * performed again, if the helper is used in combination with |
3107 | * direct packet access. |
3108 | * Return |
3109 | * 0 on success, or a negative error in case of failure. |
3110 | * |
3111 | * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) |
3112 | * Description |
3113 | * Store *len* bytes from address *from* into the packet |
3114 | * associated to *skb*, at *offset*. Only the flags, tag and TLVs |
3115 | * inside the outermost IPv6 Segment Routing Header can be |
3116 | * modified through this helper. |
3117 | * |
3118 | * A call to this helper is susceptible to change the underlying |
3119 | * packet buffer. Therefore, at load time, all checks on pointers |
3120 | * previously done by the verifier are invalidated and must be |
3121 | * performed again, if the helper is used in combination with |
3122 | * direct packet access. |
3123 | * Return |
3124 | * 0 on success, or a negative error in case of failure. |
3125 | * |
3126 | * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) |
3127 | * Description |
3128 | * Adjust the size allocated to TLVs in the outermost IPv6 |
3129 | * Segment Routing Header contained in the packet associated to |
3130 | * *skb*, at position *offset* by *delta* bytes. Only offsets |
 *		after the segments are accepted. *delta* can be positive
 *		(growing) as well as negative (shrinking).
3133 | * |
3134 | * A call to this helper is susceptible to change the underlying |
3135 | * packet buffer. Therefore, at load time, all checks on pointers |
3136 | * previously done by the verifier are invalidated and must be |
3137 | * performed again, if the helper is used in combination with |
3138 | * direct packet access. |
3139 | * Return |
3140 | * 0 on success, or a negative error in case of failure. |
3141 | * |
3142 | * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) |
3143 | * Description |
3144 | * Apply an IPv6 Segment Routing action of type *action* to the |
3145 | * packet associated to *skb*. Each action takes a parameter |
3146 | * contained at address *param*, and of length *param_len* bytes. |
3147 | * *action* can be one of: |
3148 | * |
3149 | * **SEG6_LOCAL_ACTION_END_X** |
3150 | * End.X action: Endpoint with Layer-3 cross-connect. |
3151 | * Type of *param*: **struct in6_addr**. |
3152 | * **SEG6_LOCAL_ACTION_END_T** |
3153 | * End.T action: Endpoint with specific IPv6 table lookup. |
3154 | * Type of *param*: **int**. |
3155 | * **SEG6_LOCAL_ACTION_END_B6** |
3156 | * End.B6 action: Endpoint bound to an SRv6 policy. |
3157 | * Type of *param*: **struct ipv6_sr_hdr**. |
3158 | * **SEG6_LOCAL_ACTION_END_B6_ENCAP** |
3159 | * End.B6.Encap action: Endpoint bound to an SRv6 |
3160 | * encapsulation policy. |
3161 | * Type of *param*: **struct ipv6_sr_hdr**. |
3162 | * |
3163 | * A call to this helper is susceptible to change the underlying |
3164 | * packet buffer. Therefore, at load time, all checks on pointers |
3165 | * previously done by the verifier are invalidated and must be |
3166 | * performed again, if the helper is used in combination with |
3167 | * direct packet access. |
3168 | * Return |
3169 | * 0 on success, or a negative error in case of failure. |
3170 | * |
3171 | * long bpf_rc_repeat(void *ctx) |
3172 | * Description |
3173 | * This helper is used in programs implementing IR decoding, to |
3174 | * report a successfully decoded repeat key message. This delays |
3175 | * the generation of a key up event for previously generated |
3176 | * key down event. |
3177 | * |
3178 | * Some IR protocols like NEC have a special IR message for |
3179 | * repeating last button, for when a button is held down. |
3180 | * |
3181 | * The *ctx* should point to the lirc sample as passed into |
3182 | * the program. |
3183 | * |
 *		This helper is only available if the kernel was compiled with
3185 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
3186 | * "**y**". |
3187 | * Return |
3188 | * 0 |
3189 | * |
3190 | * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) |
3191 | * Description |
3192 | * This helper is used in programs implementing IR decoding, to |
3193 | * report a successfully decoded key press with *scancode*, |
3194 | * *toggle* value in the given *protocol*. The scancode will be |
3195 | * translated to a keycode using the rc keymap, and reported as |
3196 | * an input key down event. After a period a key up event is |
3197 | * generated. This period can be extended by calling either |
3198 | * **bpf_rc_keydown**\ () again with the same values, or calling |
3199 | * **bpf_rc_repeat**\ (). |
3200 | * |
3201 | * Some protocols include a toggle bit, in case the button was |
3202 | * released and pressed again between consecutive scancodes. |
3203 | * |
3204 | * The *ctx* should point to the lirc sample as passed into |
3205 | * the program. |
3206 | * |
3207 | * The *protocol* is the decoded protocol number (see |
3208 | * **enum rc_proto** for some predefined values). |
3209 | * |
 *		This helper is only available if the kernel was compiled with
3211 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
3212 | * "**y**". |
3213 | * Return |
3214 | * 0 |
3215 | * |
3216 | * u64 bpf_skb_cgroup_id(struct sk_buff *skb) |
3217 | * Description |
3218 | * Return the cgroup v2 id of the socket associated with the *skb*. |
3219 | * This is roughly similar to the **bpf_get_cgroup_classid**\ () |
 *		helper for cgroup v1, by providing a tag (or identifier) that
3221 | * can be matched on or used for map lookups e.g. to implement |
3222 | * policy. The cgroup v2 id of a given path in the hierarchy is |
3223 | * exposed in user space through the f_handle API in order to get |
3224 | * to the same 64-bit id. |
3225 | * |
3226 | * This helper can be used on TC egress path, but not on ingress, |
3227 | * and is available only if the kernel was compiled with the |
3228 | * **CONFIG_SOCK_CGROUP_DATA** configuration option. |
3229 | * Return |
3230 | * The id is returned or 0 in case the id could not be retrieved. |
3231 | * |
3232 | * u64 bpf_get_current_cgroup_id(void) |
3233 | * Return |
3234 | * A 64-bit integer containing the current cgroup id based |
3235 | * on the cgroup within which the current task is running. |
3236 | * |
3237 | * void *bpf_get_local_storage(void *map, u64 flags) |
3238 | * Description |
3239 | * Get the pointer to the local storage area. |
3240 | * The type and the size of the local storage is defined |
3241 | * by the *map* argument. |
3242 | * The *flags* meaning is specific for each map type, |
3243 | * and has to be 0 for cgroup local storage. |
3244 | * |
3245 | * Depending on the BPF program type, a local storage area |
3246 | * can be shared between multiple instances of the BPF program, |
3247 | * running simultaneously. |
3248 | * |
 *		Users should take care of the synchronization themselves,
 *		for example by using the **BPF_ATOMIC** instructions to alter
 *		the shared data.
3252 | * Return |
3253 | * A pointer to the local storage area. |
3254 | * |
3255 | * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) |
3256 | * Description |
3257 | * Select a **SO_REUSEPORT** socket from a |
3258 | * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. |
 *		It checks that the selected socket matches the incoming
 *		request in the socket buffer.
3261 | * Return |
3262 | * 0 on success, or a negative error in case of failure. |
3263 | * |
3264 | * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) |
3265 | * Description |
3266 | * Return id of cgroup v2 that is ancestor of cgroup associated |
3267 | * with the *skb* at the *ancestor_level*. The root cgroup is at |
3268 | * *ancestor_level* zero and each step down the hierarchy |
3269 | * increments the level. If *ancestor_level* == level of cgroup |
3270 | * associated with *skb*, then return value will be same as that |
3271 | * of **bpf_skb_cgroup_id**\ (). |
3272 | * |
3273 | * The helper is useful to implement policies based on cgroups |
3274 | * that are upper in hierarchy than immediate cgroup associated |
3275 | * with *skb*. |
3276 | * |
3277 | * The format of returned id and helper limitations are same as in |
3278 | * **bpf_skb_cgroup_id**\ (). |
3279 | * Return |
3280 | * The id is returned or 0 in case the id could not be retrieved. |
3281 | * |
3282 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
3283 | * Description |
3284 | * Look for TCP socket matching *tuple*, optionally in a child |
3285 | * network namespace *netns*. The return value must be checked, |
3286 | * and if non-**NULL**, released via **bpf_sk_release**\ (). |
3287 | * |
3288 | * The *ctx* should point to the context of the program, such as |
3289 | * the skb or socket (depending on the hook in use). This is used |
3290 | * to determine the base network namespace for the lookup. |
3291 | * |
3292 | * *tuple_size* must be one of: |
3293 | * |
3294 | * **sizeof**\ (*tuple*\ **->ipv4**) |
3295 | * Look for an IPv4 socket. |
3296 | * **sizeof**\ (*tuple*\ **->ipv6**) |
3297 | * Look for an IPv6 socket. |
3298 | * |
3299 | * If the *netns* is a negative signed 32-bit integer, then the |
3300 | * socket lookup table in the netns associated with the *ctx* |
3301 | * will be used. For the TC hooks, this is the netns of the device |
3302 | * in the skb. For socket hooks, this is the netns of the socket. |
3303 | * If *netns* is any other signed 32-bit value greater than or |
3304 | * equal to zero then it specifies the ID of the netns relative to |
3305 | * the netns associated with the *ctx*. *netns* values beyond the |
3306 | * range of 32-bit integers are reserved for future use. |
3307 | * |
3308 | * All values for *flags* are reserved for future usage, and must |
3309 | * be left at zero. |
3310 | * |
3311 | * This helper is available only if the kernel was compiled with |
3312 | * **CONFIG_NET** configuration option. |
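 *
 *		A minimal sketch from a TC program, looking up an IPv4 TCP
 *		socket in the current netns (*iph* and *tcph* are assumed to
 *		have been parsed and bounds-checked already):
 *
 *		::
 *
 *			struct bpf_sock_tuple tuple = {};
 *			struct bpf_sock *sk;
 *
 *			tuple.ipv4.saddr = iph->saddr;
 *			tuple.ipv4.daddr = iph->daddr;
 *			tuple.ipv4.sport = tcph->source;
 *			tuple.ipv4.dport = tcph->dest;
 *
 *			sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *					       BPF_F_CURRENT_NETNS, 0);
 *			if (sk)
 *				bpf_sk_release(sk);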
3313 | * Return |
3314 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. |
3315 | * For sockets with reuseport option, the **struct bpf_sock** |
3316 | * result is from *reuse*\ **->socks**\ [] using the hash of the |
3317 | * tuple. |
3318 | * |
3319 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
3320 | * Description |
3321 | * Look for UDP socket matching *tuple*, optionally in a child |
3322 | * network namespace *netns*. The return value must be checked, |
3323 | * and if non-**NULL**, released via **bpf_sk_release**\ (). |
3324 | * |
3325 | * The *ctx* should point to the context of the program, such as |
3326 | * the skb or socket (depending on the hook in use). This is used |
3327 | * to determine the base network namespace for the lookup. |
3328 | * |
3329 | * *tuple_size* must be one of: |
3330 | * |
3331 | * **sizeof**\ (*tuple*\ **->ipv4**) |
3332 | * Look for an IPv4 socket. |
3333 | * **sizeof**\ (*tuple*\ **->ipv6**) |
3334 | * Look for an IPv6 socket. |
3335 | * |
3336 | * If the *netns* is a negative signed 32-bit integer, then the |
3337 | * socket lookup table in the netns associated with the *ctx* |
3338 | * will be used. For the TC hooks, this is the netns of the device |
3339 | * in the skb. For socket hooks, this is the netns of the socket. |
3340 | * If *netns* is any other signed 32-bit value greater than or |
3341 | * equal to zero then it specifies the ID of the netns relative to |
3342 | * the netns associated with the *ctx*. *netns* values beyond the |
3343 | * range of 32-bit integers are reserved for future use. |
3344 | * |
3345 | * All values for *flags* are reserved for future usage, and must |
3346 | * be left at zero. |
3347 | * |
3348 | * This helper is available only if the kernel was compiled with |
3349 | * **CONFIG_NET** configuration option. |
3350 | * Return |
3351 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. |
3352 | * For sockets with reuseport option, the **struct bpf_sock** |
3353 | * result is from *reuse*\ **->socks**\ [] using the hash of the |
3354 | * tuple. |
3355 | * |
3356 | * long bpf_sk_release(void *sock) |
3357 | * Description |
3358 | * Release the reference held by *sock*. *sock* must be a |
3359 | * non-**NULL** pointer that was returned from |
3360 | * **bpf_sk_lookup_xxx**\ (). |
3361 | * Return |
3362 | * 0 on success, or a negative error in case of failure. |
3363 | * |
3364 | * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) |
3365 | * Description |
3366 | * Push an element *value* in *map*. *flags* is one of: |
3367 | * |
3368 | * **BPF_EXIST** |
3369 | * If the queue/stack is full, the oldest element is |
3370 | * removed to make room for this. |
3371 | * Return |
3372 | * 0 on success, or a negative error in case of failure. |
3373 | * |
3374 | * long bpf_map_pop_elem(struct bpf_map *map, void *value) |
3375 | * Description |
3376 | * Pop an element from *map*. |
3377 | * Return |
3378 | * 0 on success, or a negative error in case of failure. |
3379 | * |
3380 | * long bpf_map_peek_elem(struct bpf_map *map, void *value) |
3381 | * Description |
3382 | * Get an element from *map* without removing it. |
3383 | * Return |
3384 | * 0 on success, or a negative error in case of failure. |
3385 | * |
3386 | * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) |
3387 | * Description |
3388 | * For socket policies, insert *len* bytes into *msg* at offset |
3389 | * *start*. |
3390 | * |
3391 | * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a |
3392 | * *msg* it may want to insert metadata or options into the *msg*. |
3393 | * This can later be read and used by any of the lower layer BPF |
3394 | * hooks. |
3395 | * |
 *		This helper may fail under memory pressure (if an allocation
 *		fails); in that case the BPF program will get an appropriate
 *		error and will need to handle it.
3399 | * Return |
3400 | * 0 on success, or a negative error in case of failure. |
3401 | * |
3402 | * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) |
3403 | * Description |
3404 | * Will remove *len* bytes from a *msg* starting at byte *start*. |
3405 | * This may result in **ENOMEM** errors under certain situations if |
3406 | * an allocation and copy are required due to a full ring buffer. |
3407 | * However, the helper will try to avoid doing the allocation |
3408 | * if possible. Other errors can occur if input parameters are |
 *		invalid, either due to the *start* byte not being a valid part
 *		of the *msg* payload and/or the *len* value being too large.
3411 | * Return |
3412 | * 0 on success, or a negative error in case of failure. |
3413 | * |
3414 | * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) |
3415 | * Description |
3416 | * This helper is used in programs implementing IR decoding, to |
3417 | * report a successfully decoded pointer movement. |
3418 | * |
3419 | * The *ctx* should point to the lirc sample as passed into |
3420 | * the program. |
3421 | * |
 *		This helper is only available if the kernel was compiled with
3423 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
3424 | * "**y**". |
3425 | * Return |
3426 | * 0 |
3427 | * |
3428 | * long bpf_spin_lock(struct bpf_spin_lock *lock) |
3429 | * Description |
3430 | * Acquire a spinlock represented by the pointer *lock*, which is |
3431 | * stored as part of a value of a map. Taking the lock allows to |
3432 | * safely update the rest of the fields in that value. The |
3433 | * spinlock can (and must) later be released with a call to |
3434 | * **bpf_spin_unlock**\ (\ *lock*\ ). |
3435 | * |
3436 | * Spinlocks in BPF programs come with a number of restrictions |
3437 | * and constraints: |
3438 | * |
3439 | * * **bpf_spin_lock** objects are only allowed inside maps of |
3440 | * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this |
3441 | * list could be extended in the future). |
3442 | * * BTF description of the map is mandatory. |
3443 | * * The BPF program can take ONE lock at a time, since taking two |
 *		  or more could cause deadlocks.
3445 | * * Only one **struct bpf_spin_lock** is allowed per map element. |
3446 | * * When the lock is taken, calls (either BPF to BPF or helpers) |
3447 | * are not allowed. |
3448 | * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not |
3449 | * allowed inside a spinlock-ed region. |
3450 | * * The BPF program MUST call **bpf_spin_unlock**\ () to release |
3451 | * the lock, on all execution paths, before it returns. |
3452 | * * The BPF program can access **struct bpf_spin_lock** only via |
3453 | * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () |
3454 | * helpers. Loading or storing data into the **struct |
3455 | * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. |
3456 | * * To use the **bpf_spin_lock**\ () helper, the BTF description |
3457 | * of the map value must be a struct and have **struct |
3458 | * bpf_spin_lock** *anyname*\ **;** field at the top level. |
3459 | * Nested lock inside another struct is not allowed. |
3460 | * * The **struct bpf_spin_lock** *lock* field in a map value must |
3461 | * be aligned on a multiple of 4 bytes in that value. |
3462 | * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy |
3463 | * the **bpf_spin_lock** field to user space. |
3464 | * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from |
3465 | * a BPF program, do not update the **bpf_spin_lock** field. |
3466 | * * **bpf_spin_lock** cannot be on the stack or inside a |
 *		  networking packet (it can only be inside of a map value).
3468 | * * **bpf_spin_lock** is available to root only. |
3469 | * * Tracing programs and socket filter programs cannot use |
3470 | * **bpf_spin_lock**\ () due to insufficient preemption checks |
3471 | * (but this may change in the future). |
3472 | * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. |
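 *
 *		A sketch of the typical pattern, assuming a map *my_map* whose
 *		BTF-described value embeds the lock:
 *
 *		::
 *
 *			struct map_value {
 *				struct bpf_spin_lock lock;
 *				__u64 counter;
 *			};
 *
 *			struct map_value *v = bpf_map_lookup_elem(&my_map, &key);
 *
 *			if (v) {
 *				bpf_spin_lock(&v->lock);
 *				v->counter++;
 *				bpf_spin_unlock(&v->lock);
 *			}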
3473 | * Return |
3474 | * 0 |
3475 | * |
3476 | * long bpf_spin_unlock(struct bpf_spin_lock *lock) |
3477 | * Description |
3478 | * Release the *lock* previously locked by a call to |
3479 | * **bpf_spin_lock**\ (\ *lock*\ ). |
3480 | * Return |
3481 | * 0 |
3482 | * |
3483 | * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) |
3484 | * Description |
3485 | * This helper gets a **struct bpf_sock** pointer such |
3486 | * that all the fields in this **bpf_sock** can be accessed. |
3487 | * Return |
3488 | * A **struct bpf_sock** pointer on success, or **NULL** in |
3489 | * case of failure. |
3490 | * |
3491 | * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) |
3492 | * Description |
3493 | * This helper gets a **struct bpf_tcp_sock** pointer from a |
3494 | * **struct bpf_sock** pointer. |
3495 | * Return |
3496 | * A **struct bpf_tcp_sock** pointer on success, or **NULL** in |
3497 | * case of failure. |
3498 | * |
3499 | * long bpf_skb_ecn_set_ce(struct sk_buff *skb) |
3500 | * Description |
3501 | * Set ECN (Explicit Congestion Notification) field of IP header |
3502 | * to **CE** (Congestion Encountered) if current value is **ECT** |
3503 | * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 |
3504 | * and IPv4. |
3505 | * Return |
3506 | * 1 if the **CE** flag is set (either by the current helper call |
3507 | * or because it was already present), 0 if it is not set. |
3508 | * |
3509 | * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) |
3510 | * Description |
3511 | * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. |
3512 | * **bpf_sk_release**\ () is unnecessary and not allowed. |
3513 | * Return |
3514 | * A **struct bpf_sock** pointer on success, or **NULL** in |
3515 | * case of failure. |
3516 | * |
3517 | * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
3518 | * Description |
3519 | * Look for TCP socket matching *tuple*, optionally in a child |
3520 | * network namespace *netns*. The return value must be checked, |
3521 | * and if non-**NULL**, released via **bpf_sk_release**\ (). |
3522 | * |
3523 | * This function is identical to **bpf_sk_lookup_tcp**\ (), except |
3524 | * that it also returns timewait or request sockets. Use |
3525 | * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the |
3526 | * full structure. |
3527 | * |
3528 | * This helper is available only if the kernel was compiled with |
3529 | * **CONFIG_NET** configuration option. |
3530 | * Return |
3531 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. |
3532 | * For sockets with reuseport option, the **struct bpf_sock** |
3533 | * result is from *reuse*\ **->socks**\ [] using the hash of the |
3534 | * tuple. |
3535 | * |
3536 | * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) |
3537 | * Description |
3538 | * Check whether *iph* and *th* contain a valid SYN cookie ACK for |
3539 | * the listening socket in *sk*. |
3540 | * |
3541 | * *iph* points to the start of the IPv4 or IPv6 header, while |
3542 | * *iph_len* contains **sizeof**\ (**struct iphdr**) or |
3543 | * **sizeof**\ (**struct ip6hdr**). |
3544 | * |
3545 | * *th* points to the start of the TCP header, while *th_len* |
3546 | * contains **sizeof**\ (**struct tcphdr**). |
3547 | * Return |
3548 | * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative |
3549 | * error otherwise. |
3550 | * |
3551 | * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) |
3552 | * Description |
 *		Get the name of the sysctl in /proc/sys/ and copy it into the
 *		buffer *buf* of size *buf_len* provided by the program.
3555 | * |
3556 | * The buffer is always NUL terminated, unless it's zero-sized. |
3557 | * |
3558 | * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is |
3559 | * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name |
3560 | * only (e.g. "tcp_mem"). |
3561 | * Return |
 *		Number of characters copied (not including the trailing NUL).
3563 | * |
3564 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain |
3565 | * truncated name in this case). |
3566 | * |
3567 | * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) |
3568 | * Description |
3569 | * Get current value of sysctl as it is presented in /proc/sys |
 *		(including newline, etc), and copy it as a string into the
 *		buffer *buf* of size *buf_len* provided by the program.
3572 | * |
 *		The whole value is copied, regardless of the file position at
 *		which user space issued e.g. sys_read.
3575 | * |
3576 | * The buffer is always NUL terminated, unless it's zero-sized. |
3577 | * Return |
 *		Number of characters copied (not including the trailing NUL).
3579 | * |
3580 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain |
3581 | * truncated name in this case). |
3582 | * |
3583 | * **-EINVAL** if current value was unavailable, e.g. because |
3584 | * sysctl is uninitialized and read returns -EIO for it. |
3585 | * |
3586 | * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) |
3587 | * Description |
3588 | * Get new value being written by user space to sysctl (before |
 *		the actual write happens) and copy it as a string into the
 *		buffer *buf* of size *buf_len* provided by the program.
3591 | * |
 *		User space may write the new value at a file position > 0.
3593 | * |
3594 | * The buffer is always NUL terminated, unless it's zero-sized. |
3595 | * Return |
 *		Number of characters copied (not including the trailing NUL).
3597 | * |
3598 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain |
3599 | * truncated name in this case). |
3600 | * |
3601 | * **-EINVAL** if sysctl is being read. |
3602 | * |
3603 | * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) |
3604 | * Description |
3605 | * Override new value being written by user space to sysctl with |
3606 | * value provided by program in buffer *buf* of size *buf_len*. |
3607 | * |
3608 | * *buf* should contain a string in same form as provided by user |
3609 | * space on sysctl write. |
3610 | * |
 *		User space may write the new value at a file position > 0. To
 *		override the whole sysctl value, the file position should be
 *		set to zero.
3613 | * Return |
3614 | * 0 on success. |
3615 | * |
3616 | * **-E2BIG** if the *buf_len* is too big. |
3617 | * |
3618 | * **-EINVAL** if sysctl is being read. |
3619 | * |
3620 | * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) |
3621 | * Description |
3622 | * Convert the initial part of the string from buffer *buf* of |
3623 | * size *buf_len* to a long integer according to the given base |
3624 | * and save the result in *res*. |
3625 | * |
3626 | * The string may begin with an arbitrary amount of white space |
3627 | * (as determined by **isspace**\ (3)) followed by a single |
3628 | * optional '**-**' sign. |
3629 | * |
3630 | * Five least significant bits of *flags* encode base, other bits |
3631 | * are currently unused. |
3632 | * |
3633 | * Base must be either 8, 10, 16 or 0 to detect it automatically |
3634 | * similar to user space **strtol**\ (3). |
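 *
 *		For instance, a **BPF_CGROUP_SYSCTL** program could validate a
 *		value being written to a sysctl roughly as sketched below (the
 *		buffer size is an arbitrary choice; base 0 lets the helper
 *		detect the base):
 *
 *		::
 *
 *			char buf[16] = {};
 *			long new_val;
 *
 *			if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) < 0)
 *				return 1; // not a write, allow
 *			if (bpf_strtol(buf, sizeof(buf), 0, &new_val) < 0)
 *				return 0; // unparseable value, reject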
3635 | * Return |
3636 | * Number of characters consumed on success. Must be positive but |
3637 | * no more than *buf_len*. |
3638 | * |
3639 | * **-EINVAL** if no valid digits were found or unsupported base |
3640 | * was provided. |
3641 | * |
3642 | * **-ERANGE** if resulting value was out of range. |
3643 | * |
3644 | * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) |
3645 | * Description |
3646 | * Convert the initial part of the string from buffer *buf* of |
3647 | * size *buf_len* to an unsigned long integer according to the |
3648 | * given base and save the result in *res*. |
3649 | * |
3650 | * The string may begin with an arbitrary amount of white space |
3651 | * (as determined by **isspace**\ (3)). |
3652 | * |
3653 | * Five least significant bits of *flags* encode base, other bits |
3654 | * are currently unused. |
3655 | * |
3656 | * Base must be either 8, 10, 16 or 0 to detect it automatically |
3657 | * similar to user space **strtoul**\ (3). |
3658 | * Return |
3659 | * Number of characters consumed on success. Must be positive but |
3660 | * no more than *buf_len*. |
3661 | * |
3662 | * **-EINVAL** if no valid digits were found or unsupported base |
3663 | * was provided. |
3664 | * |
3665 | * **-ERANGE** if resulting value was out of range. |
3666 | * |
3667 | * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) |
3668 | * Description |
3669 | * Get a bpf-local-storage from a *sk*. |
3670 | * |
3671 | * Logically, it could be thought of getting the value from |
3672 | * a *map* with *sk* as the **key**. From this |
3673 | * perspective, the usage is not much different from |
3674 | * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this |
3675 | * helper enforces the key must be a full socket and the map must |
3676 | * be a **BPF_MAP_TYPE_SK_STORAGE** also. |
3677 | * |
3678 | * Underneath, the value is stored locally at *sk* instead of |
3679 | * the *map*. The *map* is used as the bpf-local-storage |
3680 | * "type". The bpf-local-storage "type" (i.e. the *map*) is |
3681 | * searched against all bpf-local-storages residing at *sk*. |
3682 | * |
3683 | * *sk* is a kernel **struct sock** pointer for LSM program. |
3684 | * *sk* is a **struct bpf_sock** pointer for other program types. |
3685 | * |
3686 | * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be |
3687 | * used such that a new bpf-local-storage will be |
3688 | * created if one does not exist. *value* can be used |
3689 | * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify |
3690 | * the initial value of a bpf-local-storage. If *value* is |
3691 | * **NULL**, the new bpf-local-storage will be zero initialized. |
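 *
 *		A sketch keeping a per-socket counter, assuming libbpf's BTF
 *		map declaration macros and a valid socket pointer *sk*:
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *				__uint(map_flags, BPF_F_NO_PREALLOC);
 *				__type(key, int);
 *				__type(value, __u64);
 *			} pkt_count SEC(".maps");
 *
 *			__u64 *cnt = bpf_sk_storage_get(&pkt_count, sk, NULL,
 *							BPF_SK_STORAGE_GET_F_CREATE);
 *
 *			if (cnt)
 *				__sync_fetch_and_add(cnt, 1);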
3692 | * Return |
3693 | * A bpf-local-storage pointer is returned on success. |
3694 | * |
3695 | * **NULL** if not found or there was an error in adding |
3696 | * a new bpf-local-storage. |
3697 | * |
3698 | * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) |
3699 | * Description |
3700 | * Delete a bpf-local-storage from a *sk*. |
3701 | * Return |
3702 | * 0 on success. |
3703 | * |
 * **-ENOENT** if the bpf-local-storage cannot be found.
 *
 * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
3706 | * |
3707 | * long bpf_send_signal(u32 sig) |
3708 | * Description |
3709 | * Send signal *sig* to the process of the current task. |
3710 | * The signal may be delivered to any of this process's threads. |
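 *
 * A minimal sketch (illustration only) of a kprobe program that
 * signals the triggering process, assuming libbpf's
 * **bpf_helpers.h**; the attach point is arbitrary:
 *
 * ::
 *
 *	SEC("kprobe/__x64_sys_unlinkat")
 *	int notify_unlink(struct pt_regs *ctx)
 *	{
 *		// 10 is SIGUSR1 on x86-64; delivery may go to any
 *		// thread of the current process.
 *		bpf_send_signal(10);
 *		return 0;
 *	}
 *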
3711 | * Return |
3712 | * 0 on success or successfully queued. |
3713 | * |
3714 | * **-EBUSY** if work queue under nmi is full. |
3715 | * |
3716 | * **-EINVAL** if *sig* is invalid. |
3717 | * |
3718 | * **-EPERM** if no permission to send the *sig*. |
3719 | * |
3720 | * **-EAGAIN** if bpf program can try again. |
3721 | * |
3722 | * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) |
3723 | * Description |
3724 | * Try to issue a SYN cookie for the packet with corresponding |
3725 | * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. |
3726 | * |
3727 | * *iph* points to the start of the IPv4 or IPv6 header, while |
3728 | * *iph_len* contains **sizeof**\ (**struct iphdr**) or |
3729 | * **sizeof**\ (**struct ip6hdr**). |
3730 | * |
3731 | * *th* points to the start of the TCP header, while *th_len* |
3732 | * contains the length of the TCP header. |
3733 | * Return |
 * On success, the lower 32 bits hold the generated SYN cookie,
 * followed by 16 bits which hold the MSS value for that cookie;
 * the top 16 bits are unused.
3737 | * |
3738 | * On failure, the returned value is one of the following: |
3739 | * |
3740 | * **-EINVAL** SYN cookie cannot be issued due to error |
3741 | * |
3742 | * **-ENOENT** SYN cookie should not be issued (no SYN flood) |
3743 | * |
3744 | * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies |
3745 | * |
3746 | * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 |
3747 | * |
3748 | * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) |
3749 | * Description |
3750 | * Write raw *data* blob into a special BPF perf event held by |
3751 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf |
3752 | * event must have the following attributes: **PERF_SAMPLE_RAW** |
3753 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and |
3754 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. |
3755 | * |
3756 | * The *flags* are used to indicate the index in *map* for which |
3757 | * the value must be put, masked with **BPF_F_INDEX_MASK**. |
3758 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** |
3759 | * to indicate that the index of the current CPU core should be |
3760 | * used. |
3761 | * |
3762 | * The value to write, of *size*, is passed through eBPF stack and |
3763 | * pointed by *data*. |
3764 | * |
3765 | * *ctx* is a pointer to in-kernel struct sk_buff. |
3766 | * |
3767 | * This helper is similar to **bpf_perf_event_output**\ () but |
3768 | * restricted to raw_tracepoint bpf programs. |
3769 | * Return |
3770 | * 0 on success, or a negative error in case of failure. |
3771 | * |
3772 | * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) |
3773 | * Description |
3774 | * Safely attempt to read *size* bytes from user space address |
3775 | * *unsafe_ptr* and store the data in *dst*. |
3776 | * Return |
3777 | * 0 on success, or a negative error in case of failure. |
3778 | * |
3779 | * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) |
3780 | * Description |
3781 | * Safely attempt to read *size* bytes from kernel space address |
3782 | * *unsafe_ptr* and store the data in *dst*. |
3783 | * Return |
3784 | * 0 on success, or a negative error in case of failure. |
3785 | * |
3786 | * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) |
3787 | * Description |
3788 | * Copy a NUL terminated string from an unsafe user address |
3789 | * *unsafe_ptr* to *dst*. The *size* should include the |
3790 | * terminating NUL byte. In case the string length is smaller than |
3791 | * *size*, the target is not padded with further NUL bytes. If the |
3792 | * string length is larger than *size*, just *size*-1 bytes are |
3793 | * copied and the last byte is set to NUL. |
3794 | * |
3795 | * On success, returns the number of bytes that were written, |
3796 | * including the terminal NUL. This makes this helper useful in |
3797 | * tracing programs for reading strings, and more importantly to |
3798 | * get its length at runtime. See the following snippet: |
3799 | * |
3800 | * :: |
3801 | * |
3802 | * SEC("kprobe/sys_open") |
3803 | * void bpf_sys_open(struct pt_regs *ctx) |
3804 | * { |
3805 | * char buf[PATHLEN]; // PATHLEN is defined to 256 |
3806 | * int res = bpf_probe_read_user_str(buf, sizeof(buf), |
3807 | * ctx->di); |
3808 | * |
3809 | * // Consume buf, for example push it to |
3810 | * // userspace via bpf_perf_event_output(); we |
3811 | * // can use res (the string length) as event |
3812 | * // size, after checking its boundaries. |
3813 | * } |
3814 | * |
3815 | * In comparison, using **bpf_probe_read_user**\ () helper here |
3816 | * instead to read the string would require to estimate the length |
3817 | * at compile time, and would often result in copying more memory |
3818 | * than necessary. |
3819 | * |
3820 | * Another useful use case is when parsing individual process |
3821 | * arguments or individual environment variables navigating |
3822 | * *current*\ **->mm->arg_start** and *current*\ |
3823 | * **->mm->env_start**: using this helper and the return value, |
3824 | * one can quickly iterate at the right offset of the memory area. |
3825 | * Return |
3826 | * On success, the strictly positive length of the output string, |
3827 | * including the trailing NUL character. On error, a negative |
3828 | * value. |
3829 | * |
3830 | * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) |
3831 | * Description |
3832 | * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* |
3833 | * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. |
3834 | * Return |
3835 | * On success, the strictly positive length of the string, including |
3836 | * the trailing NUL character. On error, a negative value. |
3837 | * |
3838 | * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) |
3839 | * Description |
3840 | * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. |
3841 | * *rcv_nxt* is the ack_seq to be sent out. |
3842 | * Return |
3843 | * 0 on success, or a negative error in case of failure. |
3844 | * |
3845 | * long bpf_send_signal_thread(u32 sig) |
3846 | * Description |
3847 | * Send signal *sig* to the thread corresponding to the current task. |
3848 | * Return |
3849 | * 0 on success or successfully queued. |
3850 | * |
3851 | * **-EBUSY** if work queue under nmi is full. |
3852 | * |
3853 | * **-EINVAL** if *sig* is invalid. |
3854 | * |
3855 | * **-EPERM** if no permission to send the *sig*. |
3856 | * |
3857 | * **-EAGAIN** if bpf program can try again. |
3858 | * |
3859 | * u64 bpf_jiffies64(void) |
3860 | * Description |
 * Obtain the 64-bit jiffies.
3862 | * Return |
 * The 64-bit jiffies.
3864 | * |
3865 | * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) |
3866 | * Description |
3867 | * For an eBPF program attached to a perf event, retrieve the |
 * branch records (**struct perf_branch_entry**) associated with *ctx*
 * and store them in the buffer pointed to by *buf*, up to
 * *size* bytes.
3871 | * Return |
3872 | * On success, number of bytes written to *buf*. On error, a |
3873 | * negative value. |
3874 | * |
3875 | * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to |
3876 | * instead return the number of bytes required to store all the |
3877 | * branch entries. If this flag is set, *buf* may be NULL. |
3878 | * |
3879 | * **-EINVAL** if arguments invalid or **size** not a multiple |
3880 | * of **sizeof**\ (**struct perf_branch_entry**\ ). |
3881 | * |
3882 | * **-ENOENT** if architecture does not support branch records. |
3883 | * |
3884 | * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) |
3885 | * Description |
 * Returns 0 on success; values for *pid* and *tgid* as seen from
 * the current *namespace* are returned in *nsdata*.
3888 | * Return |
3889 | * 0 on success, or one of the following in case of failure: |
3890 | * |
3891 | * **-EINVAL** if dev and inum supplied don't match dev_t and inode number |
3892 | * with nsfs of current task, or if dev conversion to dev_t lost high bits. |
3893 | * |
 * **-ENOENT** if pidns does not exist for the current task.
3895 | * |
3896 | * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) |
3897 | * Description |
3898 | * Write raw *data* blob into a special BPF perf event held by |
3899 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf |
3900 | * event must have the following attributes: **PERF_SAMPLE_RAW** |
3901 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and |
3902 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. |
3903 | * |
3904 | * The *flags* are used to indicate the index in *map* for which |
3905 | * the value must be put, masked with **BPF_F_INDEX_MASK**. |
3906 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** |
3907 | * to indicate that the index of the current CPU core should be |
3908 | * used. |
3909 | * |
3910 | * The value to write, of *size*, is passed through eBPF stack and |
3911 | * pointed by *data*. |
3912 | * |
3913 | * *ctx* is a pointer to in-kernel struct xdp_buff. |
3914 | * |
 * This helper is similar to **bpf_perf_event_output**\ () but
3916 | * restricted to raw_tracepoint bpf programs. |
3917 | * Return |
3918 | * 0 on success, or a negative error in case of failure. |
3919 | * |
3920 | * u64 bpf_get_netns_cookie(void *ctx) |
3921 | * Description |
3922 | * Retrieve the cookie (generated by the kernel) of the network |
3923 | * namespace the input *ctx* is associated with. The network |
3924 | * namespace cookie remains stable for its lifetime and provides |
3925 | * a global identifier that can be assumed unique. If *ctx* is |
3926 | * NULL, then the helper returns the cookie for the initial |
3927 | * network namespace. The cookie itself is very similar to that |
3928 | * of **bpf_get_socket_cookie**\ () helper, but for network |
3929 | * namespaces instead of sockets. |
3930 | * Return |
 * An 8-byte long opaque number.
3932 | * |
3933 | * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) |
3934 | * Description |
3935 | * Return id of cgroup v2 that is ancestor of the cgroup associated |
3936 | * with the current task at the *ancestor_level*. The root cgroup |
3937 | * is at *ancestor_level* zero and each step down the hierarchy |
3938 | * increments the level. If *ancestor_level* == level of cgroup |
3939 | * associated with the current task, then return value will be the |
3940 | * same as that of **bpf_get_current_cgroup_id**\ (). |
3941 | * |
3942 | * The helper is useful to implement policies based on cgroups |
3943 | * that are upper in hierarchy than immediate cgroup associated |
3944 | * with the current task. |
3945 | * |
3946 | * The format of returned id and helper limitations are same as in |
3947 | * **bpf_get_current_cgroup_id**\ (). |
3948 | * Return |
3949 | * The id is returned or 0 in case the id could not be retrieved. |
3950 | * |
3951 | * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags) |
3952 | * Description |
3953 | * Helper is overloaded depending on BPF program type. This |
3954 | * description applies to **BPF_PROG_TYPE_SCHED_CLS** and |
3955 | * **BPF_PROG_TYPE_SCHED_ACT** programs. |
3956 | * |
3957 | * Assign the *sk* to the *skb*. When combined with appropriate |
3958 | * routing configuration to receive the packet towards the socket, |
 * this will cause *skb* to be delivered to the specified socket.
3960 | * Subsequent redirection of *skb* via **bpf_redirect**\ (), |
3961 | * **bpf_clone_redirect**\ () or other methods outside of BPF may |
3962 | * interfere with successful delivery to the socket. |
3963 | * |
3964 | * This operation is only valid from TC ingress path. |
3965 | * |
3966 | * The *flags* argument must be zero. |
3967 | * Return |
3968 | * 0 on success, or a negative error in case of failure: |
3969 | * |
3970 | * **-EINVAL** if specified *flags* are not supported. |
3971 | * |
3972 | * **-ENOENT** if the socket is unavailable for assignment. |
3973 | * |
3974 | * **-ENETUNREACH** if the socket is unreachable (wrong netns). |
3975 | * |
3976 | * **-EOPNOTSUPP** if the operation is not supported, for example |
3977 | * a call from outside of TC ingress. |
3978 | * |
3979 | * **-ESOCKTNOSUPPORT** if the socket type is not supported |
3980 | * (reuseport). |
3981 | * |
3982 | * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) |
3983 | * Description |
3984 | * Helper is overloaded depending on BPF program type. This |
3985 | * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. |
3986 | * |
3987 | * Select the *sk* as a result of a socket lookup. |
3988 | * |
 * For the operation to succeed, the passed socket must be
 * compatible with the packet description provided by the *ctx*
 * object.
3991 | * |
3992 | * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must |
3993 | * be an exact match. While IP family (**AF_INET** or |
3994 | * **AF_INET6**) must be compatible, that is IPv6 sockets |
3995 | * that are not v6-only can be selected for IPv4 packets. |
3996 | * |
3997 | * Only TCP listeners and UDP unconnected sockets can be |
3998 | * selected. *sk* can also be NULL to reset any previous |
3999 | * selection. |
4000 | * |
 * The *flags* argument can be a combination of the following values:
4002 | * |
4003 | * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous |
4004 | * socket selection, potentially done by a BPF program |
4005 | * that ran before us. |
4006 | * |
4007 | * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip |
4008 | * load-balancing within reuseport group for the socket |
4009 | * being selected. |
4010 | * |
4011 | * On success *ctx->sk* will point to the selected socket. |
4012 | * |
4013 | * Return |
4014 | * 0 on success, or a negative errno in case of failure. |
4015 | * |
4016 | * * **-EAFNOSUPPORT** if socket family (*sk->family*) is |
4017 | * not compatible with packet family (*ctx->family*). |
4018 | * |
4019 | * * **-EEXIST** if socket has been already selected, |
4020 | * potentially by another program, and |
4021 | * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. |
4022 | * |
4023 | * * **-EINVAL** if unsupported flags were specified. |
4024 | * |
4025 | * * **-EPROTOTYPE** if socket L4 protocol |
4026 | * (*sk->protocol*) doesn't match packet protocol |
4027 | * (*ctx->protocol*). |
4028 | * |
4029 | * * **-ESOCKTNOSUPPORT** if socket is not in allowed |
4030 | * state (TCP listening or UDP unconnected). |
4031 | * |
4032 | * u64 bpf_ktime_get_boot_ns(void) |
4033 | * Description |
4034 | * Return the time elapsed since system boot, in nanoseconds. |
4035 | * Does include the time the system was suspended. |
4036 | * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) |
4037 | * Return |
4038 | * Current *ktime*. |
4039 | * |
4040 | * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) |
4041 | * Description |
4042 | * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print |
4043 | * out the format string. |
4044 | * The *m* represents the seq_file. The *fmt* and *fmt_size* are for |
4045 | * the format string itself. The *data* and *data_len* are format string |
4046 | * arguments. The *data* are a **u64** array and corresponding format string |
4047 | * values are stored in the array. For strings and pointers where pointees |
4048 | * are accessed, only the pointer values are stored in the *data* array. |
4049 | * The *data_len* is the size of *data* in bytes. |
4050 | * |
 * The **%s** and **%p{i,I}{4,6}** formats require reading kernel memory.
4052 | * Reading kernel memory may fail due to either invalid address or |
4053 | * valid address but requiring a major memory fault. If reading kernel memory |
4054 | * fails, the string for **%s** will be an empty string, and the ip |
4055 | * address for **%p{i,I}{4,6}** will be 0. Not returning error to |
4056 | * bpf program is consistent with what **bpf_trace_printk**\ () does for now. |
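 *
 * A minimal sketch (illustration only) of a task iterator
 * (**BPF_PROG_TYPE_TRACING**, attach type **BPF_TRACE_ITER**)
 * printing one line per task, assuming a bpftool-generated
 * vmlinux.h and libbpf's **bpf_helpers.h**:
 *
 * ::
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *m = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		static const char fmt[] = "pid=%d tgid=%d\n";
 *		__u64 data[2];
 *
 *		if (!task)
 *			return 0;
 *		data[0] = task->pid;
 *		data[1] = task->tgid;
 *		bpf_seq_printf(m, fmt, sizeof(fmt), data, sizeof(data));
 *		return 0;
 *	}
 *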
4057 | * Return |
4058 | * 0 on success, or a negative error in case of failure: |
4059 | * |
4060 | * **-EBUSY** if per-CPU memory copy buffer is busy, can try again |
4061 | * by returning 1 from bpf program. |
4062 | * |
4063 | * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. |
4064 | * |
4065 | * **-E2BIG** if *fmt* contains too many format specifiers. |
4066 | * |
4067 | * **-EOVERFLOW** if an overflow happened: The same object will be tried again. |
4068 | * |
4069 | * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) |
4070 | * Description |
4071 | * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. |
4072 | * The *m* represents the seq_file. The *data* and *len* represent the |
4073 | * data to write in bytes. |
4074 | * Return |
4075 | * 0 on success, or a negative error in case of failure: |
4076 | * |
4077 | * **-EOVERFLOW** if an overflow happened: The same object will be tried again. |
4078 | * |
4079 | * u64 bpf_sk_cgroup_id(void *sk) |
4080 | * Description |
4081 | * Return the cgroup v2 id of the socket *sk*. |
4082 | * |
4083 | * *sk* must be a non-**NULL** pointer to a socket, e.g. one |
4084 | * returned from **bpf_sk_lookup_xxx**\ (), |
4085 | * **bpf_sk_fullsock**\ (), etc. The format of returned id is |
4086 | * same as in **bpf_skb_cgroup_id**\ (). |
4087 | * |
4088 | * This helper is available only if the kernel was compiled with |
4089 | * the **CONFIG_SOCK_CGROUP_DATA** configuration option. |
4090 | * Return |
4091 | * The id is returned or 0 in case the id could not be retrieved. |
4092 | * |
4093 | * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) |
4094 | * Description |
4095 | * Return id of cgroup v2 that is ancestor of cgroup associated |
4096 | * with the *sk* at the *ancestor_level*. The root cgroup is at |
4097 | * *ancestor_level* zero and each step down the hierarchy |
4098 | * increments the level. If *ancestor_level* == level of cgroup |
4099 | * associated with *sk*, then return value will be same as that |
4100 | * of **bpf_sk_cgroup_id**\ (). |
4101 | * |
4102 | * The helper is useful to implement policies based on cgroups |
4103 | * that are upper in hierarchy than immediate cgroup associated |
4104 | * with *sk*. |
4105 | * |
4106 | * The format of returned id and helper limitations are same as in |
4107 | * **bpf_sk_cgroup_id**\ (). |
4108 | * Return |
4109 | * The id is returned or 0 in case the id could not be retrieved. |
4110 | * |
4111 | * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) |
4112 | * Description |
4113 | * Copy *size* bytes from *data* into a ring buffer *ringbuf*. |
4114 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification |
4115 | * of new data availability is sent. |
4116 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification |
4117 | * of new data availability is sent unconditionally. |
4118 | * If **0** is specified in *flags*, an adaptive notification |
4119 | * of new data availability is sent. |
4120 | * |
4121 | * An adaptive notification is a notification sent whenever the user-space |
4122 | * process has caught up and consumed all available payloads. In case the user-space |
4123 | * process is still processing a previous payload, then no notification is needed |
4124 | * as it will process the newly added payload automatically. |
4125 | * Return |
4126 | * 0 on success, or a negative error in case of failure. |
4127 | * |
4128 | * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) |
4129 | * Description |
4130 | * Reserve *size* bytes of payload in a ring buffer *ringbuf*. |
4131 | * *flags* must be 0. |
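 *
 * A minimal reserve/submit sketch (illustration only), assuming
 * libbpf's **bpf_helpers.h** map definition macros; the event
 * layout and attach point are hypothetical. On an error path,
 * **bpf_ringbuf_discard**\ () drops the reservation instead:
 *
 * ::
 *
 *	struct event {
 *		__u32 pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_RINGBUF);
 *		__uint(max_entries, 256 * 1024);
 *	} rb SEC(".maps");
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_execve(void *ctx)
 *	{
 *		struct event *e;
 *
 *		e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *		if (!e)
 *			return 0;	// ring buffer full, drop sample
 *		e->pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e->comm, sizeof(e->comm));
 *		bpf_ringbuf_submit(e, 0);
 *		return 0;
 *	}
 *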
4132 | * Return |
4133 | * Valid pointer with *size* bytes of memory available; NULL, |
4134 | * otherwise. |
4135 | * |
4136 | * void bpf_ringbuf_submit(void *data, u64 flags) |
4137 | * Description |
4138 | * Submit reserved ring buffer sample, pointed to by *data*. |
4139 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification |
4140 | * of new data availability is sent. |
4141 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification |
4142 | * of new data availability is sent unconditionally. |
4143 | * If **0** is specified in *flags*, an adaptive notification |
4144 | * of new data availability is sent. |
4145 | * |
4146 | * See 'bpf_ringbuf_output()' for the definition of adaptive notification. |
4147 | * Return |
4148 | * Nothing. Always succeeds. |
4149 | * |
4150 | * void bpf_ringbuf_discard(void *data, u64 flags) |
4151 | * Description |
4152 | * Discard reserved ring buffer sample, pointed to by *data*. |
4153 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification |
4154 | * of new data availability is sent. |
4155 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification |
4156 | * of new data availability is sent unconditionally. |
4157 | * If **0** is specified in *flags*, an adaptive notification |
4158 | * of new data availability is sent. |
4159 | * |
4160 | * See 'bpf_ringbuf_output()' for the definition of adaptive notification. |
4161 | * Return |
4162 | * Nothing. Always succeeds. |
4163 | * |
4164 | * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) |
4165 | * Description |
 * Query various characteristics of the provided ring buffer. What
 * exactly is queried is determined by *flags*:
4168 | * |
4169 | * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. |
4170 | * * **BPF_RB_RING_SIZE**: The size of ring buffer. |
4171 | * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). |
4172 | * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). |
4173 | * |
4174 | * Data returned is just a momentary snapshot of actual values |
4175 | * and could be inaccurate, so this facility should be used to |
4176 | * power heuristics and for reporting, not to make 100% correct |
4177 | * calculation. |
4178 | * Return |
4179 | * Requested value, or 0, if *flags* are not recognized. |
4180 | * |
4181 | * long bpf_csum_level(struct sk_buff *skb, u64 level) |
4182 | * Description |
 * Change the skb's checksum level by one layer up or down, or
4184 | * reset it entirely to none in order to have the stack perform |
4185 | * checksum validation. The level is applicable to the following |
4186 | * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of |
4187 | * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | |
4188 | * through **bpf_skb_adjust_room**\ () helper with passing in |
4189 | * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call |
4190 | * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since |
4191 | * the UDP header is removed. Similarly, an encap of the latter |
4192 | * into the former could be accompanied by a helper call to |
4193 | * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the |
4194 | * skb is still intended to be processed in higher layers of the |
4195 | * stack instead of just egressing at tc. |
4196 | * |
4197 | * There are three supported level settings at this time: |
4198 | * |
4199 | * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs |
4200 | * with CHECKSUM_UNNECESSARY. |
4201 | * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs |
4202 | * with CHECKSUM_UNNECESSARY. |
4203 | * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and |
4204 | * sets CHECKSUM_NONE to force checksum validation by the stack. |
4205 | * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current |
4206 | * skb->csum_level. |
4207 | * Return |
4208 | * 0 on success, or a negative error in case of failure. In the |
4209 | * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level |
4210 | * is returned or the error code -EACCES in case the skb is not |
4211 | * subject to CHECKSUM_UNNECESSARY. |
4212 | * |
4213 | * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) |
4214 | * Description |
4215 | * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. |
4216 | * Return |
4217 | * *sk* if casting is valid, or **NULL** otherwise. |
4218 | * |
4219 | * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) |
4220 | * Description |
4221 | * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. |
4222 | * Return |
4223 | * *sk* if casting is valid, or **NULL** otherwise. |
4224 | * |
4225 | * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) |
4226 | * Description |
4227 | * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. |
4228 | * Return |
4229 | * *sk* if casting is valid, or **NULL** otherwise. |
4230 | * |
4231 | * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) |
4232 | * Description |
4233 | * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. |
4234 | * Return |
4235 | * *sk* if casting is valid, or **NULL** otherwise. |
4236 | * |
4237 | * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) |
4238 | * Description |
4239 | * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. |
4240 | * Return |
4241 | * *sk* if casting is valid, or **NULL** otherwise. |
4242 | * |
4243 | * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) |
4244 | * Description |
4245 | * Return a user or a kernel stack in bpf program provided buffer. |
4246 | * To achieve this, the helper needs *task*, which is a valid |
4247 | * pointer to **struct task_struct**. To store the stacktrace, the |
4248 | * bpf program provides *buf* with a nonnegative *size*. |
4249 | * |
4250 | * The last argument, *flags*, holds the number of stack frames to |
4251 | * skip (from 0 to 255), masked with |
4252 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set |
4253 | * the following flags: |
4254 | * |
4255 | * **BPF_F_USER_STACK** |
4256 | * Collect a user space stack instead of a kernel stack. |
4257 | * **BPF_F_USER_BUILD_ID** |
4258 | * Collect buildid+offset instead of ips for user stack, |
4259 | * only valid if **BPF_F_USER_STACK** is also specified. |
4260 | * |
4261 | * **bpf_get_task_stack**\ () can collect up to |
4262 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject |
 * to a sufficiently large buffer size. Note that
4264 | * this limit can be controlled with the **sysctl** program, and |
4265 | * that it should be manually increased in order to profile long |
4266 | * user stacks (such as stacks for Java programs). To do so, use: |
4267 | * |
4268 | * :: |
4269 | * |
4270 | * # sysctl kernel.perf_event_max_stack=<new value> |
4271 | * Return |
4272 | * The non-negative copied *buf* length equal to or less than |
4273 | * *size* on success, or a negative error in case of failure. |
4274 | * |
4275 | * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) |
4276 | * Description |
4277 | * Load header option. Support reading a particular TCP header |
4278 | * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**). |
4279 | * |
4280 | * If *flags* is 0, it will search the option from the |
4281 | * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** |
4282 | * has details on what skb_data contains under different |
4283 | * *skops*\ **->op**. |
4284 | * |
4285 | * The first byte of the *searchby_res* specifies the |
4286 | * kind that it wants to search. |
4287 | * |
 * If the searching kind is an experimental kind
 * (i.e. 253 or 254 according to RFC6994), it also
 * needs to specify the "magic" which is either
 * 2 bytes or 4 bytes. It then also needs to
 * specify the size of the magic by using
 * the 2nd byte which is the "kind-length" of a TCP
 * header option; the "kind-length" also
 * includes the first 2 bytes "kind" and "kind-length"
 * itself, as a normal TCP header option also does.
4297 | * |
4298 | * For example, to search experimental kind 254 with |
4299 | * 2 byte magic 0xeB9F, the searchby_res should be |
4300 | * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. |
4301 | * |
4302 | * To search for the standard window scale option (3), |
4303 | * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. |
 * Note, kind-length must be 0 for a regular option.
4305 | * |
 * Searching for No-Op (0) and End-of-Option-List (1) is
 * not supported.
4308 | * |
4309 | * *len* must be at least 2 bytes which is the minimal size |
4310 | * of a header option. |
4311 | * |
4312 | * Supported flags: |
4313 | * |
4314 | * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the |
4315 | * saved_syn packet or the just-received syn packet. |
4316 | * |
4317 | * Return |
4318 | * > 0 when found, the header option is copied to *searchby_res*. |
4319 | * The return value is the total length copied. On failure, a |
4320 | * negative error code is returned: |
4321 | * |
4322 | * **-EINVAL** if a parameter is invalid. |
4323 | * |
4324 | * **-ENOMSG** if the option is not found. |
4325 | * |
4326 | * **-ENOENT** if no syn packet is available when |
4327 | * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. |
4328 | * |
 * **-ENOSPC** if there is not enough space. Only *len* bytes
 * are copied.
4331 | * |
4332 | * **-EFAULT** on failure to parse the header options in the |
4333 | * packet. |
4334 | * |
4335 | * **-EPERM** if the helper cannot be used under the current |
4336 | * *skops*\ **->op**. |
4337 | * |
4338 | * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags) |
4339 | * Description |
4340 | * Store header option. The data will be copied |
4341 | * from buffer *from* with length *len* to the TCP header. |
4342 | * |
4343 | * The buffer *from* should have the whole option that |
4344 | * includes the kind, kind-length, and the actual |
4345 | * option data. The *len* must be at least kind-length |
 * long. The kind-length does not have to be 4-byte
 * aligned. The kernel will take care of the padding
 * and of setting the 4-byte aligned value in th->doff.
4349 | * |
 * This helper will check for a duplicated option
 * by searching for the same option in the outgoing skb.
4352 | * |
4353 | * This helper can only be called during |
4354 | * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. |
4355 | * |
4356 | * Return |
4357 | * 0 on success, or negative error in case of failure: |
4358 | * |
 * **-EINVAL** if a parameter is invalid.
4360 | * |
4361 | * **-ENOSPC** if there is not enough space in the header. |
 * Nothing has been written.
4363 | * |
4364 | * **-EEXIST** if the option already exists. |
4365 | * |
 * **-EFAULT** on failure to parse the existing header options.
4367 | * |
4368 | * **-EPERM** if the helper cannot be used under the current |
4369 | * *skops*\ **->op**. |
4370 | * |
4371 | * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags) |
4372 | * Description |
4373 | * Reserve *len* bytes for the bpf header option. The |
4374 | * space will be used by **bpf_store_hdr_opt**\ () later in |
4375 | * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. |
4376 | * |
4377 | * If **bpf_reserve_hdr_opt**\ () is called multiple times, |
4378 | * the total number of bytes will be reserved. |
4379 | * |
4380 | * This helper can only be called during |
4381 | * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. |
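 *
 * A minimal **sockops** sketch (illustration only) combining this
 * helper with **bpf_store_hdr_opt**\ (), assuming libbpf's
 * **bpf_helpers.h**; the experimental option (kind 254, magic
 * 0xeB9F) is hypothetical:
 *
 * ::
 *
 *	SEC("sockops")
 *	int add_opt(struct bpf_sock_ops *skops)
 *	{
 *		__u8 opt[4] = { 254, 4, 0xeb, 0x9f };
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_TCP_CONNECT_CB:
 *			// Ask for the option-length and option-write callbacks.
 *			bpf_sock_ops_cb_flags_set(skops,
 *					BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
 *			break;
 *		case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
 *			bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
 *			break;
 *		case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
 *			bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
 *			break;
 *		}
 *		return 1;
 *	}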
4382 | * |
4383 | * Return |
4384 | * 0 on success, or negative error in case of failure: |
4385 | * |
4386 | * **-EINVAL** if a parameter is invalid. |
4387 | * |
4388 | * **-ENOSPC** if there is not enough space in the header. |
4389 | * |
4390 | * **-EPERM** if the helper cannot be used under the current |
4391 | * *skops*\ **->op**. |
4392 | * |
4393 | * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) |
4394 | * Description |
4395 | * Get a bpf_local_storage from an *inode*. |
4396 | * |
4397 | * Logically, it could be thought of as getting the value from |
4398 | * a *map* with *inode* as the **key**. From this |
4399 | * perspective, the usage is not much different from |
4400 | * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this |
4401 | * helper enforces the key must be an inode and the map must also |
4402 | * be a **BPF_MAP_TYPE_INODE_STORAGE**. |
4403 | * |
4404 | * Underneath, the value is stored locally at *inode* instead of |
4405 | * the *map*. The *map* is used as the bpf-local-storage |
4406 | * "type". The bpf-local-storage "type" (i.e. the *map*) is |
4407 | * searched against all bpf_local_storage residing at *inode*. |
4408 | * |
4409 | * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be |
4410 | * used such that a new bpf_local_storage will be |
4411 | * created if one does not exist. *value* can be used |
4412 | * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify |
4413 | * the initial value of a bpf_local_storage. If *value* is |
4414 | * **NULL**, the new bpf_local_storage will be zero initialized. |
4415 | * Return |
4416 | * A bpf_local_storage pointer is returned on success. |
4417 | * |
4418 | * **NULL** if not found or there was an error in adding |
4419 | * a new bpf_local_storage. |
4420 | * |
4421 | * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) |
4422 | * Description |
4423 | * Delete a bpf_local_storage from an *inode*. |
4424 | * Return |
4425 | * 0 on success. |
4426 | * |
4427 | * **-ENOENT** if the bpf_local_storage cannot be found. |
4428 | * |
4429 | * long bpf_d_path(struct path *path, char *buf, u32 sz) |
4430 | * Description |
4431 | * Return full path for given **struct path** object, which |
4432 | * needs to be the kernel BTF *path* object. The path is |
4433 | * returned in the provided buffer *buf* of size *sz* and |
4434 | * is zero terminated. |
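 *
 * A minimal sketch (illustration only) from an LSM program,
 * assuming **CONFIG_BPF_LSM**, a bpftool-generated vmlinux.h and
 * libbpf's **BPF_PROG** macro from **bpf_tracing.h**:
 *
 * ::
 *
 *	SEC("lsm/file_open")
 *	int BPF_PROG(log_open, struct file *file)
 *	{
 *		char path[256];
 *
 *		if (bpf_d_path(&file->f_path, path, sizeof(path)) > 0)
 *			bpf_printk("open: %s", path);
 *		return 0;	// 0 allows the open
 *	}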
4435 | * |
4436 | * Return |
4437 | * On success, the strictly positive length of the string, |
4438 | * including the trailing NUL character. On error, a negative |
4439 | * value. |
4440 | * |
4441 | * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) |
4442 | * Description |
4443 | * Read *size* bytes from user space address *user_ptr* and store |
4444 | * the data in *dst*. This is a wrapper of **copy_from_user**\ (). |
4445 | * Return |
4446 | * 0 on success, or a negative error in case of failure. |
4447 | * |
4448 | * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) |
4449 | * Description |
4450 | * Use BTF to store a string representation of *ptr*->ptr in *str*, |
4451 | * using *ptr*->type_id. This value should specify the type |
4452 | * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) |
4453 | * can be used to look up vmlinux BTF type ids. Traversing the |
4454 | * data structure using BTF, the type information and values are |
4455 | * stored in the first *str_size* - 1 bytes of *str*. Safe copy of |
4456 | * the pointer data is carried out to avoid kernel crashes during |
4457 | * operation. Smaller types can use string space on the stack; |
4458 | * larger programs can use map data to store the string |
4459 | * representation. |
4460 | * |
4461 | * The string can be subsequently shared with userspace via |
4462 | * bpf_perf_event_output() or ring buffer interfaces. |
4463 | * bpf_trace_printk() is to be avoided as it places too small |
4464 | * a limit on string size to be useful. |
4465 | * |
4466 | * *flags* is a combination of |
4467 | * |
4468 | * **BTF_F_COMPACT** |
4469 | * no formatting around type information |
4470 | * **BTF_F_NONAME** |
4471 | * no struct/union member names/types |
4472 | * **BTF_F_PTR_RAW** |
4473 | * show raw (unobfuscated) pointer values; |
4474 | * equivalent to printk specifier %px. |
4475 | * **BTF_F_ZERO** |
4476 | * show zero-valued struct/union members; they |
4477 | * are not displayed by default |
4478 | * |
4479 | * Return |
4480 | * The number of bytes that were written (or would have been |
4481 | * written if output had to be truncated due to string size), |
4482 | * or a negative error in cases of failure. |
4483 | * |
4484 | * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) |
4485 | * Description |
4486 | * Use BTF to write to seq_write a string representation of |
4487 | * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). |
4488 | * *flags* are identical to those used for bpf_snprintf_btf. |
4489 | * Return |
4490 | * 0 on success or a negative error in case of failure. |
4491 | * |
4492 | * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) |
4493 | * Description |
4494 | * See **bpf_get_cgroup_classid**\ () for the main description. |
4495 | * This helper differs from **bpf_get_cgroup_classid**\ () in that |
4496 | * the cgroup v1 net_cls class is retrieved only from the *skb*'s |
4497 | * associated socket instead of the current process. |
4498 | * Return |
4499 | * The id is returned or 0 in case the id could not be retrieved. |
4500 | * |
4501 | * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) |
4502 | * Description |
4503 | * Redirect the packet to another net device of index *ifindex* |
4504 | * and fill in L2 addresses from neighboring subsystem. This helper |
4505 | * is somewhat similar to **bpf_redirect**\ (), except that it |
4506 | * populates L2 addresses as well, meaning, internally, the helper |
4507 | * relies on the neighbor lookup for the L2 address of the nexthop. |
4508 | * |
4509 | * The helper will perform a FIB lookup based on the skb's |
4510 | * networking header to get the address of the next hop, unless |
4511 | * this is supplied by the caller in the *params* argument. The |
4512 | * *plen* argument indicates the len of *params* and should be set |
4513 | * to 0 if *params* is NULL. |
4514 | * |
4515 | * The *flags* argument is reserved and must be 0. The helper is |
4516 | * currently only supported for tc BPF program types, and enabled |
4517 | * for IPv4 and IPv6 protocols. |
4518 | * Return |
4519 | * The helper returns **TC_ACT_REDIRECT** on success or |
4520 | * **TC_ACT_SHOT** on error. |
4521 | * |
4522 | * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu) |
4523 | * Description |
4524 | * Take a pointer to a percpu ksym, *percpu_ptr*, and return a |
4525 | * pointer to the percpu kernel variable on *cpu*. A ksym is an |
 * extern variable decorated with '__ksym'. For a ksym, there is a
 * global variable (either static or global) of the same name defined
 * in the kernel. The ksym is percpu if the global variable is percpu.
4529 | * The returned pointer points to the global percpu var on *cpu*. |
4530 | * |
4531 | * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the |
4532 | * kernel, except that bpf_per_cpu_ptr() may return NULL. This |
4533 | * happens if *cpu* is larger than nr_cpu_ids. The caller of |
4534 | * bpf_per_cpu_ptr() must check the returned value. |
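 *
 * A minimal sketch (illustration only), reading the kernel's
 * per-CPU **bpf_prog_active** counter, assuming libbpf's __ksym
 * support and a privileged tracing program:
 *
 * ::
 *
 *	extern const int bpf_prog_active __ksym;	// per-CPU kernel variable
 *
 *	SEC("raw_tp/sys_enter")
 *	int read_active(void *ctx)
 *	{
 *		__u32 cpu = bpf_get_smp_processor_id();
 *		const int *active;
 *
 *		active = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
 *		if (!active)
 *			return 0;	// cpu >= nr_cpu_ids
 *		bpf_printk("cpu %u: %d", cpu, *active);
 *		return 0;
 *	}
 *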
4535 | * Return |
4536 | * A pointer pointing to the kernel percpu variable on *cpu*, or |
4537 | * NULL, if *cpu* is invalid. |
4538 | * |
4539 | * void *bpf_this_cpu_ptr(const void *percpu_ptr) |
4540 | * Description |
4541 | * Take a pointer to a percpu ksym, *percpu_ptr*, and return a |
4542 | * pointer to the percpu kernel variable on this cpu. See the |
4543 | * description of 'ksym' in **bpf_per_cpu_ptr**\ (). |
4544 | * |
4545 | * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in |
 * the kernel. Unlike **bpf_per_cpu_ptr**\ (), it never
 * returns NULL.
4548 | * Return |
4549 | * A pointer pointing to the kernel percpu variable on this cpu. |
4550 | * |
4551 | * long bpf_redirect_peer(u32 ifindex, u64 flags) |
4552 | * Description |
4553 | * Redirect the packet to another net device of index *ifindex*. |
4554 | * This helper is somewhat similar to **bpf_redirect**\ (), except |
4555 | * that the redirection happens to the *ifindex*' peer device and |
4556 | * the netns switch takes place from ingress to ingress without |
4557 | * going through the CPU's backlog queue. |
4558 | * |
4559 | * The *flags* argument is reserved and must be 0. The helper is |
4560 | * currently only supported for tc BPF program types at the ingress |
4561 | * hook and for veth device types. The peer device must reside in a |
4562 | * different network namespace. |
4563 | * Return |
4564 | * The helper returns **TC_ACT_REDIRECT** on success or |
4565 | * **TC_ACT_SHOT** on error. |
4566 | * |
4567 | * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags) |
4568 | * Description |
4569 | * Get a bpf_local_storage from the *task*. |
4570 | * |
4571 | * Logically, it could be thought of as getting the value from |
4572 | * a *map* with *task* as the **key**. From this |
4573 | * perspective, the usage is not much different from |
4574 | * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this |
 * helper enforces that the key must be a task_struct and the map must also
4576 | * be a **BPF_MAP_TYPE_TASK_STORAGE**. |
4577 | * |
4578 | * Underneath, the value is stored locally at *task* instead of |
4579 | * the *map*. The *map* is used as the bpf-local-storage |
4580 | * "type". The bpf-local-storage "type" (i.e. the *map*) is |
4581 | * searched against all bpf_local_storage residing at *task*. |
4582 | * |
4583 | * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be |
4584 | * used such that a new bpf_local_storage will be |
4585 | * created if one does not exist. *value* can be used |
4586 | * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify |
4587 | * the initial value of a bpf_local_storage. If *value* is |
4588 | * **NULL**, the new bpf_local_storage will be zero initialized. |
4589 | * Return |
4590 | * A bpf_local_storage pointer is returned on success. |
4591 | * |
4592 | * **NULL** if not found or there was an error in adding |
4593 | * a new bpf_local_storage. |
4594 | * |
4595 | * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task) |
4596 | * Description |
4597 | * Delete a bpf_local_storage from a *task*. |
4598 | * Return |
4599 | * 0 on success. |
4600 | * |
4601 | * **-ENOENT** if the bpf_local_storage cannot be found. |
4602 | * |
4603 | * struct task_struct *bpf_get_current_task_btf(void) |
4604 | * Description |
4605 | * Return a BTF pointer to the "current" task. |
4606 | * This pointer can also be used in helpers that accept an |
4607 | * *ARG_PTR_TO_BTF_ID* of type *task_struct*. |
4608 | * Return |
4609 | * Pointer to the current task. |
4610 | * |
4611 | * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags) |
4612 | * Description |
4613 | * Set or clear certain options on *bprm*: |
4614 | * |
4615 | * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit |
4616 | * which sets the **AT_SECURE** auxv for glibc. The bit |
4617 | * is cleared if the flag is not specified. |
4618 | * Return |
4619 | * **-EINVAL** if invalid *flags* are passed, zero otherwise. |
4620 | * |
4621 | * u64 bpf_ktime_get_coarse_ns(void) |
4622 | * Description |
4623 | * Return a coarse-grained version of the time elapsed since |
4624 | * system boot, in nanoseconds. Does not include time the system |
4625 | * was suspended. |
4626 | * |
4627 | * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**) |
4628 | * Return |
4629 | * Current *ktime*. |
4630 | * |
4631 | * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size) |
4632 | * Description |
 * Returns the stored IMA hash of the *inode* (if it's available).
 * If the hash is larger than *size*, then only *size*
 * bytes will be copied to *dst*.
4636 | * Return |
4637 | * The **hash_algo** is returned on success, |
 * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
4639 | * invalid arguments are passed. |
4640 | * |
4641 | * struct socket *bpf_sock_from_file(struct file *file) |
4642 | * Description |
4643 | * If the given file represents a socket, returns the associated |
4644 | * socket. |
4645 | * Return |
4646 | * A pointer to a struct socket on success or NULL if the file is |
4647 | * not a socket. |
4648 | * |
4649 | * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) |
4650 | * Description |
4651 | * Check packet size against exceeding MTU of net device (based |
4652 | * on *ifindex*). This helper will likely be used in combination |
4653 | * with helpers that adjust/change the packet size. |
4654 | * |
 * The argument *len_diff* can be used for querying with a planned
 * size change. This allows checking the MTU prior to changing the
 * packet ctx. Providing a *len_diff* adjustment that is larger than
 * the actual packet size (resulting in a negative packet size) will
 * in principle not exceed the MTU, which is why it is not considered
 * a failure. Other BPF helpers are needed for performing the
 * planned size change, which is why the responsibility for catching
 * a negative packet size belongs to those helpers.
4663 | * |
4664 | * Specifying *ifindex* zero means the MTU check is performed |
 * against the current net device. This is practical if the check
 * isn't used prior to a redirect.
4667 | * |
 * On input, *mtu_len* must be a valid pointer, else the verifier
 * will reject the BPF program. If the value *mtu_len* is initialized
 * to zero then the ctx packet size is used. When the value *mtu_len*
 * is provided as input, it specifies the L3 length that the MTU check
 * is done against. Remember that XDP and TC length operate at L2, but
 * this value is L3 as it correlates to the MTU and the IP-header
 * tot_len values, which are L3 (similar behavior to bpf_fib_lookup).
4675 | * |
4676 | * The Linux kernel route table can configure MTUs on a more |
4677 | * specific per route level, which is not provided by this helper. |
4678 | * For route level MTU checks use the **bpf_fib_lookup**\ () |
4679 | * helper. |
4680 | * |
4681 | * *ctx* is either **struct xdp_md** for XDP programs or |
4682 | * **struct sk_buff** for tc cls_act programs. |
4683 | * |
4684 | * The *flags* argument can be a combination of one or more of the |
4685 | * following values: |
4686 | * |
4687 | * **BPF_MTU_CHK_SEGS** |
 * This flag only works for *ctx* **struct sk_buff**.
 * If the packet context contains extra packet segment buffers
 * (often known as GSO skbs), then the MTU check is harder to
 * do at this point, because in the transmit path it is
 * possible for the skb packet to get re-segmented
 * (depending on net device features). This could still be
 * an MTU violation, so this flag enables performing the MTU
 * check against segments, with a different violation
 * return code to tell it apart. The check cannot use len_diff.
4697 | * |
 * On return, the *mtu_len* pointer contains the MTU value of the net
 * device. Remember that the net device configured MTU is the L3 size,
 * which is returned here, while XDP and TC length operate at L2.
 * The helper takes this into account for you, but remember it when
 * using the MTU value in your BPF code.
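 *
 * A minimal XDP sketch (illustration only) that drops packets
 * exceeding the current device MTU, assuming libbpf's
 * **bpf_helpers.h** and the **xdp_action** enum from this header:
 *
 * ::
 *
 *	SEC("xdp")
 *	int mtu_guard(struct xdp_md *ctx)
 *	{
 *		__u32 mtu_len = 0;	// 0: check the current packet length
 *
 *		// ifindex 0: check against the current net device
 *		if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
 *			return XDP_DROP;	// MTU exceeded or bad argument
 *		return XDP_PASS;
 *	}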
4703 | * |
4704 | * Return |
4705 | * * 0 on success, and populate MTU value in *mtu_len* pointer. |
4706 | * |
4707 | * * < 0 if any input argument is invalid (*mtu_len* not updated) |
4708 | * |
4709 | * MTU violations return positive values, but also populate MTU |
4710 | * value in *mtu_len* pointer, as this can be needed for |
 * implementing PMTU handling:
4712 | * |
4713 | * * **BPF_MTU_CHK_RET_FRAG_NEEDED** |
4714 | * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** |
4715 | * |
4716 | * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags) |
4717 | * Description |
4718 | * For each element in **map**, call **callback_fn** function with |
4719 | * **map**, **callback_ctx** and other map-specific parameters. |
4720 | * The **callback_fn** should be a static function and |
4721 | * the **callback_ctx** should be a pointer to the stack. |
 * The **flags** argument is used to control certain aspects of the helper.
4723 | * Currently, the **flags** must be 0. |
4724 | * |
4725 | * The following are a list of supported map types and their |
4726 | * respective expected callback signatures: |
4727 | * |
4728 | * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, |
4729 | * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, |
4730 | * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY |
4731 | * |
4732 | * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx); |
4733 | * |
4734 | * For per_cpu maps, the map_value is the value on the cpu where the |
4735 | * bpf_prog is running. |
4736 | * |
 * If **callback_fn** returns 0, the helper will continue to the next
 * element. If the return value is 1, the helper will skip the rest of
 * the elements and return. Other return values are not used now.
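 *
 * A minimal sketch (illustration only) summing the values of an
 * array map, assuming libbpf's **bpf_helpers.h**; the map and
 * attach point are hypothetical:
 *
 * ::
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counters SEC(".maps");
 *
 *	static long sum_elem(struct bpf_map *map, const void *key,
 *			     void *value, void *ctx)
 *	{
 *		*(__u64 *)ctx += *(__u64 *)value;
 *		return 0;	// 0: continue, 1: stop iterating
 *	}
 *
 *	SEC("tracepoint/syscalls/sys_enter_getpid")
 *	int report_total(void *ctx)
 *	{
 *		__u64 total = 0;
 *
 *		bpf_for_each_map_elem(&counters, sum_elem, &total, 0);
 *		bpf_printk("total=%llu", total);
 *		return 0;
 *	}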
4740 | * |
4741 | * Return |
 * The number of traversed map elements on success, **-EINVAL** for
4743 | * invalid **flags**. |
4744 | * |
4745 | * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len) |
4746 | * Description |
4747 | * Outputs a string into the **str** buffer of size **str_size** |
 * based on a format string stored in a read-only map pointed to by
4749 | * **fmt**. |
4750 | * |
4751 | * Each format specifier in **fmt** corresponds to one u64 element |
4752 | * in the **data** array. For strings and pointers where pointees |
4753 | * are accessed, only the pointer values are stored in the *data* |
4754 | * array. The *data_len* is the size of *data* in bytes. |
4755 | * |
4756 | * Formats **%s** and **%p{i,I}{4,6}** require to read kernel |
4757 | * memory. Reading kernel memory may fail due to either invalid |
4758 | * address or valid address but requiring a major memory fault. If |
4759 | * reading kernel memory fails, the string for **%s** will be an |
4760 | * empty string, and the ip address for **%p{i,I}{4,6}** will be 0. |
4761 | * Not returning error to bpf program is consistent with what |
4762 | * **bpf_trace_printk**\ () does for now. |
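 *
 * A minimal tracing sketch (illustration only), assuming libbpf's
 * **bpf_helpers.h** and a privileged program; buffer sizes are
 * arbitrary:
 *
 * ::
 *
 *	static const char fmt[] = "pid=%u comm=%s";
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int on_execve(void *ctx)
 *	{
 *		char comm[16], out[64];
 *		__u64 args[2];
 *
 *		bpf_get_current_comm(comm, sizeof(comm));
 *		args[0] = bpf_get_current_pid_tgid() >> 32;
 *		args[1] = (__u64)(long)comm;	// %s reads through this pointer
 *		bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *		return 0;
 *	}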
4763 | * |
4764 | * Return |
4765 | * The strictly positive length of the formatted string, including |
4766 | * the trailing zero character. If the return value is greater than |
4767 | * **str_size**, **str** contains a truncated string, guaranteed to |
4768 | * be zero-terminated except when **str_size** is 0. |
4769 | * |
4770 | * Or **-EBUSY** if the per-CPU memory copy buffer is busy. |
4771 | * |
4772 | * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size) |
4773 | * Description |
4774 | * Execute bpf syscall with given arguments. |
4775 | * Return |
4776 | * A syscall result. |
4777 | * |
4778 | * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags) |
4779 | * Description |
4780 | * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs. |
4781 | * Return |
4782 | * Returns btf_id and btf_obj_fd in lower and upper 32 bits. |
4783 | * |
4784 | * long bpf_sys_close(u32 fd) |
4785 | * Description |
4786 | * Execute close syscall for given FD. |
4787 | * Return |
4788 | * A syscall result. |
4789 | * |
4790 | * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) |
4791 | * Description |
4792 | * Initialize the timer. |
4793 | * First 4 bits of *flags* specify clockid. |
4794 | * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. |
4795 | * All other bits of *flags* are reserved. |
4796 | * The verifier will reject the program if *timer* is not from |
4797 | * the same *map*. |
4798 | * Return |
4799 | * 0 on success. |
4800 | * **-EBUSY** if *timer* is already initialized. |
4801 | * **-EINVAL** if invalid *flags* are passed. |
4802 | * **-EPERM** if *timer* is in a map that doesn't have any user references. |
4803 | * The user space should either hold a file descriptor to a map with timers |
4804 | * or pin such map in bpffs. When map is unpinned or file descriptor is |
4805 | * closed all timers in the map will be cancelled and freed. |
4806 | * |
4807 | * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) |
4808 | * Description |
4809 | * Configure the timer to call *callback_fn* static function. |
4810 | * Return |
4811 | * 0 on success. |
4812 | * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. |
4813 | * **-EPERM** if *timer* is in a map that doesn't have any user references. |
4814 | * The user space should either hold a file descriptor to a map with timers |
4815 | * or pin such map in bpffs. When map is unpinned or file descriptor is |
4816 | * closed all timers in the map will be cancelled and freed. |
4817 | * |
4818 | * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) |
4819 | * Description |
 * Set timer expiration *nsecs* nanoseconds from the current time. The
 * configured callback will be invoked in soft irq context on some cpu
 * and will not repeat unless another bpf_timer_start() is made.
 * In such a case the next invocation can migrate to a different cpu.
4824 | * Since struct bpf_timer is a field inside map element the map |
4825 | * owns the timer. The bpf_timer_set_callback() will increment refcnt |
4826 | * of BPF program to make sure that callback_fn code stays valid. |
4827 | * When user space reference to a map reaches zero all timers |
4828 | * in a map are cancelled and corresponding program's refcnts are |
4829 | * decremented. This is done to make sure that Ctrl-C of a user |
4830 | * process doesn't leave any timers running. If map is pinned in |
4831 | * bpffs the callback_fn can re-arm itself indefinitely. |
4832 | * bpf_map_update/delete_elem() helpers and user space sys_bpf commands |
4833 | * cancel and free the timer in the given map element. |
4834 | * The map can contain timers that invoke callback_fn-s from different |
4835 | * programs. The same callback_fn can serve different timers from |
4836 | * different maps if key/value layout matches across maps. |
4837 | * Every bpf_timer_set_callback() can have different callback_fn. |
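 *
 * A minimal sketch (illustration only) combining
 * **bpf_timer_init**\ (), **bpf_timer_set_callback**\ () and this
 * helper, assuming libbpf's **bpf_helpers.h**; the map layout and
 * attach point are hypothetical:
 *
 * ::
 *
 *	struct map_elem {
 *		struct bpf_timer timer;
 *		__u64 fired;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct map_elem);
 *	} timer_map SEC(".maps");
 *
 *	static int timer_cb(void *map, int *key, struct map_elem *val)
 *	{
 *		val->fired++;
 *		// Re-arm for another 100ms.
 *		bpf_timer_start(&val->timer, 100000000ULL, 0);
 *		return 0;
 *	}
 *
 *	SEC("fentry/bpf_fentry_test1")
 *	int arm_timer(void *ctx)
 *	{
 *		int key = 0;
 *		struct map_elem *val;
 *
 *		val = bpf_map_lookup_elem(&timer_map, &key);
 *		if (!val)
 *			return 0;
 *		// clockid 1 == CLOCK_MONOTONIC
 *		bpf_timer_init(&val->timer, &timer_map, 1);
 *		bpf_timer_set_callback(&val->timer, timer_cb);
 *		bpf_timer_start(&val->timer, 100000000ULL, 0);	// 100ms
 *		return 0;
 *	}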
4838 | * |
4839 | * Return |
4840 | * 0 on success. |
4841 | * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier |
4842 | * or invalid *flags* are passed. |
4843 | * |
4844 | * long bpf_timer_cancel(struct bpf_timer *timer) |
4845 | * Description |
4846 | * Cancel the timer and wait for callback_fn to finish if it was running. |
4847 | * Return |
4848 | * 0 if the timer was not active. |
4849 | * 1 if the timer was active. |
4850 | * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. |
4851 | * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its |
4852 | * own timer which would have led to a deadlock otherwise. |
4853 | * |
4854 | * u64 bpf_get_func_ip(void *ctx) |
4855 | * Description |
4856 | * Get address of the traced function (for tracing and kprobe programs). |
4857 | * Return |
4858 | * Address of the traced function. |
4859 | * |
4860 | * u64 bpf_get_attach_cookie(void *ctx) |
4861 | * Description |
4862 | * Get bpf_cookie value provided (optionally) during the program |
4863 | * attachment. It might be different for each individual |
4864 | * attachment, even if BPF program itself is the same. |
4865 | * Expects BPF program context *ctx* as a first argument. |
4866 | * |
4867 | * Supported for the following program types: |
4868 | * - kprobe/uprobe; |
4869 | * - tracepoint; |
4870 | * - perf_event. |
4871 | * Return |
4872 | * Value specified by user at BPF link creation/attachment time |
4873 | * or 0, if it was not specified. |
4874 | * |
4875 | * long bpf_task_pt_regs(struct task_struct *task) |
4876 | * Description |
4877 | * Get the struct pt_regs associated with **task**. |
4878 | * Return |
4879 | * A pointer to struct pt_regs. |
4880 | */ |
4881 | #define __BPF_FUNC_MAPPER(FN) \ |
4882 | FN(unspec), \ |
4883 | FN(map_lookup_elem), \ |
4884 | FN(map_update_elem), \ |
4885 | FN(map_delete_elem), \ |
4886 | FN(probe_read), \ |
4887 | FN(ktime_get_ns), \ |
4888 | FN(trace_printk), \ |
4889 | FN(get_prandom_u32), \ |
4890 | FN(get_smp_processor_id), \ |
4891 | FN(skb_store_bytes), \ |
4892 | FN(l3_csum_replace), \ |
4893 | FN(l4_csum_replace), \ |
4894 | FN(tail_call), \ |
4895 | FN(clone_redirect), \ |
4896 | FN(get_current_pid_tgid), \ |
4897 | FN(get_current_uid_gid), \ |
4898 | FN(get_current_comm), \ |
4899 | FN(get_cgroup_classid), \ |
4900 | FN(skb_vlan_push), \ |
4901 | FN(skb_vlan_pop), \ |
4902 | FN(skb_get_tunnel_key), \ |
4903 | FN(skb_set_tunnel_key), \ |
4904 | FN(perf_event_read), \ |
4905 | FN(redirect), \ |
4906 | FN(get_route_realm), \ |
4907 | FN(perf_event_output), \ |
4908 | FN(skb_load_bytes), \ |
4909 | FN(get_stackid), \ |
4910 | FN(csum_diff), \ |
4911 | FN(skb_get_tunnel_opt), \ |
4912 | FN(skb_set_tunnel_opt), \ |
4913 | FN(skb_change_proto), \ |
4914 | FN(skb_change_type), \ |
4915 | FN(skb_under_cgroup), \ |
4916 | FN(get_hash_recalc), \ |
4917 | FN(get_current_task), \ |
4918 | FN(probe_write_user), \ |
4919 | FN(current_task_under_cgroup), \ |
4920 | FN(skb_change_tail), \ |
4921 | FN(skb_pull_data), \ |
4922 | FN(csum_update), \ |
4923 | FN(set_hash_invalid), \ |
4924 | FN(get_numa_node_id), \ |
4925 | FN(skb_change_head), \ |
4926 | FN(xdp_adjust_head), \ |
4927 | FN(probe_read_str), \ |
4928 | FN(get_socket_cookie), \ |
4929 | FN(get_socket_uid), \ |
4930 | FN(set_hash), \ |
4931 | FN(setsockopt), \ |
4932 | FN(skb_adjust_room), \ |
4933 | FN(redirect_map), \ |
4934 | FN(sk_redirect_map), \ |
4935 | FN(sock_map_update), \ |
4936 | FN(xdp_adjust_meta), \ |
4937 | FN(perf_event_read_value), \ |
4938 | FN(perf_prog_read_value), \ |
4939 | FN(getsockopt), \ |
4940 | FN(override_return), \ |
4941 | FN(sock_ops_cb_flags_set), \ |
4942 | FN(msg_redirect_map), \ |
4943 | FN(msg_apply_bytes), \ |
4944 | FN(msg_cork_bytes), \ |
4945 | FN(msg_pull_data), \ |
4946 | FN(bind), \ |
4947 | FN(xdp_adjust_tail), \ |
4948 | FN(skb_get_xfrm_state), \ |
4949 | FN(get_stack), \ |
4950 | FN(skb_load_bytes_relative), \ |
4951 | FN(fib_lookup), \ |
4952 | FN(sock_hash_update), \ |
4953 | FN(msg_redirect_hash), \ |
4954 | FN(sk_redirect_hash), \ |
4955 | FN(lwt_push_encap), \ |
4956 | FN(lwt_seg6_store_bytes), \ |
4957 | FN(lwt_seg6_adjust_srh), \ |
4958 | FN(lwt_seg6_action), \ |
4959 | FN(rc_repeat), \ |
4960 | FN(rc_keydown), \ |
4961 | FN(skb_cgroup_id), \ |
4962 | FN(get_current_cgroup_id), \ |
4963 | FN(get_local_storage), \ |
4964 | FN(sk_select_reuseport), \ |
4965 | FN(skb_ancestor_cgroup_id), \ |
4966 | FN(sk_lookup_tcp), \ |
4967 | FN(sk_lookup_udp), \ |
4968 | FN(sk_release), \ |
4969 | FN(map_push_elem), \ |
4970 | FN(map_pop_elem), \ |
4971 | FN(map_peek_elem), \ |
4972 | FN(msg_push_data), \ |
4973 | FN(msg_pop_data), \ |
4974 | FN(rc_pointer_rel), \ |
4975 | FN(spin_lock), \ |
4976 | FN(spin_unlock), \ |
4977 | FN(sk_fullsock), \ |
4978 | FN(tcp_sock), \ |
4979 | FN(skb_ecn_set_ce), \ |
4980 | FN(get_listener_sock), \ |
4981 | FN(skc_lookup_tcp), \ |
4982 | FN(tcp_check_syncookie), \ |
4983 | FN(sysctl_get_name), \ |
4984 | FN(sysctl_get_current_value), \ |
4985 | FN(sysctl_get_new_value), \ |
4986 | FN(sysctl_set_new_value), \ |
4987 | FN(strtol), \ |
4988 | FN(strtoul), \ |
4989 | FN(sk_storage_get), \ |
4990 | FN(sk_storage_delete), \ |
4991 | FN(send_signal), \ |
4992 | FN(tcp_gen_syncookie), \ |
4993 | FN(skb_output), \ |
4994 | FN(probe_read_user), \ |
4995 | FN(probe_read_kernel), \ |
4996 | FN(probe_read_user_str), \ |
4997 | FN(probe_read_kernel_str), \ |
4998 | FN(tcp_send_ack), \ |
4999 | FN(send_signal_thread), \ |
5000 | FN(jiffies64), \ |
5001 | FN(read_branch_records), \ |
5002 | FN(get_ns_current_pid_tgid), \ |
5003 | FN(xdp_output), \ |
5004 | FN(get_netns_cookie), \ |
5005 | FN(get_current_ancestor_cgroup_id), \ |
5006 | FN(sk_assign), \ |
5007 | FN(ktime_get_boot_ns), \ |
5008 | FN(seq_printf), \ |
5009 | FN(seq_write), \ |
5010 | FN(sk_cgroup_id), \ |
5011 | FN(sk_ancestor_cgroup_id), \ |
5012 | FN(ringbuf_output), \ |
5013 | FN(ringbuf_reserve), \ |
5014 | FN(ringbuf_submit), \ |
5015 | FN(ringbuf_discard), \ |
5016 | FN(ringbuf_query), \ |
5017 | FN(csum_level), \ |
5018 | FN(skc_to_tcp6_sock), \ |
5019 | FN(skc_to_tcp_sock), \ |
5020 | FN(skc_to_tcp_timewait_sock), \ |
5021 | FN(skc_to_tcp_request_sock), \ |
5022 | FN(skc_to_udp6_sock), \ |
5023 | FN(get_task_stack), \ |
5024 | FN(load_hdr_opt), \ |
5025 | FN(store_hdr_opt), \ |
5026 | FN(reserve_hdr_opt), \ |
5027 | FN(inode_storage_get), \ |
5028 | FN(inode_storage_delete), \ |
5029 | FN(d_path), \ |
5030 | FN(copy_from_user), \ |
5031 | FN(snprintf_btf), \ |
5032 | FN(seq_printf_btf), \ |
5033 | FN(skb_cgroup_classid), \ |
5034 | FN(redirect_neigh), \ |
5035 | FN(per_cpu_ptr), \ |
5036 | FN(this_cpu_ptr), \ |
5037 | FN(redirect_peer), \ |
5038 | FN(task_storage_get), \ |
5039 | FN(task_storage_delete), \ |
5040 | FN(get_current_task_btf), \ |
5041 | FN(bprm_opts_set), \ |
5042 | FN(ktime_get_coarse_ns), \ |
5043 | FN(ima_inode_hash), \ |
5044 | FN(sock_from_file), \ |
5045 | FN(check_mtu), \ |
5046 | FN(for_each_map_elem), \ |
5047 | FN(snprintf), \ |
5048 | FN(sys_bpf), \ |
5049 | FN(btf_find_by_name_kind), \ |
5050 | FN(sys_close), \ |
5051 | FN(timer_init), \ |
5052 | FN(timer_set_callback), \ |
5053 | FN(timer_start), \ |
5054 | FN(timer_cancel), \ |
5055 | FN(get_func_ip), \ |
5056 | FN(get_attach_cookie), \ |
5057 | FN(task_pt_regs), \ |
5058 | /* */ |
5059 | |
5060 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
5061 | * function eBPF program intends to call |
5062 | */ |
5063 | #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x |
5064 | enum bpf_func_id { |
5065 | __BPF_FUNC_MAPPER(__BPF_ENUM_FN) |
5066 | __BPF_FUNC_MAX_ID, |
5067 | }; |
5068 | #undef __BPF_ENUM_FN |
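
/* For example, a raw instruction that calls the map_lookup_elem helper can be
 * built from the enum above (a sketch; loaders such as libbpf normally emit
 * this encoding for you):
 *
 *	struct bpf_insn call_insn = {
 *		.code    = BPF_JMP | BPF_CALL,		// call opcode
 *		.dst_reg = 0,
 *		.src_reg = 0,
 *		.off     = 0,
 *		.imm     = BPF_FUNC_map_lookup_elem,	// helper id from enum bpf_func_id
 *	};
 */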
5069 | |
5070 | /* All flags used by eBPF helper functions, placed here. */ |
5071 | |
5072 | /* BPF_FUNC_skb_store_bytes flags. */ |
5073 | enum { |
5074 | BPF_F_RECOMPUTE_CSUM = (1ULL << 0), |
5075 | BPF_F_INVALIDATE_HASH = (1ULL << 1), |
5076 | }; |
5077 | |
5078 | /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. |
5079 | * First 4 bits are for passing the header field size. |
5080 | */ |
5081 | enum { |
5082 | BPF_F_HDR_FIELD_MASK = 0xfULL, |
5083 | }; |
5084 | |
5085 | /* BPF_FUNC_l4_csum_replace flags. */ |
5086 | enum { |
5087 | BPF_F_PSEUDO_HDR = (1ULL << 4), |
5088 | BPF_F_MARK_MANGLED_0 = (1ULL << 5), |
5089 | BPF_F_MARK_ENFORCE = (1ULL << 6), |
5090 | }; |
5091 | |
5092 | /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ |
5093 | enum { |
5094 | BPF_F_INGRESS = (1ULL << 0), |
5095 | }; |
5096 | |
5097 | /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ |
5098 | enum { |
5099 | BPF_F_TUNINFO_IPV6 = (1ULL << 0), |
5100 | }; |
5101 | |
5102 | /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ |
5103 | enum { |
5104 | BPF_F_SKIP_FIELD_MASK = 0xffULL, |
5105 | BPF_F_USER_STACK = (1ULL << 8), |
5106 | /* flags used by BPF_FUNC_get_stackid only. */ |
5107 | BPF_F_FAST_STACK_CMP = (1ULL << 9), |
5108 | BPF_F_REUSE_STACKID = (1ULL << 10), |
5109 | /* flags used by BPF_FUNC_get_stack only. */ |
5110 | BPF_F_USER_BUILD_ID = (1ULL << 11), |
5111 | }; |
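
/* For example, a tracing program might capture the user-space stack of the
 * current task into a hypothetical BPF_MAP_TYPE_STACK_TRACE map "stacks"
 * (a sketch, not part of the UAPI):
 *
 *	long id = bpf_get_stackid(ctx, &stacks,
 *				  BPF_F_USER_STACK | BPF_F_FAST_STACK_CMP);
 *	// id >= 0 indexes the captured stack inside "stacks"
 */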
5112 | |
5113 | /* BPF_FUNC_skb_set_tunnel_key flags. */ |
5114 | enum { |
5115 | BPF_F_ZERO_CSUM_TX = (1ULL << 1), |
5116 | BPF_F_DONT_FRAGMENT = (1ULL << 2), |
5117 | BPF_F_SEQ_NUMBER = (1ULL << 3), |
5118 | }; |
5119 | |
5120 | /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and |
5121 | * BPF_FUNC_perf_event_read_value flags. |
5122 | */ |
5123 | enum { |
5124 | BPF_F_INDEX_MASK = 0xffffffffULL, |
5125 | BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, |
5126 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ |
5127 | BPF_F_CTXLEN_MASK = (0xfffffULL << 32), |
5128 | }; |
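
/* For example, BPF_F_CURRENT_CPU is typically used to emit an event to the
 * perf buffer of the CPU the program is running on; "events" is a hypothetical
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map (a sketch, not part of the UAPI):
 *
 *	struct event { __u32 pid; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */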
5129 | |
5130 | /* Current network namespace */ |
5131 | enum { |
5132 | BPF_F_CURRENT_NETNS = (-1L), |
5133 | }; |
5134 | |
5135 | /* BPF_FUNC_csum_level level values. */ |
5136 | enum { |
5137 | BPF_CSUM_LEVEL_QUERY, |
5138 | BPF_CSUM_LEVEL_INC, |
5139 | BPF_CSUM_LEVEL_DEC, |
5140 | BPF_CSUM_LEVEL_RESET, |
5141 | }; |
5142 | |
5143 | /* BPF_FUNC_skb_adjust_room flags. */ |
5144 | enum { |
5145 | BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), |
5146 | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), |
5147 | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), |
5148 | BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), |
5149 | BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), |
5150 | BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), |
5151 | BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6), |
5152 | }; |
5153 | |
5154 | enum { |
5155 | BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, |
5156 | BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, |
5157 | }; |
5158 | |
5159 | #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ |
5160 | BPF_ADJ_ROOM_ENCAP_L2_MASK) \ |
5161 | << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) |
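
/* For example, a TC program pushing outer Ethernet/IPv4/UDP encapsulation
 * headers could combine these flags as follows ("encap_len" is assumed to be
 * the total size of the pushed headers and TC_ACT_SHOT comes from
 * linux/pkt_cls.h; a sketch, not part of the UAPI):
 *
 *	__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *		      BPF_F_ADJ_ROOM_ENCAP_L4_UDP |
 *		      BPF_F_ADJ_ROOM_ENCAP_L2_ETH |
 *		      BPF_F_ADJ_ROOM_ENCAP_L2(sizeof(struct ethhdr));
 *
 *	if (bpf_skb_adjust_room(skb, encap_len, BPF_ADJ_ROOM_MAC, flags))
 *		return TC_ACT_SHOT;
 */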
5162 | |
5163 | /* BPF_FUNC_sysctl_get_name flags. */ |
5164 | enum { |
5165 | BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), |
5166 | }; |
5167 | |
5168 | /* BPF_FUNC_<kernel_obj>_storage_get flags */ |
5169 | enum { |
5170 | BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), |
5171 | /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility |
5172 | * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. |
5173 | */ |
5174 | BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, |
5175 | }; |
5176 | |
5177 | /* BPF_FUNC_read_branch_records flags. */ |
5178 | enum { |
5179 | BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), |
5180 | }; |
5181 | |
/* BPF_FUNC_ringbuf_submit, BPF_FUNC_ringbuf_discard, and
 * BPF_FUNC_ringbuf_output flags.
5184 | */ |
5185 | enum { |
5186 | BPF_RB_NO_WAKEUP = (1ULL << 0), |
5187 | BPF_RB_FORCE_WAKEUP = (1ULL << 1), |
5188 | }; |
5189 | |
/* BPF_FUNC_ringbuf_query flags */
5191 | enum { |
5192 | BPF_RB_AVAIL_DATA = 0, |
5193 | BPF_RB_RING_SIZE = 1, |
5194 | BPF_RB_CONS_POS = 2, |
5195 | BPF_RB_PROD_POS = 3, |
5196 | }; |
5197 | |
5198 | /* BPF ring buffer constants */ |
5199 | enum { |
5200 | BPF_RINGBUF_BUSY_BIT = (1U << 31), |
5201 | BPF_RINGBUF_DISCARD_BIT = (1U << 30), |
5202 | BPF_RINGBUF_HDR_SZ = 8, |
5203 | }; |
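
/* A user space consumer walking the ring buffer data area interprets the
 * 32-bit length word at the start of each record using the bits above,
 * roughly as follows (a sketch of what libbpf's ring_buffer consumer does;
 * "len_ptr" and "handle_sample" are hypothetical names):
 *
 *	__u32 len = *len_ptr;			// read with acquire semantics
 *
 *	if (len & BPF_RINGBUF_BUSY_BIT)
 *		return;				// record not committed yet
 *	if (!(len & BPF_RINGBUF_DISCARD_BIT))
 *		handle_sample((void *)len_ptr + BPF_RINGBUF_HDR_SZ,
 *			      len & ~(BPF_RINGBUF_BUSY_BIT |
 *				      BPF_RINGBUF_DISCARD_BIT));
 */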
5204 | |
5205 | /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ |
5206 | enum { |
5207 | BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), |
5208 | BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), |
5209 | }; |
5210 | |
5211 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ |
5212 | enum bpf_adj_room_mode { |
5213 | BPF_ADJ_ROOM_NET, |
5214 | BPF_ADJ_ROOM_MAC, |
5215 | }; |
5216 | |
5217 | /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ |
5218 | enum bpf_hdr_start_off { |
5219 | BPF_HDR_START_MAC, |
5220 | BPF_HDR_START_NET, |
5221 | }; |
5222 | |
5223 | /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ |
5224 | enum bpf_lwt_encap_mode { |
5225 | BPF_LWT_ENCAP_SEG6, |
5226 | BPF_LWT_ENCAP_SEG6_INLINE, |
5227 | BPF_LWT_ENCAP_IP, |
5228 | }; |
5229 | |
5230 | /* Flags for bpf_bprm_opts_set helper */ |
5231 | enum { |
5232 | BPF_F_BPRM_SECUREEXEC = (1ULL << 0), |
5233 | }; |
5234 | |
5235 | /* Flags for bpf_redirect_map helper */ |
5236 | enum { |
5237 | BPF_F_BROADCAST = (1ULL << 3), |
5238 | BPF_F_EXCLUDE_INGRESS = (1ULL << 4), |
5239 | }; |
5240 | |
/* Wrap a pointer-typed context member in a union with an unnamed 64-bit field
 * so that the structures below have the same size and layout for 32-bit and
 * 64-bit programs.
 */
#define __bpf_md_ptr(type, name)	\
union {					\
	type name;			\
	__u64 :64;			\
} __attribute__((aligned(8)))
5246 | |
5247 | /* user accessible mirror of in-kernel sk_buff. |
5248 | * new fields can only be added to the end of this structure |
5249 | */ |
5250 | struct __sk_buff { |
5251 | __u32 len; |
5252 | __u32 pkt_type; |
5253 | __u32 mark; |
5254 | __u32 queue_mapping; |
5255 | __u32 protocol; |
5256 | __u32 vlan_present; |
5257 | __u32 vlan_tci; |
5258 | __u32 vlan_proto; |
5259 | __u32 priority; |
5260 | __u32 ingress_ifindex; |
5261 | __u32 ifindex; |
5262 | __u32 tc_index; |
5263 | __u32 cb[5]; |
5264 | __u32 hash; |
5265 | __u32 tc_classid; |
5266 | __u32 data; |
5267 | __u32 data_end; |
5268 | __u32 napi_id; |
5269 | |
5270 | /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ |
5271 | __u32 family; |
5272 | __u32 remote_ip4; /* Stored in network byte order */ |
5273 | __u32 local_ip4; /* Stored in network byte order */ |
5274 | __u32 remote_ip6[4]; /* Stored in network byte order */ |
5275 | __u32 local_ip6[4]; /* Stored in network byte order */ |
5276 | __u32 remote_port; /* Stored in network byte order */ |
5277 | __u32 local_port; /* stored in host byte order */ |
5278 | /* ... here. */ |
5279 | |
5280 | __u32 data_meta; |
5281 | __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); |
5282 | __u64 tstamp; |
5283 | __u32 wire_len; |
5284 | __u32 gso_segs; |
5285 | __bpf_md_ptr(struct bpf_sock *, sk); |
5286 | __u32 gso_size; |
5287 | }; |
5288 | |
5289 | struct bpf_tunnel_key { |
5290 | __u32 tunnel_id; |
5291 | union { |
5292 | __u32 remote_ipv4; |
5293 | __u32 remote_ipv6[4]; |
5294 | }; |
5295 | __u8 tunnel_tos; |
5296 | __u8 tunnel_ttl; |
5297 | __u16 tunnel_ext; /* Padding, future use. */ |
5298 | __u32 tunnel_label; |
5299 | }; |
5300 | |
5301 | /* user accessible mirror of in-kernel xfrm_state. |
5302 | * new fields can only be added to the end of this structure |
5303 | */ |
5304 | struct bpf_xfrm_state { |
5305 | __u32 reqid; |
5306 | __u32 spi; /* Stored in network byte order */ |
5307 | __u16 family; |
5308 | __u16 ext; /* Padding, future use. */ |
5309 | union { |
5310 | __u32 remote_ipv4; /* Stored in network byte order */ |
5311 | __u32 remote_ipv6[4]; /* Stored in network byte order */ |
5312 | }; |
5313 | }; |
5314 | |
5315 | /* Generic BPF return codes which all BPF program types may support. |
5316 | * The values are binary compatible with their TC_ACT_* counter-part to |
5317 | * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT |
5318 | * programs. |
5319 | * |
 * XDP is handled separately, see XDP_*.
5321 | */ |
5322 | enum bpf_ret_code { |
5323 | BPF_OK = 0, |
5324 | /* 1 reserved */ |
5325 | BPF_DROP = 2, |
5326 | /* 3-6 reserved */ |
5327 | BPF_REDIRECT = 7, |
5328 | /* >127 are reserved for prog type specific return codes. |
5329 | * |
5330 | * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and |
5331 | * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been |
5332 | * changed and should be routed based on its new L3 header. |
5333 | * (This is an L3 redirect, as opposed to L2 redirect |
5334 | * represented by BPF_REDIRECT above). |
5335 | */ |
5336 | BPF_LWT_REROUTE = 128, |
5337 | }; |
5338 | |
5339 | struct bpf_sock { |
5340 | __u32 bound_dev_if; |
5341 | __u32 family; |
5342 | __u32 type; |
5343 | __u32 protocol; |
5344 | __u32 mark; |
5345 | __u32 priority; |
	/* IP address fields also allow 1- and 2-byte access */
5347 | __u32 src_ip4; |
5348 | __u32 src_ip6[4]; |
5349 | __u32 src_port; /* host byte order */ |
5350 | __be16 dst_port; /* network byte order */ |
5351 | __u16 :16; /* zero padding */ |
5352 | __u32 dst_ip4; |
5353 | __u32 dst_ip6[4]; |
5354 | __u32 state; |
5355 | __s32 rx_queue_mapping; |
5356 | }; |
5357 | |
5358 | struct bpf_tcp_sock { |
5359 | __u32 snd_cwnd; /* Sending congestion window */ |
5360 | __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ |
5361 | __u32 rtt_min; |
5362 | __u32 snd_ssthresh; /* Slow start size threshold */ |
5363 | __u32 rcv_nxt; /* What we want to receive next */ |
5364 | __u32 snd_nxt; /* Next sequence we send */ |
5365 | __u32 snd_una; /* First byte we want an ack for */ |
5366 | __u32 mss_cache; /* Cached effective mss, not including SACKS */ |
5367 | __u32 ecn_flags; /* ECN status bits. */ |
5368 | __u32 rate_delivered; /* saved rate sample: packets delivered */ |
5369 | __u32 rate_interval_us; /* saved rate sample: time elapsed */ |
5370 | __u32 packets_out; /* Packets which are "in flight" */ |
5371 | __u32 retrans_out; /* Retransmitted packets out */ |
5372 | __u32 total_retrans; /* Total retransmits for entire connection */ |
5373 | __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn |
5374 | * total number of segments in. |
5375 | */ |
5376 | __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn |
5377 | * total number of data segments in. |
5378 | */ |
5379 | __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut |
5380 | * The total number of segments sent. |
5381 | */ |
5382 | __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut |
5383 | * total number of data segments sent. |
5384 | */ |
5385 | __u32 lost_out; /* Lost packets */ |
5386 | __u32 sacked_out; /* SACK'd packets */ |
5387 | __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived |
				 * sum(delta(rcv_nxt)), or how many bytes
				 * were received.
5390 | */ |
5391 | __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked |
5392 | * sum(delta(snd_una)), or how many bytes |
5393 | * were acked. |
5394 | */ |
5395 | __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups |
5396 | * total number of DSACK blocks received |
5397 | */ |
5398 | __u32 delivered; /* Total data packets delivered incl. rexmits */ |
5399 | __u32 delivered_ce; /* Like the above but only ECE marked packets */ |
5400 | __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ |
5401 | }; |
5402 | |
5403 | struct bpf_sock_tuple { |
5404 | union { |
5405 | struct { |
5406 | __be32 saddr; |
5407 | __be32 daddr; |
5408 | __be16 sport; |
5409 | __be16 dport; |
5410 | } ipv4; |
5411 | struct { |
5412 | __be32 saddr[4]; |
5413 | __be32 daddr[4]; |
5414 | __be16 sport; |
5415 | __be16 dport; |
5416 | } ipv6; |
5417 | }; |
5418 | }; |
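
/* For example, a TC or XDP program can look up an established TCP socket for
 * an IPv4 packet by filling in the ipv4 member of the tuple ("iph" and "tcph"
 * are assumed to be verified pointers into the packet; a sketch):
 *
 *	struct bpf_sock_tuple tuple = {
 *		.ipv4.saddr = iph->saddr,
 *		.ipv4.daddr = iph->daddr,
 *		.ipv4.sport = tcph->source,
 *		.ipv4.dport = tcph->dest,
 *	};
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);	// acquired references must be released
 */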
5419 | |
5420 | struct bpf_xdp_sock { |
5421 | __u32 queue_id; |
5422 | }; |
5423 | |
5424 | #define XDP_PACKET_HEADROOM 256 |
5425 | |
5426 | /* User return codes for XDP prog type. |
5427 | * A valid XDP program must return one of these defined values. All other |
5428 | * return codes are reserved for future use. Unknown return codes will |
5429 | * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). |
5430 | */ |
5431 | enum xdp_action { |
5432 | XDP_ABORTED = 0, |
5433 | XDP_DROP, |
5434 | XDP_PASS, |
5435 | XDP_TX, |
5436 | XDP_REDIRECT, |
5437 | }; |
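
/* A minimal XDP program returning some of the codes above (a sketch; SEC(),
 * bpf_htons() and ETH_P_IPV6 come from libbpf's bpf_helpers.h/bpf_endian.h and
 * linux/if_ether.h, not from this header):
 *
 *	SEC("xdp")
 *	int xdp_drop_ipv6(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		struct ethhdr *eth = data;
 *
 *		if ((void *)(eth + 1) > data_end)
 *			return XDP_ABORTED;
 *		if (eth->h_proto == bpf_htons(ETH_P_IPV6))
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */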
5438 | |
5439 | /* user accessible metadata for XDP packet hook |
5440 | * new fields must be added to the end of this structure |
5441 | */ |
5442 | struct xdp_md { |
5443 | __u32 data; |
5444 | __u32 data_end; |
5445 | __u32 data_meta; |
	/* The fields below are accessed through struct xdp_rxq_info */
5447 | __u32 ingress_ifindex; /* rxq->dev->ifindex */ |
5448 | __u32 rx_queue_index; /* rxq->queue_index */ |
5449 | |
5450 | __u32 egress_ifindex; /* txq->dev->ifindex */ |
5451 | }; |
5452 | |
5453 | /* DEVMAP map-value layout |
5454 | * |
5455 | * The struct data-layout of map-value is a configuration interface. |
5456 | * New members can only be added to the end of this structure. |
5457 | */ |
5458 | struct bpf_devmap_val { |
5459 | __u32 ifindex; /* device index */ |
5460 | union { |
5461 | int fd; /* prog fd on map write */ |
5462 | __u32 id; /* prog id on map read */ |
5463 | } bpf_prog; |
5464 | }; |
5465 | |
5466 | /* CPUMAP map-value layout |
5467 | * |
5468 | * The struct data-layout of map-value is a configuration interface. |
5469 | * New members can only be added to the end of this structure. |
5470 | */ |
5471 | struct bpf_cpumap_val { |
5472 | __u32 qsize; /* queue size to remote target CPU */ |
5473 | union { |
5474 | int fd; /* prog fd on map write */ |
5475 | __u32 id; /* prog id on map read */ |
5476 | } bpf_prog; |
5477 | }; |
5478 | |
5479 | enum sk_action { |
5480 | SK_DROP = 0, |
5481 | SK_PASS, |
5482 | }; |
5483 | |
5484 | /* user accessible metadata for SK_MSG packet hook, new fields must |
5485 | * be added to the end of this structure |
5486 | */ |
5487 | struct sk_msg_md { |
5488 | __bpf_md_ptr(void *, data); |
5489 | __bpf_md_ptr(void *, data_end); |
5490 | |
5491 | __u32 family; |
5492 | __u32 remote_ip4; /* Stored in network byte order */ |
5493 | __u32 local_ip4; /* Stored in network byte order */ |
5494 | __u32 remote_ip6[4]; /* Stored in network byte order */ |
5495 | __u32 local_ip6[4]; /* Stored in network byte order */ |
5496 | __u32 remote_port; /* Stored in network byte order */ |
5497 | __u32 local_port; /* stored in host byte order */ |
5498 | __u32 size; /* Total size of sk_msg */ |
5499 | |
5500 | __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ |
5501 | }; |
5502 | |
5503 | struct sk_reuseport_md { |
5504 | /* |
5505 | * Start of directly accessible data. It begins from |
5506 | * the tcp/udp header. |
5507 | */ |
5508 | __bpf_md_ptr(void *, data); |
5509 | /* End of directly accessible data */ |
5510 | __bpf_md_ptr(void *, data_end); |
5511 | /* |
5512 | * Total length of packet (starting from the tcp/udp header). |
5513 | * Note that the directly accessible bytes (data_end - data) |
5514 | * could be less than this "len". Those bytes could be |
5515 | * indirectly read by a helper "bpf_skb_load_bytes()". |
5516 | */ |
5517 | __u32 len; |
5518 | /* |
5519 | * Eth protocol in the mac header (network byte order). e.g. |
5520 | * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) |
5521 | */ |
5522 | __u32 eth_protocol; |
5523 | __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ |
5524 | __u32 bind_inany; /* Is sock bound to an INANY address? */ |
5525 | __u32 hash; /* A hash of the packet 4 tuples */ |
5526 | /* When reuse->migrating_sk is NULL, it is selecting a sk for the |
5527 | * new incoming connection request (e.g. selecting a listen sk for |
5528 | * the received SYN in the TCP case). reuse->sk is one of the sk |
5529 | * in the reuseport group. The bpf prog can use reuse->sk to learn |
5530 | * the local listening ip/port without looking into the skb. |
5531 | * |
5532 | * When reuse->migrating_sk is not NULL, reuse->sk is closed and |
5533 | * reuse->migrating_sk is the socket that needs to be migrated |
5534 | * to another listening socket. migrating_sk could be a fullsock |
5535 | * sk that is fully established or a reqsk that is in-the-middle |
5536 | * of 3-way handshake. |
5537 | */ |
5538 | __bpf_md_ptr(struct bpf_sock *, sk); |
5539 | __bpf_md_ptr(struct bpf_sock *, migrating_sk); |
5540 | }; |
5541 | |
5542 | #define BPF_TAG_SIZE 8 |
5543 | |
5544 | struct bpf_prog_info { |
5545 | __u32 type; |
5546 | __u32 id; |
5547 | __u8 tag[BPF_TAG_SIZE]; |
5548 | __u32 jited_prog_len; |
5549 | __u32 xlated_prog_len; |
5550 | __aligned_u64 jited_prog_insns; |
5551 | __aligned_u64 xlated_prog_insns; |
5552 | __u64 load_time; /* ns since boottime */ |
5553 | __u32 created_by_uid; |
5554 | __u32 nr_map_ids; |
5555 | __aligned_u64 map_ids; |
5556 | char name[BPF_OBJ_NAME_LEN]; |
5557 | __u32 ifindex; |
5558 | __u32 gpl_compatible:1; |
5559 | __u32 :31; /* alignment pad */ |
5560 | __u64 netns_dev; |
5561 | __u64 netns_ino; |
5562 | __u32 nr_jited_ksyms; |
5563 | __u32 nr_jited_func_lens; |
5564 | __aligned_u64 jited_ksyms; |
5565 | __aligned_u64 jited_func_lens; |
5566 | __u32 btf_id; |
5567 | __u32 func_info_rec_size; |
5568 | __aligned_u64 func_info; |
5569 | __u32 nr_func_info; |
5570 | __u32 nr_line_info; |
5571 | __aligned_u64 line_info; |
5572 | __aligned_u64 jited_line_info; |
5573 | __u32 nr_jited_line_info; |
5574 | __u32 line_info_rec_size; |
5575 | __u32 jited_line_info_rec_size; |
5576 | __u32 nr_prog_tags; |
5577 | __aligned_u64 prog_tags; |
5578 | __u64 run_time_ns; |
5579 | __u64 run_cnt; |
5580 | __u64 recursion_misses; |
5581 | } __attribute__((aligned(8))); |
5582 | |
5583 | struct bpf_map_info { |
5584 | __u32 type; |
5585 | __u32 id; |
5586 | __u32 key_size; |
5587 | __u32 value_size; |
5588 | __u32 max_entries; |
5589 | __u32 map_flags; |
5590 | char name[BPF_OBJ_NAME_LEN]; |
5591 | __u32 ifindex; |
5592 | __u32 btf_vmlinux_value_type_id; |
5593 | __u64 netns_dev; |
5594 | __u64 netns_ino; |
5595 | __u32 btf_id; |
5596 | __u32 btf_key_type_id; |
5597 | __u32 btf_value_type_id; |
5598 | } __attribute__((aligned(8))); |
5599 | |
5600 | struct bpf_btf_info { |
5601 | __aligned_u64 btf; |
5602 | __u32 btf_size; |
5603 | __u32 id; |
5604 | __aligned_u64 name; |
5605 | __u32 name_len; |
5606 | __u32 kernel_btf; |
5607 | } __attribute__((aligned(8))); |
5608 | |
5609 | struct bpf_link_info { |
5610 | __u32 type; |
5611 | __u32 id; |
5612 | __u32 prog_id; |
5613 | union { |
5614 | struct { |
5615 | __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ |
5616 | __u32 tp_name_len; /* in/out: tp_name buffer len */ |
5617 | } raw_tracepoint; |
5618 | struct { |
5619 | __u32 attach_type; |
5620 | __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ |
5621 | __u32 target_btf_id; /* BTF type id inside the object */ |
5622 | } tracing; |
5623 | struct { |
5624 | __u64 cgroup_id; |
5625 | __u32 attach_type; |
5626 | } cgroup; |
5627 | struct { |
5628 | __aligned_u64 target_name; /* in/out: target_name buffer ptr */ |
5629 | __u32 target_name_len; /* in/out: target_name buffer len */ |
5630 | union { |
5631 | struct { |
5632 | __u32 map_id; |
5633 | } map; |
5634 | }; |
5635 | } iter; |
5636 | struct { |
5637 | __u32 netns_ino; |
5638 | __u32 attach_type; |
5639 | } netns; |
5640 | struct { |
5641 | __u32 ifindex; |
5642 | } xdp; |
5643 | }; |
5644 | } __attribute__((aligned(8))); |
5645 | |
5646 | /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed |
 * by the user and intended to be used by the socket (e.g. the address to bind
 * to; depends on the attach type).
5649 | */ |
5650 | struct bpf_sock_addr { |
5651 | __u32 user_family; /* Allows 4-byte read, but no write. */ |
5652 | __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. |
5653 | * Stored in network byte order. |
5654 | */ |
5655 | __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. |
5656 | * Stored in network byte order. |
5657 | */ |
5658 | __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. |
5659 | * Stored in network byte order |
5660 | */ |
5661 | __u32 family; /* Allows 4-byte read, but no write */ |
5662 | __u32 type; /* Allows 4-byte read, but no write */ |
5663 | __u32 protocol; /* Allows 4-byte read, but no write */ |
5664 | __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. |
5665 | * Stored in network byte order. |
5666 | */ |
5667 | __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. |
5668 | * Stored in network byte order. |
5669 | */ |
5670 | __bpf_md_ptr(struct bpf_sock *, sk); |
5671 | }; |
5672 | |
5673 | /* User bpf_sock_ops struct to access socket values and specify request ops |
5674 | * and their replies. |
 * Some of these fields are in network (big-endian) byte order and may need
5676 | * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). |
5677 | * New fields can only be added at the end of this structure |
5678 | */ |
5679 | struct bpf_sock_ops { |
5680 | __u32 op; |
5681 | union { |
5682 | __u32 args[4]; /* Optionally passed to bpf program */ |
5683 | __u32 reply; /* Returned by bpf program */ |
5684 | __u32 replylong[4]; /* Optionally returned by bpf prog */ |
5685 | }; |
5686 | __u32 family; |
5687 | __u32 remote_ip4; /* Stored in network byte order */ |
5688 | __u32 local_ip4; /* Stored in network byte order */ |
5689 | __u32 remote_ip6[4]; /* Stored in network byte order */ |
5690 | __u32 local_ip6[4]; /* Stored in network byte order */ |
5691 | __u32 remote_port; /* Stored in network byte order */ |
5692 | __u32 local_port; /* stored in host byte order */ |
5693 | __u32 is_fullsock; /* Some TCP fields are only valid if |
5694 | * there is a full socket. If not, the |
5695 | * fields read as zero. |
5696 | */ |
5697 | __u32 snd_cwnd; |
5698 | __u32 srtt_us; /* Averaged RTT << 3 in usecs */ |
5699 | __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ |
5700 | __u32 state; |
5701 | __u32 rtt_min; |
5702 | __u32 snd_ssthresh; |
5703 | __u32 rcv_nxt; |
5704 | __u32 snd_nxt; |
5705 | __u32 snd_una; |
5706 | __u32 mss_cache; |
5707 | __u32 ecn_flags; |
5708 | __u32 rate_delivered; |
5709 | __u32 rate_interval_us; |
5710 | __u32 packets_out; |
5711 | __u32 retrans_out; |
5712 | __u32 total_retrans; |
5713 | __u32 segs_in; |
5714 | __u32 data_segs_in; |
5715 | __u32 segs_out; |
5716 | __u32 data_segs_out; |
5717 | __u32 lost_out; |
5718 | __u32 sacked_out; |
5719 | __u32 sk_txhash; |
5720 | __u64 bytes_received; |
5721 | __u64 bytes_acked; |
5722 | __bpf_md_ptr(struct bpf_sock *, sk); |
5723 | /* [skb_data, skb_data_end) covers the whole TCP header. |
5724 | * |
5725 | * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received |
5726 | * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the |
5727 | * header has not been written. |
5728 | * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have |
5729 | * been written so far. |
5730 | * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes |
5731 | * the 3WHS. |
5732 | * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes |
5733 | * the 3WHS. |
5734 | * |
5735 | * bpf_load_hdr_opt() can also be used to read a particular option. |
5736 | */ |
5737 | __bpf_md_ptr(void *, skb_data); |
5738 | __bpf_md_ptr(void *, skb_data_end); |
5739 | __u32 skb_len; /* The total length of a packet. |
5740 | * It includes the header, options, |
5741 | * and payload. |
5742 | */ |
5743 | __u32 skb_tcp_flags; /* tcp_flags of the header. It provides |
5744 | * an easy way to check for tcp_flags |
5745 | * without parsing skb_data. |
5746 | * |
				 * In particular, the skb_tcp_flags
				 * will still be available in
				 * BPF_SOCK_OPS_HDR_OPT_LEN_CB even though
				 * the outgoing header has not
				 * been written yet.
5752 | */ |
5753 | }; |
5754 | |
5755 | /* Definitions for bpf_sock_ops_cb_flags */ |
5756 | enum { |
5757 | BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), |
5758 | BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), |
5759 | BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), |
5760 | BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), |
5761 | /* Call bpf for all received TCP headers. The bpf prog will be |
5762 | * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB |
5763 | * |
5764 | * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB |
5765 | * for the header option related helpers that will be useful |
5766 | * to the bpf programs. |
5767 | * |
 * It can be used at the client/active side (i.e. connect() side)
 * when the server has told it that the server is in syncookie
 * mode and requires the active side to resend the bpf-written
 * options. The active side can keep writing the bpf-options until
 * it receives a valid packet from the server side confirming that
 * the earlier packet (and options) has been received. The
 * accompanying example uses it like this at the active side when
 * the server is in syncookie mode.
5776 | * |
5777 | * The bpf prog will usually turn this off in the common cases. |
5778 | */ |
5779 | BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), |
5780 | /* Call bpf when kernel has received a header option that |
5781 | * the kernel cannot handle. The bpf prog will be called under |
5782 | * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. |
5783 | * |
5784 | * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB |
5785 | * for the header option related helpers that will be useful |
5786 | * to the bpf programs. |
5787 | */ |
5788 | BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), |
5789 | /* Call bpf when the kernel is writing header options for the |
5790 | * outgoing packet. The bpf prog will first be called |
5791 | * to reserve space in a skb under |
5792 | * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then |
5793 | * the bpf prog will be called to write the header option(s) |
5794 | * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. |
5795 | * |
5796 | * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB |
5797 | * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option |
5798 | * related helpers that will be useful to the bpf programs. |
5799 | * |
5800 | * The kernel gets its chance to reserve space and write |
5801 | * options first before the BPF program does. |
5802 | */ |
5803 | BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), |
5804 | /* Mask of all currently supported cb flags */ |
5805 | BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, |
5806 | }; |
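
/* For example, a sock_ops program typically enables only the callbacks it
 * needs once the connection is established (a sketch; "skops" is the program's
 * struct bpf_sock_ops context and the *_ESTABLISHED_CB ops are defined in the
 * enum below):
 *
 *	switch (skops->op) {
 *	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTT_CB_FLAG |
 *					  BPF_SOCK_OPS_STATE_CB_FLAG);
 *		break;
 *	}
 */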
5807 | |
5808 | /* List of known BPF sock_ops operators. |
5809 | * New entries can only be added at the end |
5810 | */ |
5811 | enum { |
5812 | BPF_SOCK_OPS_VOID, |
5813 | BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or |
5814 | * -1 if default value should be used |
5815 | */ |
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
5817 | * window (in packets) or -1 if default |
5818 | * value should be used |
5819 | */ |
5820 | BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an |
5821 | * active connection is initialized |
5822 | */ |
5823 | BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an |
5824 | * active connection is |
5825 | * established |
5826 | */ |
5827 | BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a |
5828 | * passive connection is |
5829 | * established |
5830 | */ |
5831 | BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control |
5832 | * needs ECN |
5833 | */ |
5834 | BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is |
5835 | * based on the path and may be |
5836 | * dependent on the congestion control |
5837 | * algorithm. In general it indicates |
5838 | * a congestion threshold. RTTs above |
5839 | * this indicate congestion |
5840 | */ |
5841 | BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. |
5842 | * Arg1: value of icsk_retransmits |
5843 | * Arg2: value of icsk_rto |
5844 | * Arg3: whether RTO has expired |
5845 | */ |
5846 | BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. |
5847 | * Arg1: sequence number of 1st byte |
5848 | * Arg2: # segments |
5849 | * Arg3: return value of |
5850 | * tcp_transmit_skb (0 => success) |
5851 | */ |
5852 | BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. |
5853 | * Arg1: old_state |
5854 | * Arg2: new_state |
5855 | */ |
5856 | BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after |
5857 | * socket transition to LISTEN state. |
5858 | */ |
5859 | BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. |
5860 | */ |
5861 | BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. |
5862 | * It will be called to handle |
5863 | * the packets received at |
5864 | * an already established |
5865 | * connection. |
5866 | * |
5867 | * sock_ops->skb_data: |
5868 | * Referring to the received skb. |
5869 | * It covers the TCP header only. |
5870 | * |
5871 | * bpf_load_hdr_opt() can also |
5872 | * be used to search for a |
5873 | * particular option. |
5874 | */ |
5875 | BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the |
5876 | * header option later in |
5877 | * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. |
5878 | * Arg1: bool want_cookie. (in |
5879 | * writing SYNACK only) |
5880 | * |
5881 | * sock_ops->skb_data: |
5882 | * Not available because no header has |
5883 | * been written yet. |
5884 | * |
5885 | * sock_ops->skb_tcp_flags: |
5886 | * The tcp_flags of the |
5887 | * outgoing skb. (e.g. SYN, ACK, FIN). |
5888 | * |
5889 | * bpf_reserve_hdr_opt() should |
5890 | * be used to reserve space. |
5891 | */ |
5892 | BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options |
5893 | * Arg1: bool want_cookie. (in |
5894 | * writing SYNACK only) |
5895 | * |
5896 | * sock_ops->skb_data: |
5897 | * Referring to the outgoing skb. |
5898 | * It covers the TCP header |
5899 | * that has already been written |
5900 | * by the kernel and the |
5901 | * earlier bpf-progs. |
5902 | * |
5903 | * sock_ops->skb_tcp_flags: |
5904 | * The tcp_flags of the outgoing |
5905 | * skb. (e.g. SYN, ACK, FIN). |
5906 | * |
5907 | * bpf_store_hdr_opt() should |
5908 | * be used to write the |
5909 | * option. |
5910 | * |
5911 | * bpf_load_hdr_opt() can also |
5912 | * be used to search for a |
5913 | * particular option that |
5914 | * has already been written |
5915 | * by the kernel or the |
5916 | * earlier bpf-progs. |
5917 | */ |
5918 | }; |
5919 | |
5920 | /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect |
5921 | * changes between the TCP and BPF versions. Ideally this should never happen. |
5922 | * If it does, we need to add code to convert them before calling |
5923 | * the BPF sock_ops function. |
5924 | */ |
5925 | enum { |
5926 | BPF_TCP_ESTABLISHED = 1, |
5927 | BPF_TCP_SYN_SENT, |
5928 | BPF_TCP_SYN_RECV, |
5929 | BPF_TCP_FIN_WAIT1, |
5930 | BPF_TCP_FIN_WAIT2, |
5931 | BPF_TCP_TIME_WAIT, |
5932 | BPF_TCP_CLOSE, |
5933 | BPF_TCP_CLOSE_WAIT, |
5934 | BPF_TCP_LAST_ACK, |
5935 | BPF_TCP_LISTEN, |
5936 | BPF_TCP_CLOSING, /* Now a valid state */ |
5937 | BPF_TCP_NEW_SYN_RECV, |
5938 | |
5939 | BPF_TCP_MAX_STATES /* Leave at the end! */ |
5940 | }; |
5941 | |
5942 | enum { |
5943 | TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ |
5944 | TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ |
5945 | TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ |
	TCP_BPF_RTO_MIN		= 1004,	/* Min RTO in usecs */
5947 | /* Copy the SYN pkt to optval |
5948 | * |
5949 | * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the |
5950 | * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit |
5951 | * to only getting from the saved_syn. It can either get the |
5952 | * syn packet from: |
5953 | * |
5954 | * 1. the just-received SYN packet (only available when writing the |
5955 | * SYNACK). It will be useful when it is not necessary to |
	 * save the SYN packet for later use. It is also the only way
5957 | * to get the SYN during syncookie mode because the syn |
5958 | * packet cannot be saved during syncookie. |
5959 | * |
5960 | * OR |
5961 | * |
5962 | * 2. the earlier saved syn which was done by |
5963 | * bpf_setsockopt(TCP_SAVE_SYN). |
5964 | * |
5965 | * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the |
5966 | * SYN packet is obtained. |
5967 | * |
5968 | * If the bpf-prog does not need the IP[46] header, the |
5969 | * bpf-prog can avoid parsing the IP header by using |
5970 | * TCP_BPF_SYN. Otherwise, the bpf-prog can get both |
5971 | * IP[46] and TCP header by using TCP_BPF_SYN_IP. |
5972 | * |
5973 | * >0: Total number of bytes copied |
	 * -ENOSPC: Not enough space in optval. Only the first optlen
	 * bytes are copied.
5976 | * -ENOENT: The SYN skb is not available now and the earlier SYN pkt |
5977 | * is not saved by setsockopt(TCP_SAVE_SYN). |
5978 | */ |
5979 | TCP_BPF_SYN = 1005, /* Copy the TCP header */ |
5980 | TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ |
5981 | TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ |
5982 | }; |
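
/* For example, a sock_ops program writing a SYNACK can retrieve the IP and TCP
 * headers of the just-received SYN (a sketch; the 128-byte buffer size is an
 * arbitrary assumption large enough for typical IP[46] + TCP option lengths):
 *
 *	char syn[128];
 *	int ret;
 *
 *	ret = bpf_getsockopt(skops, IPPROTO_TCP, TCP_BPF_SYN_IP,
 *			     syn, sizeof(syn));
 *	if (ret > 0)
 *		// ret bytes of IP[46] + TCP header are now in syn[]
 */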
5983 | |
5984 | enum { |
5985 | BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), |
5986 | }; |
5987 | |
5988 | /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and |
5989 | * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. |
5990 | */ |
5991 | enum { |
5992 | BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the |
5993 | * total option spaces |
5994 | * required for an established |
5995 | * sk in order to calculate the |
5996 | * MSS. No skb is actually |
5997 | * sent. |
5998 | */ |
5999 | BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode |
6000 | * when sending a SYN. |
6001 | */ |
6002 | }; |
6003 | |
6004 | struct bpf_perf_event_value { |
6005 | __u64 counter; |
6006 | __u64 enabled; |
6007 | __u64 running; |
6008 | }; |
6009 | |
6010 | enum { |
6011 | BPF_DEVCG_ACC_MKNOD = (1ULL << 0), |
6012 | BPF_DEVCG_ACC_READ = (1ULL << 1), |
6013 | BPF_DEVCG_ACC_WRITE = (1ULL << 2), |
6014 | }; |
6015 | |
6016 | enum { |
6017 | BPF_DEVCG_DEV_BLOCK = (1ULL << 0), |
6018 | BPF_DEVCG_DEV_CHAR = (1ULL << 1), |
6019 | }; |
6020 | |
6021 | struct bpf_cgroup_dev_ctx { |
6022 | /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ |
6023 | __u32 access_type; |
6024 | __u32 major; |
6025 | __u32 minor; |
6026 | }; |
6027 | |
6028 | struct bpf_raw_tracepoint_args { |
6029 | __u64 args[0]; |
6030 | }; |
6031 | |
6032 | /* DIRECT: Skip the FIB rules and go to FIB table associated with device |
6033 | * OUTPUT: Do lookup from egress perspective; default is ingress |
6034 | */ |
6035 | enum { |
6036 | BPF_FIB_LOOKUP_DIRECT = (1U << 0), |
6037 | BPF_FIB_LOOKUP_OUTPUT = (1U << 1), |
6038 | }; |
6039 | |
6040 | enum { |
6041 | BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ |
6042 | BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ |
6043 | BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ |
6044 | BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ |
6045 | BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ |
6046 | BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ |
6047 | BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ |
6048 | BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ |
6049 | BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ |
6050 | }; |
6051 | |
6052 | struct bpf_fib_lookup { |
6053 | /* input: network family for lookup (AF_INET, AF_INET6) |
6054 | * output: network family of egress nexthop |
6055 | */ |
6056 | __u8 family; |
6057 | |
6058 | /* set if lookup is to consider L4 data - e.g., FIB rules */ |
6059 | __u8 l4_protocol; |
6060 | __be16 sport; |
6061 | __be16 dport; |
6062 | |
6063 | union { /* used for MTU check */ |
6064 | /* input to lookup */ |
6065 | __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */ |
6066 | |
6067 | /* output: MTU value */ |
6068 | __u16 mtu_result; |
6069 | }; |
6070 | /* input: L3 device index for lookup |
6071 | * output: device index from FIB lookup |
6072 | */ |
6073 | __u32 ifindex; |
6074 | |
6075 | union { |
6076 | /* inputs to lookup */ |
6077 | __u8 tos; /* AF_INET */ |
6078 | __be32 flowinfo; /* AF_INET6, flow_label + priority */ |
6079 | |
6080 | /* output: metric of fib result (IPv4/IPv6 only) */ |
6081 | __u32 rt_metric; |
6082 | }; |
6083 | |
6084 | union { |
6085 | __be32 ipv4_src; |
6086 | __u32 ipv6_src[4]; /* in6_addr; network order */ |
6087 | }; |
6088 | |
6089 | /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in |
6090 | * network header. output: bpf_fib_lookup sets to gateway address |
6091 | * if FIB lookup returns gateway route |
6092 | */ |
6093 | union { |
6094 | __be32 ipv4_dst; |
6095 | __u32 ipv6_dst[4]; /* in6_addr; network order */ |
6096 | }; |
6097 | |
6098 | /* output */ |
6099 | __be16 h_vlan_proto; |
6100 | __be16 h_vlan_TCI; |
6101 | __u8 smac[6]; /* ETH_ALEN */ |
6102 | __u8 dmac[6]; /* ETH_ALEN */ |
6103 | }; |
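
/* For example, an XDP or TC program forwarding IPv4 packets fills in the input
 * fields and, on BPF_FIB_LKUP_RET_SUCCESS, uses the returned MAC addresses and
 * ifindex ("iph" is assumed to be a verified pointer to the IPv4 header and
 * bpf_ntohs() comes from libbpf's bpf_endian.h; a sketch):
 *
 *	struct bpf_fib_lookup params = {};
 *	int rc;
 *
 *	params.family      = AF_INET;
 *	params.tos         = iph->tos;
 *	params.l4_protocol = iph->protocol;
 *	params.tot_len     = bpf_ntohs(iph->tot_len);
 *	params.ipv4_src    = iph->saddr;
 *	params.ipv4_dst    = iph->daddr;
 *	params.ifindex     = ctx->ingress_ifindex;
 *
 *	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
 *	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
 *		// rewrite eth->h_dest/h_source from params.dmac/smac and
 *		// redirect to params.ifindex
 */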
6104 | |
6105 | struct bpf_redir_neigh { |
6106 | /* network family for lookup (AF_INET, AF_INET6) */ |
6107 | __u32 nh_family; |
6108 | /* network address of nexthop; skips fib lookup to find gateway */ |
6109 | union { |
6110 | __be32 ipv4_nh; |
6111 | __u32 ipv6_nh[4]; /* in6_addr; network order */ |
6112 | }; |
6113 | }; |
6114 | |
/* bpf_check_mtu flags */
6116 | enum bpf_check_mtu_flags { |
6117 | BPF_MTU_CHK_SEGS = (1U << 0), |
6118 | }; |
6119 | |
6120 | enum bpf_check_mtu_ret { |
6121 | BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */ |
6122 | BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */ |
6123 | BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */ |
6124 | }; |
6125 | |
6126 | enum bpf_task_fd_type { |
6127 | BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ |
6128 | BPF_FD_TYPE_TRACEPOINT, /* tp name */ |
6129 | BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ |
6130 | BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ |
6131 | BPF_FD_TYPE_UPROBE, /* filename + offset */ |
6132 | BPF_FD_TYPE_URETPROBE, /* filename + offset */ |
6133 | }; |
6134 | |
6135 | enum { |
6136 | BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), |
6137 | BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), |
6138 | BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), |
6139 | }; |
6140 | |
6141 | struct bpf_flow_keys { |
6142 | __u16 nhoff; |
6143 | __u16 thoff; |
6144 | __u16 addr_proto; /* ETH_P_* of valid addrs */ |
6145 | __u8 is_frag; |
6146 | __u8 is_first_frag; |
6147 | __u8 is_encap; |
6148 | __u8 ip_proto; |
6149 | __be16 n_proto; |
6150 | __be16 sport; |
6151 | __be16 dport; |
6152 | union { |
6153 | struct { |
6154 | __be32 ipv4_src; |
6155 | __be32 ipv4_dst; |
6156 | }; |
6157 | struct { |
6158 | __u32 ipv6_src[4]; /* in6_addr; network order */ |
6159 | __u32 ipv6_dst[4]; /* in6_addr; network order */ |
6160 | }; |
6161 | }; |
6162 | __u32 flags; |
6163 | __be32 flow_label; |
6164 | }; |
6165 | |
6166 | struct bpf_func_info { |
6167 | __u32 insn_off; |
6168 | __u32 type_id; |
6169 | }; |
6170 | |
6171 | #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) |
6172 | #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) |
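
/* For example, a tool dumping bpf_line_info records splits line_col as:
 *
 *	__u32 line = BPF_LINE_INFO_LINE_NUM(info->line_col);	// source line
 *	__u32 col  = BPF_LINE_INFO_LINE_COL(info->line_col);	// source column
 */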
6173 | |
6174 | struct bpf_line_info { |
6175 | __u32 insn_off; |
6176 | __u32 file_name_off; |
6177 | __u32 line_off; |
6178 | __u32 line_col; |
6179 | }; |
6180 | |
6181 | struct bpf_spin_lock { |
6182 | __u32 val; |
6183 | }; |
6184 | |
6185 | struct bpf_timer { |
6186 | __u64 :64; |
6187 | __u64 :64; |
6188 | } __attribute__((aligned(8))); |
6189 | |
6190 | struct bpf_sysctl { |
6191 | __u32 write; /* Sysctl is being read (= 0) or written (= 1). |
6192 | * Allows 1,2,4-byte read, but no write. |
6193 | */ |
6194 | __u32 file_pos; /* Sysctl file position to read from, write to. |
				 * Allows 1,2,4-byte read and 4-byte write.
6196 | */ |
6197 | }; |
6198 | |
6199 | struct bpf_sockopt { |
6200 | __bpf_md_ptr(struct bpf_sock *, sk); |
6201 | __bpf_md_ptr(void *, optval); |
6202 | __bpf_md_ptr(void *, optval_end); |
6203 | |
6204 | __s32 level; |
6205 | __s32 optname; |
6206 | __s32 optlen; |
6207 | __s32 retval; |
6208 | }; |
6209 | |
6210 | struct bpf_pidns_info { |
6211 | __u32 pid; |
6212 | __u32 tgid; |
6213 | }; |
6214 | |
6215 | /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ |
6216 | struct bpf_sk_lookup { |
6217 | union { |
6218 | __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ |
6219 | __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ |
6220 | }; |
6221 | |
6222 | __u32 family; /* Protocol family (AF_INET, AF_INET6) */ |
6223 | __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ |
6224 | __u32 remote_ip4; /* Network byte order */ |
6225 | __u32 remote_ip6[4]; /* Network byte order */ |
6226 | __be16 remote_port; /* Network byte order */ |
6227 | __u16 :16; /* Zero padding */ |
6228 | __u32 local_ip4; /* Network byte order */ |
6229 | __u32 local_ip6[4]; /* Network byte order */ |
6230 | __u32 local_port; /* Host byte order */ |
6231 | }; |
6232 | |
6233 | /* |
6234 | * struct btf_ptr is used for typed pointer representation; the |
6235 | * type id is used to render the pointer data as the appropriate type |
6236 | * via the bpf_snprintf_btf() helper described above. A flags field - |
6237 | * potentially to specify additional details about the BTF pointer |
6238 | * (rather than its mode of display) - is included for future use. |
6239 | * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. |
6240 | */ |
6241 | struct btf_ptr { |
6242 | void *ptr; |
6243 | __u32 type_id; |
6244 | __u32 flags; /* BTF ptr flags; unused at present. */ |
6245 | }; |
6246 | |
6247 | /* |
6248 | * Flags to control bpf_snprintf_btf() behaviour. |
6249 | * - BTF_F_COMPACT: no formatting around type information |
6250 | * - BTF_F_NONAME: no struct/union member names/types |
6251 | * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; |
6252 | * equivalent to %px. |
6253 | * - BTF_F_ZERO: show zero-valued struct/union members; they |
6254 | * are not displayed by default |
6255 | */ |
6256 | enum { |
6257 | BTF_F_COMPACT = (1ULL << 0), |
6258 | BTF_F_NONAME = (1ULL << 1), |
6259 | BTF_F_PTR_RAW = (1ULL << 2), |
6260 | BTF_F_ZERO = (1ULL << 3), |
6261 | }; |
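
/* For example, a tracing program can render the current task_struct as text
 * (a sketch; "TASK_BTF_ID" stands for the BTF type id of struct task_struct,
 * which the loader is assumed to have resolved):
 *
 *	char buf[256];
 *	struct btf_ptr p = {
 *		.ptr	 = (void *)bpf_get_current_task_btf(),
 *		.type_id = TASK_BTF_ID,
 *	};
 *
 *	bpf_snprintf_btf(buf, sizeof(buf), &p, sizeof(p),
 *			 BTF_F_COMPACT | BTF_F_PTR_RAW);
 */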
6262 | |
6263 | #endif /* __LINUX_BPF_H__ */ |
6264 | |