// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

#include <net/netfilter/nf_bpf_link.h>

#include <net/sock.h>
#include <net/xdp.h>
#include "../tools/lib/bpf/relo_core.h"
/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data, e.g.
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1. The second
 * one has type_id 2...etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'. Some btf_types may not
 * have a name.
 */
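
/* As a concrete illustration (a hand-written layout, not dumped from a
 * real kernel image), the "const void *" example above would appear in
 * the type section as two consecutive 'struct btf_type' objects; per
 * uapi/linux/btf.h the kind lives in bits 24-28 of the 'info' word:
 *
 *	[1]: { .name_off = 0, .info = BTF_KIND_CONST << 24, .type = 2 }
 *	[2]: { .name_off = 0, .info = BTF_KIND_PTR << 24,   .type = 0 }
 *
 * Both have name_off 0 ("anonymous"), and the PTR's .type = 0 refers
 * to "void".
 */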

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verification (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referred-to type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */
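
/* Roughly, in terms of the verifier state defined below, a successful
 * resolve walks each type through:
 *
 *	NOT_VISITED -> VISITED  (pushed onto env->stack)
 *	VISITED     -> RESOLVED (all of its references settled)
 *
 * and reaching a type that is VISITED but not yet RESOLVED while it is
 * still on the stack is the backedge (loop) case that gets rejected.
 */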

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
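
/* Worked example: for a 12-bit value, BITS_ROUNDDOWN_BYTES(12) == 1
 * (one full byte) and BITS_PER_BYTE_MASKED(12) == 4 (a partial byte
 * remains), so BITS_ROUNDUP_BYTES(12) == 1 + !!4 == 2 bytes.
 */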

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB is enough for ~64k structs with 16 members each, plus
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_COMMON,
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_TRACING,
	BTF_KFUNC_HOOK_SYSCALL,
	BTF_KFUNC_HOOK_FMODRET,
	BTF_KFUNC_HOOK_CGROUP_SKB,
	BTF_KFUNC_HOOK_SCHED_ACT,
	BTF_KFUNC_HOOK_SK_SKB,
	BTF_KFUNC_HOOK_SOCKET_FILTER,
	BTF_KFUNC_HOOK_LWT,
	BTF_KFUNC_HOOK_NETFILTER,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 256,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
	BTF_KFUNC_FILTER_MAX_CNT = 16,
};

struct btf_kfunc_hook_filter {
	btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
	u32 nr_filters;
};

struct btf_kfunc_set_tab {
	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
	struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
};

struct btf_id_dtor_kfunc_tab {
	u32 cnt;
	struct btf_id_dtor_kfunc dtors[];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
	struct btf_struct_metas *struct_meta_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown. */
#define BTF_SHOW_OBJ_SAFE_SIZE 32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes of space left, we can't be sure we will be able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16

/* Type name size */
#define BTF_SHOW_NAME_SIZE 80

/*
 * The suffix of a type that indicates it cannot alias another type when
 * comparing BTF IDs for kfunc invocations.
 */
#define NOCAST_ALIAS_SUFFIX "___init"

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback. See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display. The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it. When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth. See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access. To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it. We copy portions of the data needed
 * to the object "safe" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or, if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself). btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts. obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines whether a new
 * copy_from_kernel_nofault() is needed.
 */
struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;	/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of these are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * a type (t) that refers to another
	 * type through t->type AND whose size cannot
	 * be determined without following t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
	       btf_type_is_decl_tag(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}

s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * positive btf_id or negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		btf_put(btf);
		spin_lock_bh(&btf_idr_lock);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}
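
/* Typical caller pattern (a sketch; "task_struct" is just an example
 * name): on success the caller owns a reference on *btf_p and must
 * drop it with btf_put() when done with the lookup result:
 *
 *	struct btf *btf;
 *	s32 id = bpf_find_btf_id("task_struct", BTF_KIND_STRUCT, &btf);
 *
 *	if (id > 0) {
 *		... use btf and id ...
 *		btf_put(btf);
 *	}
 */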

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of struct: one whose sole member type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM64:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

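/* The accessors below rely on the layout described at the top of this
 * file: kind-specific data starts immediately after the fixed-size
 * 'struct btf_type', hence the (t + 1) pointer arithmetic. E.g. for an
 * ARRAY type, ((const struct btf_array *)(t + 1))->nelems is the
 * element count.
 */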
static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
{
	return (const struct btf_enum64 *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	if (!BTF_STR_OFFSET_VALID(offset))
		return false;

	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	return offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    c != '.')
		return false;
	return true;
}

static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

static bool __btf_name_valid(const struct btf *btf, u32 offset)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false))
			return false;
		src++;
	}

	return !*src;
}

static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset);
}

static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset);
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	const char *name;

	if (!offset)
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_str_by_offset(btf, offset);
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	type_id -= btf->start_id;
	if (type_id >= btf->nr_types)
		return NULL;
	return btf->types[type_id];
}
EXPORT_SYMBOL_GPL(btf_type_by_id);
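
/* Split BTF in practice (illustrative numbers): if vmlinux BTF holds
 * IDs [0..N] and a module BTF has start_id == N + 1, then a lookup of
 * any type_id <= N on the module's btf walks down btf->base_btf first,
 * so module types can reference vmlinux types by their global IDs. The
 * same walk applies to string offsets via start_str_off above.
 */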

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}

/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t) &&
	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
		t = btf_type_by_id(btf, t->type);
	}

	return t;
}
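
/* E.g. for a member declared 'const __be32 x', btf_type_skip_qualifiers()
 * stops at the '__be32' typedef (so its name can still be shown), while
 * btf_type_skip_modifiers() would continue down to the underlying int.
 */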

#define BTF_SHOW_MAX_ITER 10

#define BTF_KIND_BIT(kind) (1ULL << kind)

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
	const struct btf_member *m = show->state.member;
	const struct btf_type *t;
	const struct btf_array *array;
	u32 id = show->state.type_id;
	const char *member = NULL;
	bool show_member = false;
	u64 kinds = 0;
	int i;

	show->state.name[0] = '\0';

	/*
	 * Don't show type name if we're showing an array member;
	 * in that case we show the array type so don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";

	/* Retrieve member name, if any. */
	if (m) {
		member = btf_name_by_offset(show->btf, m->name_off);
		show_member = strlen(member) > 0;
		id = m->type;
	}

	/*
	 * Start with type_id, as we have resolved the struct btf_type *
	 * via btf_modifier_show() past the parent typedef to the child
	 * struct, int etc it is defined as. In such cases, the type_id
	 * still represents the starting type while the struct btf_type *
	 * in our show->state points at the resolved type of the typedef.
	 */
	t = btf_type_by_id(show->btf, id);
	if (!t)
		return "";

	/*
	 * The goal here is to build up the right number of pointer and
	 * array suffixes while ensuring the type name for a typedef
	 * is represented. Along the way we accumulate a list of
	 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for struct, we will just display the pointer value.
	 *
	 * We also want to accumulate the right number of pointer or array
	 * indices in the format string while iterating until we get to
	 * the typedef/pointee/array member target type.
	 *
	 * We start by pointing at the end of pointer and array suffix
	 * strings; as we accumulate pointers and arrays we move the pointer
	 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER levels of nesting of
	 * pointers, arrays and typedefs are supported as a precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to so that we can add parentheses if it is a
	 * "typedef struct" etc.
	 */
	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {

		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_TYPEDEF:
			if (!name)
				name = btf_name_by_offset(show->btf,
							  t->name_off);
			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
			parens = "[";
			if (!t)
				return "";
			array = btf_type_array(t);
			if (array_suffix > array_suffixes)
				array_suffix -= 2;
			id = array->type;
			break;
		case BTF_KIND_PTR:
			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
			if (ptr_suffix > ptr_suffixes)
				ptr_suffix -= 1;
			id = t->type;
			break;
		default:
			id = 0;
			break;
		}
		if (!id)
			break;
		t = btf_type_skip_qualifiers(show->btf, id);
	}
	/* We may not be able to represent this type; bail to be safe */
	if (i == BTF_SHOW_MAX_ITER)
		return "";

	if (!name)
		name = btf_name_by_offset(show->btf, t->name_off);

	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
			 "struct" : "union";
		/* if it's an array of struct/union, parens is already set */
		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
			parens = "{";
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		prefix = "enum";
		break;
	default:
		break;
	}

	/* pointer does not require parens */
	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";

	/* Even if we don't want type name info, we want parentheses etc */
	if (show->flags & BTF_SHOW_NONAME)
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);

	return show->state.name;
}

static const char *__btf_show_indent(struct btf_show *show)
{
	const char *indents = "                                ";
	const char *indent = &indents[strlen(indents)];

	if ((indent - show->state.depth) >= indents)
		return indent - show->state.depth;
	return indents;
}

static const char *btf_show_indent(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
}

static const char *btf_show_newline(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
}

static const char *btf_show_delim(struct btf_show *show)
{
	if (show->state.depth == 0)
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
	    BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
}

__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{
	va_list args;

	if (!show->state.depth_check) {
		va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)				       \
	do {								       \
		if ((value) != (__typeof__(value))0 ||			       \
		    (show->flags & BTF_SHOW_ZERO) ||			       \
		    show->state.depth == 0) {				       \
			btf_show(show, "%s%s" fmt "%s%s",		       \
				 btf_show_indent(show),			       \
				 btf_show_name(show),			       \
				 value, btf_show_delim(show),		       \
				 btf_show_newline(show));		       \
			if (show->state.depth > show->state.depth_to_show)     \
				show->state.depth_to_show = show->state.depth; \
		}							       \
	} while (0)

#define btf_show_type_values(show, fmt, ...)				       \
	do {								       \
		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
			 btf_show_name(show),				       \
			 __VA_ARGS__, btf_show_delim(show),		       \
			 btf_show_newline(show));			       \
		if (show->state.depth > show->state.depth_to_show)	       \
			show->state.depth_to_show = show->state.depth;	       \
	} while (0)
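
/* E.g. a signed int member would be emitted via
 *
 *	btf_show_type_value(show, "%d", *(int *)safe_data);
 *
 * which prints indentation, the "[.member = ](type)" name built by
 * btf_show_name(), the value, then delimiter and newline, and skips
 * the line entirely for a zero value unless BTF_SHOW_ZERO is set or
 * we are at depth 0.
 */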

/* How much is left to copy to safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{
	return show->obj.head + show->obj.size - data;
}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{
	return data >= show->obj.data &&
	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{
	if (btf_show_obj_is_safe(show, data, size))
		return show->obj.safe + (data - show->obj.data);
	return NULL;
}
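
/* Worked example (made-up addresses): if obj.data is 0xffff00001000 and
 * BTF_SHOW_OBJ_SAFE_SIZE is 32, a request for 4 bytes at 0xffff00001010
 * lies inside [obj.data, obj.data + 32) and translates to
 * obj.safe + 0x10; a request at 0xffff00001020 falls outside the window
 * and returns NULL, triggering a fresh copy in btf_show_obj_safe().
 */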

/*
 * Return a safe-to-access version of data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data (determined by the @data pointer and the size of the
 * largest base type we can encounter, represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{
	const struct btf_type *rt;
	int size_left, size;
	void *safe = NULL;

	if (show->flags & BTF_SHOW_UNSAFE)
		return data;

	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
		return NULL;
	}

	/*
	 * Is this toplevel object? If so, set total object size and
	 * initialize pointers. Otherwise check if we still fall within
	 * our safe object data.
	 */
	if (show->state.depth == 0) {
		show->obj.size = size;
		show->obj.head = data;
	} else {
		/*
		 * If the size of the current object is > our remaining
		 * safe buffer we _may_ need to do a new copy. However
		 * consider the case of a nested struct; its size pushes
		 * us over the safe buffer limit, but showing any individual
		 * struct members does not. In such cases, we don't need
		 * to initiate a fresh copy yet; however we definitely need
		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
		 * in our buffer, regardless of the current object size.
		 * The logic here is that as we resolve types we will
		 * hit a base type at some point, and we need to be sure
		 * the next chunk of data is safely available to display
		 * that type info safely. We cannot rely on the size of
		 * the current object here because it may be much larger
		 * than our current buffer (e.g. task_struct is 8k).
		 * All we want to do here is ensure that we can print the
		 * next basic type, which we can if either
		 * - the current type size is within the safe buffer; or
		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *   the safe buffer.
		 */
		safe = __btf_show_obj_safe(show, data,
					   min(size,
					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
	}

	/*
	 * We need a new copy to our safe object, either because we haven't
	 * yet copied and are initializing safe data, or because the data
	 * we want falls outside the boundaries of the safe object.
	 */
	if (!safe) {
		size_left = btf_show_obj_size_left(show, data);
		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
		if (!show->state.status) {
			show->obj.data = data;
			safe = show->obj.safe;
		}
	}

	return safe;
}

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{
	show->state.type = t;
	show->state.type_id = type_id;
	show->state.name[0] = '\0';

	return btf_show_obj_safe(show, t, data);
}

static void btf_show_end_type(struct btf_show *show)
{
	show->state.type = NULL;
	show->state.type_id = 0;
	show->state.name[0] = '\0';
}
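
/* Every kind-specific show() implementation follows the same shape
 * (a sketch, not a verbatim copy of any one of them):
 *
 *	safe_data = btf_show_start_type(show, t, type_id, data);
 *	if (safe_data)
 *		... format *safe_data, e.g. via btf_show_type_value() ...
 *	btf_show_end_type(show);
 */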

static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{
	void *safe_data = btf_show_start_type(show, t, type_id, data);

	if (!safe_data)
		return safe_data;

	btf_show(show, "%s%s%s", btf_show_indent(show),
		 btf_show_name(show),
		 btf_show_newline(show));
	show->state.depth++;
	return safe_data;
}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{
	show->state.depth--;
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
		 btf_show_delim(show), btf_show_newline(show));
	btf_show_end_type(show);
}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{
	show->state.member = m;
}

static void btf_show_start_array_member(struct btf_show *show)
{
	show->state.array_member = 1;
	btf_show_start_member(show, NULL);
}

static void btf_show_end_member(struct btf_show *show)
{
	show->state.member = NULL;
}

static void btf_show_end_array_member(struct btf_show *show)
{
	show->state.array_member = 0;
	btf_show_end_member(show);
}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{
	show->state.array_encoding = array_encoding;
	show->state.array_terminated = 0;
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_array_type(struct btf_show *show)
{
	show->state.array_encoding = 0;
	show->state.array_terminated = 0;
	btf_show_end_aggr_type(show, "]");
}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_struct_type(struct btf_show *show)
{
	btf_show_end_aggr_type(show, "}");
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL) {
		/* btf verifier prints all types it is processing via
		 * btf_verifier_log_type(..., fmt = NULL).
		 * Skip those prints for in-kernel BTF verification.
		 */
		if (!fmt)
			return;

		/* Skip logging when loading module BTF with mismatches permitted */
		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
			return;
	}

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_type_str(t),
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL) {
		if (!fmt)
			return;

		/* Skip logging when loading module BTF with mismatches permitted */
		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
			return;
	}

	/* The CHECK_META phase already did a btf dump.
	 *
	 * If the member is logged again, it must have hit an error when
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size == btf->nr_types) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0) {
			if (!btf->base_btf) {
				/* lazily init VOID type */
				new_types[0] = &btf_void;
				btf->nr_types++;
			}
		} else {
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * btf->nr_types);
		}

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[btf->nr_types++] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some map_delete_elem()
	 * implementations may run with IRQs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}
1642 | |
1643 | static void btf_free_kfunc_set_tab(struct btf *btf) |
1644 | { |
1645 | struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab; |
1646 | int hook; |
1647 | |
1648 | if (!tab) |
1649 | return; |
1650 | /* For module BTF, we directly assign the sets being registered, so |
1651 | * there is nothing to free except kfunc_set_tab. |
1652 | */ |
1653 | if (btf_is_module(btf)) |
1654 | goto free_tab; |
1655 | for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) |
		kfree(tab->sets[hook]);
free_tab:
	kfree(tab);
1659 | btf->kfunc_set_tab = NULL; |
1660 | } |
1661 | |
1662 | static void btf_free_dtor_kfunc_tab(struct btf *btf) |
1663 | { |
1664 | struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; |
1665 | |
1666 | if (!tab) |
1667 | return; |
	kfree(tab);
1669 | btf->dtor_kfunc_tab = NULL; |
1670 | } |
1671 | |
1672 | static void btf_struct_metas_free(struct btf_struct_metas *tab) |
1673 | { |
1674 | int i; |
1675 | |
1676 | if (!tab) |
1677 | return; |
1678 | for (i = 0; i < tab->cnt; i++) |
		btf_record_free(tab->types[i].record);
	kfree(tab);
1681 | } |
1682 | |
1683 | static void btf_free_struct_meta_tab(struct btf *btf) |
1684 | { |
1685 | struct btf_struct_metas *tab = btf->struct_meta_tab; |
1686 | |
1687 | btf_struct_metas_free(tab); |
1688 | btf->struct_meta_tab = NULL; |
1689 | } |
1690 | |
1691 | static void btf_free(struct btf *btf) |
1692 | { |
1693 | btf_free_struct_meta_tab(btf); |
1694 | btf_free_dtor_kfunc_tab(btf); |
1695 | btf_free_kfunc_set_tab(btf); |
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
1701 | } |
1702 | |
1703 | static void btf_free_rcu(struct rcu_head *rcu) |
1704 | { |
1705 | struct btf *btf = container_of(rcu, struct btf, rcu); |
1706 | |
1707 | btf_free(btf); |
1708 | } |
1709 | |
1710 | void btf_get(struct btf *btf) |
1711 | { |
	refcount_inc(&btf->refcnt);
1713 | } |
1714 | |
1715 | void btf_put(struct btf *btf) |
1716 | { |
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
1720 | } |
1721 | } |
1722 | |
1723 | static int env_resolve_init(struct btf_verifier_env *env) |
1724 | { |
1725 | struct btf *btf = env->btf; |
1726 | u32 nr_types = btf->nr_types; |
1727 | u32 *resolved_sizes = NULL; |
1728 | u32 *resolved_ids = NULL; |
1729 | u8 *visit_states = NULL; |
1730 | |
	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;
1743 | if (!visit_states) |
1744 | goto nomem; |
1745 | |
1746 | btf->resolved_sizes = resolved_sizes; |
1747 | btf->resolved_ids = resolved_ids; |
1748 | env->visit_states = visit_states; |
1749 | |
1750 | return 0; |
1751 | |
1752 | nomem: |
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
1756 | return -ENOMEM; |
1757 | } |
1758 | |
1759 | static void btf_verifier_env_free(struct btf_verifier_env *env) |
1760 | { |
	kvfree(env->visit_states);
	kfree(env);
1763 | } |
1764 | |
1765 | static bool env_type_is_resolve_sink(const struct btf_verifier_env *env, |
1766 | const struct btf_type *next_type) |
1767 | { |
1768 | switch (env->resolve_mode) { |
1769 | case RESOLVE_TBD: |
1770 | /* int, enum or void is a sink */ |
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
1785 | default: |
1786 | BUG(); |
1787 | } |
1788 | } |
1789 | |
1790 | static bool env_type_is_resolved(const struct btf_verifier_env *env, |
1791 | u32 type_id) |
1792 | { |
1793 | /* base BTF types should be resolved by now */ |
1794 | if (type_id < env->btf->start_id) |
1795 | return true; |
1796 | |
1797 | return env->visit_states[type_id - env->btf->start_id] == RESOLVED; |
1798 | } |
1799 | |
1800 | static int env_stack_push(struct btf_verifier_env *env, |
1801 | const struct btf_type *t, u32 type_id) |
1802 | { |
1803 | const struct btf *btf = env->btf; |
1804 | struct resolve_vertex *v; |
1805 | |
1806 | if (env->top_stack == MAX_RESOLVE_DEPTH) |
1807 | return -E2BIG; |
1808 | |
1809 | if (type_id < btf->start_id |
1810 | || env->visit_states[type_id - btf->start_id] != NOT_VISITED) |
1811 | return -EEXIST; |
1812 | |
1813 | env->visit_states[type_id - btf->start_id] = VISITED; |
1814 | |
1815 | v = &env->stack[env->top_stack++]; |
1816 | v->t = t; |
1817 | v->type_id = type_id; |
1818 | v->next_member = 0; |
1819 | |
1820 | if (env->resolve_mode == RESOLVE_TBD) { |
1821 | if (btf_type_is_ptr(t)) |
1822 | env->resolve_mode = RESOLVE_PTR; |
1823 | else if (btf_type_is_struct(t) || btf_type_is_array(t)) |
1824 | env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY; |
1825 | } |
1826 | |
1827 | return 0; |
1828 | } |
1829 | |
1830 | static void env_stack_set_next_member(struct btf_verifier_env *env, |
1831 | u16 next_member) |
1832 | { |
1833 | env->stack[env->top_stack - 1].next_member = next_member; |
1834 | } |
1835 | |
1836 | static void env_stack_pop_resolved(struct btf_verifier_env *env, |
1837 | u32 resolved_type_id, |
1838 | u32 resolved_size) |
1839 | { |
1840 | u32 type_id = env->stack[--(env->top_stack)].type_id; |
1841 | struct btf *btf = env->btf; |
1842 | |
1843 | type_id -= btf->start_id; /* adjust to local type id */ |
1844 | btf->resolved_sizes[type_id] = resolved_size; |
1845 | btf->resolved_ids[type_id] = resolved_type_id; |
1846 | env->visit_states[type_id] = RESOLVED; |
1847 | } |
1848 | |
1849 | static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) |
1850 | { |
1851 | return env->top_stack ? &env->stack[env->top_stack - 1] : NULL; |
1852 | } |
1853 | |
1854 | /* Resolve the size of a passed-in "type" |
1855 | * |
1856 | * type: is an array (e.g. u32 array[x][y]) |
1857 | * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY, |
1858 | * *type_size: (x * y * sizeof(u32)). Hence, *type_size always |
1859 | * corresponds to the return type. |
1860 | * *elem_type: u32 |
1861 | * *elem_id: id of u32 |
1862 | * *total_nelems: (x * y). Hence, individual elem size is |
1863 | * (*type_size / *total_nelems) |
1864 | * *type_id: id of type if it's changed within the function, 0 if not |
1865 | * |
1866 | * type: is not an array (e.g. const struct X) |
1867 | * return type: type "struct X" |
1868 | * *type_size: sizeof(struct X) |
1869 | * *elem_type: same as return type ("struct X") |
1870 | * *elem_id: 0 |
1871 | * *total_nelems: 1 |
1872 | * *type_id: id of type if it's changed within the function, 0 if not |
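 *
 * Worked example (illustrative): for "u32 a[2][3]", the return type is
 * the BTF_KIND_ARRAY for u32[2][3], *type_size is 24 (2 * 3 *
 * sizeof(u32)), *total_nelems is 6 and *elem_type is the u32 INT type.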
1873 | */ |
1874 | static const struct btf_type * |
1875 | __btf_resolve_size(const struct btf *btf, const struct btf_type *type, |
1876 | u32 *type_size, const struct btf_type **elem_type, |
1877 | u32 *elem_id, u32 *total_nelems, u32 *type_id) |
1878 | { |
1879 | const struct btf_type *array_type = NULL; |
1880 | const struct btf_array *array = NULL; |
1881 | u32 i, size, nelems = 1, id = 0; |
1882 | |
1883 | for (i = 0; i < MAX_RESOLVE_DEPTH; i++) { |
1884 | switch (BTF_INFO_KIND(type->info)) { |
1885 | /* type->size can be used */ |
1886 | case BTF_KIND_INT: |
1887 | case BTF_KIND_STRUCT: |
1888 | case BTF_KIND_UNION: |
1889 | case BTF_KIND_ENUM: |
1890 | case BTF_KIND_FLOAT: |
1891 | case BTF_KIND_ENUM64: |
1892 | size = type->size; |
1893 | goto resolved; |
1894 | |
1895 | case BTF_KIND_PTR: |
1896 | size = sizeof(void *); |
1897 | goto resolved; |
1898 | |
1899 | /* Modifiers */ |
1900 | case BTF_KIND_TYPEDEF: |
1901 | case BTF_KIND_VOLATILE: |
1902 | case BTF_KIND_CONST: |
1903 | case BTF_KIND_RESTRICT: |
1904 | case BTF_KIND_TYPE_TAG: |
1905 | id = type->type; |
1906 | type = btf_type_by_id(btf, type->type); |
1907 | break; |
1908 | |
1909 | case BTF_KIND_ARRAY: |
1910 | if (!array_type) |
1911 | array_type = type; |
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
1917 | break; |
1918 | |
1919 | /* type without size */ |
1920 | default: |
			return ERR_PTR(-EINVAL);
1922 | } |
1923 | } |
1924 | |
	return ERR_PTR(-EINVAL);
1926 | |
1927 | resolved: |
1928 | if (nelems && size > U32_MAX / nelems) |
		return ERR_PTR(-EINVAL);
1930 | |
1931 | *type_size = nelems * size; |
1932 | if (total_nelems) |
1933 | *total_nelems = nelems; |
1934 | if (elem_type) |
1935 | *elem_type = type; |
1936 | if (elem_id) |
1937 | *elem_id = array ? array->type : 0; |
1938 | if (type_id && id) |
1939 | *type_id = id; |
1940 | |
1941 | return array_type ? : type; |
1942 | } |
1943 | |
1944 | const struct btf_type * |
1945 | btf_resolve_size(const struct btf *btf, const struct btf_type *type, |
1946 | u32 *type_size) |
1947 | { |
1948 | return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL); |
1949 | } |
1950 | |
1951 | static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id) |
1952 | { |
1953 | while (type_id < btf->start_id) |
1954 | btf = btf->base_btf; |
1955 | |
1956 | return btf->resolved_ids[type_id - btf->start_id]; |
1957 | } |
1958 | |
1959 | /* The input param "type_id" must point to a needs_resolve type */ |
1960 | static const struct btf_type *btf_type_id_resolve(const struct btf *btf, |
1961 | u32 *type_id) |
1962 | { |
	*type_id = btf_resolved_type_id(btf, *type_id);
1964 | return btf_type_by_id(btf, *type_id); |
1965 | } |
1966 | |
1967 | static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id) |
1968 | { |
1969 | while (type_id < btf->start_id) |
1970 | btf = btf->base_btf; |
1971 | |
1972 | return btf->resolved_sizes[type_id - btf->start_id]; |
1973 | } |
1974 | |
1975 | const struct btf_type *btf_type_id_size(const struct btf *btf, |
1976 | u32 *type_id, u32 *ret_size) |
1977 | { |
1978 | const struct btf_type *size_type; |
1979 | u32 size_type_id = *type_id; |
1980 | u32 size = 0; |
1981 | |
1982 | size_type = btf_type_by_id(btf, size_type_id); |
	if (btf_type_nosize_or_null(size_type))
1984 | return NULL; |
1985 | |
	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf_resolved_type_size(btf, size_type_id);
	} else if (btf_type_is_ptr(size_type)) {
1991 | size = sizeof(void *); |
1992 | } else { |
1993 | if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) && |
1994 | !btf_type_is_var(size_type))) |
1995 | return NULL; |
1996 | |
		size_type_id = btf_resolved_type_id(btf, size_type_id);
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf_resolved_type_size(btf, size_type_id);
		else if (btf_type_is_ptr(size_type))
2006 | size = sizeof(void *); |
2007 | else |
2008 | return NULL; |
2009 | } |
2010 | |
2011 | *type_id = size_type_id; |
2012 | if (ret_size) |
2013 | *ret_size = size; |
2014 | |
2015 | return size_type; |
2016 | } |
2017 | |
2018 | static int btf_df_check_member(struct btf_verifier_env *env, |
2019 | const struct btf_type *struct_type, |
2020 | const struct btf_member *member, |
2021 | const struct btf_type *member_type) |
2022 | { |
2023 | btf_verifier_log_basic(env, struct_type, |
2024 | "Unsupported check_member" ); |
2025 | return -EINVAL; |
2026 | } |
2027 | |
2028 | static int btf_df_check_kflag_member(struct btf_verifier_env *env, |
2029 | const struct btf_type *struct_type, |
2030 | const struct btf_member *member, |
2031 | const struct btf_type *member_type) |
2032 | { |
2033 | btf_verifier_log_basic(env, struct_type, |
2034 | "Unsupported check_kflag_member" ); |
2035 | return -EINVAL; |
2036 | } |
2037 | |
2038 | /* Used for ptr, array struct/union and float type members. |
2039 | * int, enum and modifier types have their specific callback functions. |
2040 | */ |
2041 | static int btf_generic_check_kflag_member(struct btf_verifier_env *env, |
2042 | const struct btf_type *struct_type, |
2043 | const struct btf_member *member, |
2044 | const struct btf_type *member_type) |
2045 | { |
2046 | if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) { |
2047 | btf_verifier_log_member(env, struct_type, member, |
2048 | fmt: "Invalid member bitfield_size" ); |
2049 | return -EINVAL; |
2050 | } |
2051 | |
2052 | /* bitfield size is 0, so member->offset represents bit offset only. |
	 * It is safe to call the non-kflag check_member variants.
	 */
	return btf_type_ops(member_type)->check_member(env, struct_type,
2056 | member, |
2057 | member_type); |
2058 | } |
2059 | |
2060 | static int btf_df_resolve(struct btf_verifier_env *env, |
2061 | const struct resolve_vertex *v) |
2062 | { |
2063 | btf_verifier_log_basic(env, v->t, "Unsupported resolve" ); |
2064 | return -EINVAL; |
2065 | } |
2066 | |
2067 | static void btf_df_show(const struct btf *btf, const struct btf_type *t, |
2068 | u32 type_id, void *data, u8 bits_offsets, |
2069 | struct btf_show *show) |
2070 | { |
	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2072 | } |
2073 | |
2074 | static int btf_int_check_member(struct btf_verifier_env *env, |
2075 | const struct btf_type *struct_type, |
2076 | const struct btf_member *member, |
2077 | const struct btf_type *member_type) |
2078 | { |
	u32 int_data = btf_type_int(member_type);
2080 | u32 struct_bits_off = member->offset; |
2081 | u32 struct_size = struct_type->size; |
2082 | u32 nr_copy_bits; |
2083 | u32 bytes_offset; |
2084 | |
2085 | if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { |
2086 | btf_verifier_log_member(env, struct_type, member, |
2087 | fmt: "bits_offset exceeds U32_MAX" ); |
2088 | return -EINVAL; |
2089 | } |
2090 | |
2091 | struct_bits_off += BTF_INT_OFFSET(int_data); |
2092 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2093 | nr_copy_bits = BTF_INT_BITS(int_data) + |
2094 | BITS_PER_BYTE_MASKED(struct_bits_off); |
2095 | |
2096 | if (nr_copy_bits > BITS_PER_U128) { |
2097 | btf_verifier_log_member(env, struct_type, member, |
2098 | fmt: "nr_copy_bits exceeds 128" ); |
2099 | return -EINVAL; |
2100 | } |
2101 | |
2102 | if (struct_size < bytes_offset || |
2103 | struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { |
2104 | btf_verifier_log_member(env, struct_type, member, |
2105 | fmt: "Member exceeds struct_size" ); |
2106 | return -EINVAL; |
2107 | } |
2108 | |
2109 | return 0; |
2110 | } |
2111 | |
2112 | static int btf_int_check_kflag_member(struct btf_verifier_env *env, |
2113 | const struct btf_type *struct_type, |
2114 | const struct btf_member *member, |
2115 | const struct btf_type *member_type) |
2116 | { |
2117 | u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; |
	u32 int_data = btf_type_int(member_type);
2119 | u32 struct_size = struct_type->size; |
2120 | u32 nr_copy_bits; |
2121 | |
2122 | /* a regular int type is required for the kflag int member */ |
	if (!btf_type_int_is_regular(member_type)) {
2124 | btf_verifier_log_member(env, struct_type, member, |
2125 | fmt: "Invalid member base type" ); |
2126 | return -EINVAL; |
2127 | } |
2128 | |
2129 | /* check sanity of bitfield size */ |
2130 | nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); |
2131 | struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); |
2132 | nr_int_data_bits = BTF_INT_BITS(int_data); |
2133 | if (!nr_bits) { |
2134 | /* Not a bitfield member, member offset must be at byte |
2135 | * boundary. |
2136 | */ |
2137 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2138 | btf_verifier_log_member(env, struct_type, member, |
2139 | fmt: "Invalid member offset" ); |
2140 | return -EINVAL; |
2141 | } |
2142 | |
2143 | nr_bits = nr_int_data_bits; |
2144 | } else if (nr_bits > nr_int_data_bits) { |
2145 | btf_verifier_log_member(env, struct_type, member, |
2146 | fmt: "Invalid member bitfield_size" ); |
2147 | return -EINVAL; |
2148 | } |
2149 | |
2150 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2151 | nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); |
2152 | if (nr_copy_bits > BITS_PER_U128) { |
2153 | btf_verifier_log_member(env, struct_type, member, |
2154 | fmt: "nr_copy_bits exceeds 128" ); |
2155 | return -EINVAL; |
2156 | } |
2157 | |
2158 | if (struct_size < bytes_offset || |
2159 | struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { |
2160 | btf_verifier_log_member(env, struct_type, member, |
2161 | fmt: "Member exceeds struct_size" ); |
2162 | return -EINVAL; |
2163 | } |
2164 | |
2165 | return 0; |
2166 | } |
2167 | |
2168 | static s32 btf_int_check_meta(struct btf_verifier_env *env, |
2169 | const struct btf_type *t, |
2170 | u32 meta_left) |
2171 | { |
2172 | u32 int_data, nr_bits, meta_needed = sizeof(int_data); |
2173 | u16 encoding; |
2174 | |
2175 | if (meta_left < meta_needed) { |
2176 | btf_verifier_log_basic(env, t, |
2177 | "meta_left:%u meta_needed:%u" , |
2178 | meta_left, meta_needed); |
2179 | return -EINVAL; |
2180 | } |
2181 | |
2182 | if (btf_type_vlen(t)) { |
2183 | btf_verifier_log_type(env, t, "vlen != 0" ); |
2184 | return -EINVAL; |
2185 | } |
2186 | |
2187 | if (btf_type_kflag(t)) { |
2188 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
2189 | return -EINVAL; |
2190 | } |
2191 | |
2192 | int_data = btf_type_int(t); |
2193 | if (int_data & ~BTF_INT_MASK) { |
2194 | btf_verifier_log_basic(env, t, "Invalid int_data:%x" , |
2195 | int_data); |
2196 | return -EINVAL; |
2197 | } |
2198 | |
2199 | nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); |
2200 | |
2201 | if (nr_bits > BITS_PER_U128) { |
2202 | btf_verifier_log_type(env, t, "nr_bits exceeds %zu" , |
2203 | BITS_PER_U128); |
2204 | return -EINVAL; |
2205 | } |
2206 | |
2207 | if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { |
2208 | btf_verifier_log_type(env, t, "nr_bits exceeds type_size" ); |
2209 | return -EINVAL; |
2210 | } |
2211 | |
2212 | /* |
2213 | * Only one of the encoding bits is allowed and it |
2214 | * should be sufficient for the pretty print purpose (i.e. decoding). |
2215 | * Multiple bits can be allowed later if it is found |
2216 | * to be insufficient. |
2217 | */ |
2218 | encoding = BTF_INT_ENCODING(int_data); |
2219 | if (encoding && |
2220 | encoding != BTF_INT_SIGNED && |
2221 | encoding != BTF_INT_CHAR && |
2222 | encoding != BTF_INT_BOOL) { |
2223 | btf_verifier_log_type(env, t, "Unsupported encoding" ); |
2224 | return -ENOTSUPP; |
2225 | } |
2226 | |
2227 | btf_verifier_log_type(env, t, NULL); |
2228 | |
2229 | return meta_needed; |
2230 | } |
2231 | |
2232 | static void btf_int_log(struct btf_verifier_env *env, |
2233 | const struct btf_type *t) |
2234 | { |
2235 | int int_data = btf_type_int(t); |
2236 | |
2237 | btf_verifier_log(env, |
2238 | fmt: "size=%u bits_offset=%u nr_bits=%u encoding=%s" , |
2239 | t->size, BTF_INT_OFFSET(int_data), |
2240 | BTF_INT_BITS(int_data), |
2241 | btf_int_encoding_str(BTF_INT_ENCODING(int_data))); |
2242 | } |
2243 | |
2244 | static void btf_int128_print(struct btf_show *show, void *data) |
2245 | { |
2246 | /* data points to a __int128 number. |
2247 | * Suppose |
2248 | * int128_num = *(__int128 *)data; |
	 * The formulas below show what upper_num and lower_num represent:
2250 | * upper_num = int128_num >> 64; |
2251 | * lower_num = int128_num & 0xffffffffFFFFFFFFULL; |
2252 | */ |
2253 | u64 upper_num, lower_num; |
2254 | |
2255 | #ifdef __BIG_ENDIAN_BITFIELD |
2256 | upper_num = *(u64 *)data; |
2257 | lower_num = *(u64 *)(data + 8); |
2258 | #else |
2259 | upper_num = *(u64 *)(data + 8); |
2260 | lower_num = *(u64 *)data; |
2261 | #endif |
2262 | if (upper_num == 0) |
2263 | btf_show_type_value(show, "0x%llx" , lower_num); |
2264 | else |
2265 | btf_show_type_values(show, "0x%llx%016llx" , upper_num, |
2266 | lower_num); |
2267 | } |
2268 | |
2269 | static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, |
2270 | u16 right_shift_bits) |
2271 | { |
2272 | u64 upper_num, lower_num; |
2273 | |
2274 | #ifdef __BIG_ENDIAN_BITFIELD |
2275 | upper_num = print_num[0]; |
2276 | lower_num = print_num[1]; |
2277 | #else |
2278 | upper_num = print_num[1]; |
2279 | lower_num = print_num[0]; |
2280 | #endif |
2281 | |
2282 | /* shake out un-needed bits by shift/or operations */ |
2283 | if (left_shift_bits >= 64) { |
2284 | upper_num = lower_num << (left_shift_bits - 64); |
2285 | lower_num = 0; |
2286 | } else { |
2287 | upper_num = (upper_num << left_shift_bits) | |
2288 | (lower_num >> (64 - left_shift_bits)); |
2289 | lower_num = lower_num << left_shift_bits; |
2290 | } |
2291 | |
2292 | if (right_shift_bits >= 64) { |
2293 | lower_num = upper_num >> (right_shift_bits - 64); |
2294 | upper_num = 0; |
2295 | } else { |
2296 | lower_num = (lower_num >> right_shift_bits) | |
2297 | (upper_num << (64 - right_shift_bits)); |
2298 | upper_num = upper_num >> right_shift_bits; |
2299 | } |
2300 | |
2301 | #ifdef __BIG_ENDIAN_BITFIELD |
2302 | print_num[0] = upper_num; |
2303 | print_num[1] = lower_num; |
2304 | #else |
2305 | print_num[0] = lower_num; |
2306 | print_num[1] = upper_num; |
2307 | #endif |
2308 | } |
2309 | |
2310 | static void btf_bitfield_show(void *data, u8 bits_offset, |
2311 | u8 nr_bits, struct btf_show *show) |
2312 | { |
2313 | u16 left_shift_bits, right_shift_bits; |
2314 | u8 nr_copy_bytes; |
2315 | u8 nr_copy_bits; |
2316 | u64 print_num[2] = {}; |
2317 | |
2318 | nr_copy_bits = nr_bits + bits_offset; |
2319 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); |
2320 | |
2321 | memcpy(print_num, data, nr_copy_bytes); |
2322 | |
2323 | #ifdef __BIG_ENDIAN_BITFIELD |
2324 | left_shift_bits = bits_offset; |
2325 | #else |
2326 | left_shift_bits = BITS_PER_U128 - nr_copy_bits; |
2327 | #endif |
2328 | right_shift_bits = BITS_PER_U128 - nr_bits; |
2329 | |
2330 | btf_int128_shift(print_num, left_shift_bits, right_shift_bits); |
	btf_int128_print(show, print_num);
2332 | } |
2333 | |
2334 | |
2335 | static void btf_int_bits_show(const struct btf *btf, |
2336 | const struct btf_type *t, |
2337 | void *data, u8 bits_offset, |
2338 | struct btf_show *show) |
2339 | { |
2340 | u32 int_data = btf_type_int(t); |
2341 | u8 nr_bits = BTF_INT_BITS(int_data); |
2342 | u8 total_bits_offset; |
2343 | |
2344 | /* |
2345 | * bits_offset is at most 7. |
2346 | * BTF_INT_OFFSET() cannot exceed 128 bits. |
2347 | */ |
2348 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); |
2349 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
2350 | bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); |
2351 | btf_bitfield_show(data, bits_offset, nr_bits, show); |
2352 | } |
2353 | |
2354 | static void btf_int_show(const struct btf *btf, const struct btf_type *t, |
2355 | u32 type_id, void *data, u8 bits_offset, |
2356 | struct btf_show *show) |
2357 | { |
2358 | u32 int_data = btf_type_int(t); |
2359 | u8 encoding = BTF_INT_ENCODING(int_data); |
2360 | bool sign = encoding & BTF_INT_SIGNED; |
2361 | u8 nr_bits = BTF_INT_BITS(int_data); |
2362 | void *safe_data; |
2363 | |
2364 | safe_data = btf_show_start_type(show, t, type_id, data); |
2365 | if (!safe_data) |
2366 | return; |
2367 | |
2368 | if (bits_offset || BTF_INT_OFFSET(int_data) || |
2369 | BITS_PER_BYTE_MASKED(nr_bits)) { |
		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2371 | goto out; |
2372 | } |
2373 | |
2374 | switch (nr_bits) { |
2375 | case 128: |
2376 | btf_int128_print(show, data: safe_data); |
2377 | break; |
2378 | case 64: |
2379 | if (sign) |
2380 | btf_show_type_value(show, "%lld" , *(s64 *)safe_data); |
2381 | else |
2382 | btf_show_type_value(show, "%llu" , *(u64 *)safe_data); |
2383 | break; |
2384 | case 32: |
2385 | if (sign) |
2386 | btf_show_type_value(show, "%d" , *(s32 *)safe_data); |
2387 | else |
2388 | btf_show_type_value(show, "%u" , *(u32 *)safe_data); |
2389 | break; |
2390 | case 16: |
2391 | if (sign) |
2392 | btf_show_type_value(show, "%d" , *(s16 *)safe_data); |
2393 | else |
2394 | btf_show_type_value(show, "%u" , *(u16 *)safe_data); |
2395 | break; |
2396 | case 8: |
2397 | if (show->state.array_encoding == BTF_INT_CHAR) { |
2398 | /* check for null terminator */ |
2399 | if (show->state.array_terminated) |
2400 | break; |
2401 | if (*(char *)data == '\0') { |
2402 | show->state.array_terminated = 1; |
2403 | break; |
2404 | } |
2405 | if (isprint(*(char *)data)) { |
2406 | btf_show_type_value(show, "'%c'" , |
2407 | *(char *)safe_data); |
2408 | break; |
2409 | } |
2410 | } |
2411 | if (sign) |
2412 | btf_show_type_value(show, "%d" , *(s8 *)safe_data); |
2413 | else |
2414 | btf_show_type_value(show, "%u" , *(u8 *)safe_data); |
2415 | break; |
2416 | default: |
		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2418 | break; |
2419 | } |
2420 | out: |
2421 | btf_show_end_type(show); |
2422 | } |
2423 | |
2424 | static const struct btf_kind_operations int_ops = { |
2425 | .check_meta = btf_int_check_meta, |
2426 | .resolve = btf_df_resolve, |
2427 | .check_member = btf_int_check_member, |
2428 | .check_kflag_member = btf_int_check_kflag_member, |
2429 | .log_details = btf_int_log, |
2430 | .show = btf_int_show, |
2431 | }; |
2432 | |
2433 | static int btf_modifier_check_member(struct btf_verifier_env *env, |
2434 | const struct btf_type *struct_type, |
2435 | const struct btf_member *member, |
2436 | const struct btf_type *member_type) |
2437 | { |
2438 | const struct btf_type *resolved_type; |
2439 | u32 resolved_type_id = member->type; |
2440 | struct btf_member resolved_member; |
2441 | struct btf *btf = env->btf; |
2442 | |
	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2444 | if (!resolved_type) { |
2445 | btf_verifier_log_member(env, struct_type, member, |
2446 | fmt: "Invalid member" ); |
2447 | return -EINVAL; |
2448 | } |
2449 | |
2450 | resolved_member = *member; |
2451 | resolved_member.type = resolved_type_id; |
2452 | |
	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2454 | &resolved_member, |
2455 | resolved_type); |
2456 | } |
2457 | |
2458 | static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, |
2459 | const struct btf_type *struct_type, |
2460 | const struct btf_member *member, |
2461 | const struct btf_type *member_type) |
2462 | { |
2463 | const struct btf_type *resolved_type; |
2464 | u32 resolved_type_id = member->type; |
2465 | struct btf_member resolved_member; |
2466 | struct btf *btf = env->btf; |
2467 | |
	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2469 | if (!resolved_type) { |
2470 | btf_verifier_log_member(env, struct_type, member, |
2471 | fmt: "Invalid member" ); |
2472 | return -EINVAL; |
2473 | } |
2474 | |
2475 | resolved_member = *member; |
2476 | resolved_member.type = resolved_type_id; |
2477 | |
	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2479 | &resolved_member, |
2480 | resolved_type); |
2481 | } |
2482 | |
2483 | static int btf_ptr_check_member(struct btf_verifier_env *env, |
2484 | const struct btf_type *struct_type, |
2485 | const struct btf_member *member, |
2486 | const struct btf_type *member_type) |
2487 | { |
2488 | u32 struct_size, struct_bits_off, bytes_offset; |
2489 | |
2490 | struct_size = struct_type->size; |
2491 | struct_bits_off = member->offset; |
2492 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2493 | |
2494 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2495 | btf_verifier_log_member(env, struct_type, member, |
2496 | fmt: "Member is not byte aligned" ); |
2497 | return -EINVAL; |
2498 | } |
2499 | |
2500 | if (struct_size - bytes_offset < sizeof(void *)) { |
2501 | btf_verifier_log_member(env, struct_type, member, |
2502 | fmt: "Member exceeds struct_size" ); |
2503 | return -EINVAL; |
2504 | } |
2505 | |
2506 | return 0; |
2507 | } |
2508 | |
2509 | static int btf_ref_type_check_meta(struct btf_verifier_env *env, |
2510 | const struct btf_type *t, |
2511 | u32 meta_left) |
2512 | { |
2513 | const char *value; |
2514 | |
2515 | if (btf_type_vlen(t)) { |
2516 | btf_verifier_log_type(env, t, "vlen != 0" ); |
2517 | return -EINVAL; |
2518 | } |
2519 | |
2520 | if (btf_type_kflag(t)) { |
2521 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
2522 | return -EINVAL; |
2523 | } |
2524 | |
2525 | if (!BTF_TYPE_ID_VALID(t->type)) { |
2526 | btf_verifier_log_type(env, t, "Invalid type_id" ); |
2527 | return -EINVAL; |
2528 | } |
2529 | |
2530 | /* typedef/type_tag type must have a valid name, and other ref types, |
2531 | * volatile, const, restrict, should have a null name. |
2532 | */ |
2533 | if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { |
2534 | if (!t->name_off || |
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
2537 | return -EINVAL; |
2538 | } |
2539 | } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { |
		value = btf_name_by_offset(env->btf, t->name_off);
		if (!value || !value[0]) {
			btf_verifier_log_type(env, t, "Invalid name");
2543 | return -EINVAL; |
2544 | } |
2545 | } else { |
2546 | if (t->name_off) { |
2547 | btf_verifier_log_type(env, t, "Invalid name" ); |
2548 | return -EINVAL; |
2549 | } |
2550 | } |
2551 | |
2552 | btf_verifier_log_type(env, t, NULL); |
2553 | |
2554 | return 0; |
2555 | } |
2556 | |
2557 | static int btf_modifier_resolve(struct btf_verifier_env *env, |
2558 | const struct resolve_vertex *v) |
2559 | { |
2560 | const struct btf_type *t = v->t; |
2561 | const struct btf_type *next_type; |
2562 | u32 next_type_id = t->type; |
2563 | struct btf *btf = env->btf; |
2564 | |
	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes so that they can
	 * save us some type-following when we use them later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);
2595 | |
2596 | return 0; |
2597 | } |
2598 | |
2599 | static int btf_var_resolve(struct btf_verifier_env *env, |
2600 | const struct resolve_vertex *v) |
2601 | { |
2602 | const struct btf_type *next_type; |
2603 | const struct btf_type *t = v->t; |
2604 | u32 next_type_id = t->type; |
2605 | struct btf *btf = env->btf; |
2606 | |
	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* We must resolve to something concrete at this point; no
	 * forward types or similar types that would resolve to a size
	 * of zero are allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);
2641 | |
2642 | return 0; |
2643 | } |
2644 | |
2645 | static int btf_ptr_resolve(struct btf_verifier_env *env, |
2646 | const struct resolve_vertex *v) |
2647 | { |
2648 | const struct btf_type *next_type; |
2649 | const struct btf_type *t = v->t; |
2650 | u32 next_type_id = t->type; |
2651 | struct btf *btf = env->btf; |
2652 | |
	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr does not refer back to
	 * the current ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);
2698 | |
2699 | return 0; |
2700 | } |
2701 | |
2702 | static void btf_modifier_show(const struct btf *btf, |
2703 | const struct btf_type *t, |
2704 | u32 type_id, void *data, |
2705 | u8 bits_offset, struct btf_show *show) |
2706 | { |
2707 | if (btf->resolved_ids) |
		t = btf_type_id_resolve(btf, &type_id);
	else
		t = btf_type_skip_modifiers(btf, type_id, NULL);
2711 | |
2712 | btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); |
2713 | } |
2714 | |
2715 | static void btf_var_show(const struct btf *btf, const struct btf_type *t, |
2716 | u32 type_id, void *data, u8 bits_offset, |
2717 | struct btf_show *show) |
2718 | { |
	t = btf_type_id_resolve(btf, &type_id);
2720 | |
2721 | btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); |
2722 | } |
2723 | |
2724 | static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, |
2725 | u32 type_id, void *data, u8 bits_offset, |
2726 | struct btf_show *show) |
2727 | { |
2728 | void *safe_data; |
2729 | |
2730 | safe_data = btf_show_start_type(show, t, type_id, data); |
2731 | if (!safe_data) |
2732 | return; |
2733 | |
2734 | /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ |
2735 | if (show->flags & BTF_SHOW_PTR_RAW) |
2736 | btf_show_type_value(show, "0x%px" , *(void **)safe_data); |
2737 | else |
2738 | btf_show_type_value(show, "0x%p" , *(void **)safe_data); |
2739 | btf_show_end_type(show); |
2740 | } |
2741 | |
2742 | static void btf_ref_type_log(struct btf_verifier_env *env, |
2743 | const struct btf_type *t) |
2744 | { |
2745 | btf_verifier_log(env, fmt: "type_id=%u" , t->type); |
2746 | } |
2747 | |
2748 | static struct btf_kind_operations modifier_ops = { |
2749 | .check_meta = btf_ref_type_check_meta, |
2750 | .resolve = btf_modifier_resolve, |
2751 | .check_member = btf_modifier_check_member, |
2752 | .check_kflag_member = btf_modifier_check_kflag_member, |
2753 | .log_details = btf_ref_type_log, |
2754 | .show = btf_modifier_show, |
2755 | }; |
2756 | |
2757 | static struct btf_kind_operations ptr_ops = { |
2758 | .check_meta = btf_ref_type_check_meta, |
2759 | .resolve = btf_ptr_resolve, |
2760 | .check_member = btf_ptr_check_member, |
2761 | .check_kflag_member = btf_generic_check_kflag_member, |
2762 | .log_details = btf_ref_type_log, |
2763 | .show = btf_ptr_show, |
2764 | }; |
2765 | |
2766 | static s32 btf_fwd_check_meta(struct btf_verifier_env *env, |
2767 | const struct btf_type *t, |
2768 | u32 meta_left) |
2769 | { |
2770 | if (btf_type_vlen(t)) { |
2771 | btf_verifier_log_type(env, t, "vlen != 0" ); |
2772 | return -EINVAL; |
2773 | } |
2774 | |
2775 | if (t->type) { |
2776 | btf_verifier_log_type(env, t, "type != 0" ); |
2777 | return -EINVAL; |
2778 | } |
2779 | |
2780 | /* fwd type must have a valid name */ |
2781 | if (!t->name_off || |
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
2784 | return -EINVAL; |
2785 | } |
2786 | |
2787 | btf_verifier_log_type(env, t, NULL); |
2788 | |
2789 | return 0; |
2790 | } |
2791 | |
2792 | static void btf_fwd_type_log(struct btf_verifier_env *env, |
2793 | const struct btf_type *t) |
2794 | { |
2795 | btf_verifier_log(env, fmt: "%s" , btf_type_kflag(t) ? "union" : "struct" ); |
2796 | } |
2797 | |
2798 | static struct btf_kind_operations fwd_ops = { |
2799 | .check_meta = btf_fwd_check_meta, |
2800 | .resolve = btf_df_resolve, |
2801 | .check_member = btf_df_check_member, |
2802 | .check_kflag_member = btf_df_check_kflag_member, |
2803 | .log_details = btf_fwd_type_log, |
2804 | .show = btf_df_show, |
2805 | }; |
2806 | |
2807 | static int btf_array_check_member(struct btf_verifier_env *env, |
2808 | const struct btf_type *struct_type, |
2809 | const struct btf_member *member, |
2810 | const struct btf_type *member_type) |
2811 | { |
2812 | u32 struct_bits_off = member->offset; |
2813 | u32 struct_size, bytes_offset; |
2814 | u32 array_type_id, array_size; |
2815 | struct btf *btf = env->btf; |
2816 | |
2817 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2818 | btf_verifier_log_member(env, struct_type, member, |
2819 | fmt: "Member is not byte aligned" ); |
2820 | return -EINVAL; |
2821 | } |
2822 | |
2823 | array_type_id = member->type; |
	btf_type_id_size(btf, &array_type_id, &array_size);
2825 | struct_size = struct_type->size; |
2826 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2827 | if (struct_size - bytes_offset < array_size) { |
2828 | btf_verifier_log_member(env, struct_type, member, |
2829 | fmt: "Member exceeds struct_size" ); |
2830 | return -EINVAL; |
2831 | } |
2832 | |
2833 | return 0; |
2834 | } |
2835 | |
2836 | static s32 btf_array_check_meta(struct btf_verifier_env *env, |
2837 | const struct btf_type *t, |
2838 | u32 meta_left) |
2839 | { |
2840 | const struct btf_array *array = btf_type_array(t); |
2841 | u32 meta_needed = sizeof(*array); |
2842 | |
2843 | if (meta_left < meta_needed) { |
2844 | btf_verifier_log_basic(env, t, |
2845 | "meta_left:%u meta_needed:%u" , |
2846 | meta_left, meta_needed); |
2847 | return -EINVAL; |
2848 | } |
2849 | |
2850 | /* array type should not have a name */ |
2851 | if (t->name_off) { |
2852 | btf_verifier_log_type(env, t, "Invalid name" ); |
2853 | return -EINVAL; |
2854 | } |
2855 | |
2856 | if (btf_type_vlen(t)) { |
2857 | btf_verifier_log_type(env, t, "vlen != 0" ); |
2858 | return -EINVAL; |
2859 | } |
2860 | |
2861 | if (btf_type_kflag(t)) { |
2862 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
2863 | return -EINVAL; |
2864 | } |
2865 | |
2866 | if (t->size) { |
2867 | btf_verifier_log_type(env, t, "size != 0" ); |
2868 | return -EINVAL; |
2869 | } |
2870 | |
2871 | /* Array elem type and index type cannot be in type void, |
2872 | * so !array->type and !array->index_type are not allowed. |
2873 | */ |
2874 | if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { |
2875 | btf_verifier_log_type(env, t, "Invalid elem" ); |
2876 | return -EINVAL; |
2877 | } |
2878 | |
2879 | if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { |
2880 | btf_verifier_log_type(env, t, "Invalid index" ); |
2881 | return -EINVAL; |
2882 | } |
2883 | |
2884 | btf_verifier_log_type(env, t, NULL); |
2885 | |
2886 | return meta_needed; |
2887 | } |
2888 | |
2889 | static int btf_array_resolve(struct btf_verifier_env *env, |
2890 | const struct resolve_vertex *v) |
2891 | { |
	const struct btf_array *array = btf_type_array(v->t);
2893 | const struct btf_type *elem_type, *index_type; |
2894 | u32 elem_type_id, index_type_id; |
2895 | struct btf *btf = env->btf; |
2896 | u32 elem_size; |
2897 | |
2898 | /* Check array->index_type */ |
2899 | index_type_id = array->index_type; |
2900 | index_type = btf_type_by_id(btf, index_type_id); |
	if (btf_type_nosize_or_null(index_type) ||
	    btf_type_is_resolve_source_only(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}
2917 | |
2918 | /* Check array->type */ |
2919 | elem_type_id = array->type; |
2920 | elem_type = btf_type_by_id(btf, elem_type_id); |
	if (btf_type_nosize_or_null(elem_type) ||
	    btf_type_is_resolve_source_only(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
2935 | return -EINVAL; |
2936 | } |
2937 | |
	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
2940 | return -EINVAL; |
2941 | } |
2942 | |
2943 | if (array->nelems && elem_size > U32_MAX / array->nelems) { |
2944 | btf_verifier_log_type(env, v->t, |
2945 | "Array size overflows U32_MAX" ); |
2946 | return -EINVAL; |
2947 | } |
2948 | |
	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2950 | |
2951 | return 0; |
2952 | } |
2953 | |
2954 | static void btf_array_log(struct btf_verifier_env *env, |
2955 | const struct btf_type *t) |
2956 | { |
2957 | const struct btf_array *array = btf_type_array(t); |
2958 | |
2959 | btf_verifier_log(env, fmt: "type_id=%u index_type_id=%u nr_elems=%u" , |
2960 | array->type, array->index_type, array->nelems); |
2961 | } |
2962 | |
2963 | static void __btf_array_show(const struct btf *btf, const struct btf_type *t, |
2964 | u32 type_id, void *data, u8 bits_offset, |
2965 | struct btf_show *show) |
2966 | { |
2967 | const struct btf_array *array = btf_type_array(t); |
2968 | const struct btf_kind_operations *elem_ops; |
2969 | const struct btf_type *elem_type; |
2970 | u32 i, elem_size = 0, elem_type_id; |
2971 | u16 encoding = 0; |
2972 | |
2973 | elem_type_id = array->type; |
	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
	if (elem_type && btf_type_has_size(elem_type))
		elem_size = elem_type->size;

	if (elem_type && btf_type_is_int(elem_type)) {
		u32 int_type = btf_type_int(elem_type);
2980 | |
2981 | encoding = BTF_INT_ENCODING(int_type); |
2982 | |
2983 | /* |
2984 | * BTF_INT_CHAR encoding never seems to be set for |
2985 | * char arrays, so if size is 1 and element is |
2986 | * printable as a char, we'll do that. |
2987 | */ |
2988 | if (elem_size == 1) |
2989 | encoding = BTF_INT_CHAR; |
2990 | } |
2991 | |
	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
2993 | return; |
2994 | |
2995 | if (!elem_type) |
2996 | goto out; |
	elem_ops = btf_type_ops(elem_type);
2998 | |
2999 | for (i = 0; i < array->nelems; i++) { |
3000 | |
3001 | btf_show_start_array_member(show); |
3002 | |
3003 | elem_ops->show(btf, elem_type, elem_type_id, data, |
3004 | bits_offset, show); |
3005 | data += elem_size; |
3006 | |
3007 | btf_show_end_array_member(show); |
3008 | |
3009 | if (show->state.array_terminated) |
3010 | break; |
3011 | } |
3012 | out: |
3013 | btf_show_end_array_type(show); |
3014 | } |
3015 | |
3016 | static void btf_array_show(const struct btf *btf, const struct btf_type *t, |
3017 | u32 type_id, void *data, u8 bits_offset, |
3018 | struct btf_show *show) |
3019 | { |
3020 | const struct btf_member *m = show->state.member; |
3021 | |
3022 | /* |
3023 | * First check if any members would be shown (are non-zero). |
3024 | * See comments above "struct btf_show" definition for more |
3025 | * details on how this works at a high-level. |
3026 | */ |
3027 | if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { |
3028 | if (!show->state.depth_check) { |
3029 | show->state.depth_check = show->state.depth + 1; |
3030 | show->state.depth_to_show = 0; |
3031 | } |
3032 | __btf_array_show(btf, t, type_id, data, bits_offset, show); |
3033 | show->state.member = m; |
3034 | |
3035 | if (show->state.depth_check != show->state.depth + 1) |
3036 | return; |
3037 | show->state.depth_check = 0; |
3038 | |
3039 | if (show->state.depth_to_show <= show->state.depth) |
3040 | return; |
3041 | /* |
3042 | * Reaching here indicates we have recursed and found |
3043 | * non-zero array member(s). |
3044 | */ |
3045 | } |
3046 | __btf_array_show(btf, t, type_id, data, bits_offset, show); |
3047 | } |
3048 | |
3049 | static struct btf_kind_operations array_ops = { |
3050 | .check_meta = btf_array_check_meta, |
3051 | .resolve = btf_array_resolve, |
3052 | .check_member = btf_array_check_member, |
3053 | .check_kflag_member = btf_generic_check_kflag_member, |
3054 | .log_details = btf_array_log, |
3055 | .show = btf_array_show, |
3056 | }; |
3057 | |
3058 | static int btf_struct_check_member(struct btf_verifier_env *env, |
3059 | const struct btf_type *struct_type, |
3060 | const struct btf_member *member, |
3061 | const struct btf_type *member_type) |
3062 | { |
3063 | u32 struct_bits_off = member->offset; |
3064 | u32 struct_size, bytes_offset; |
3065 | |
3066 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
3067 | btf_verifier_log_member(env, struct_type, member, |
3068 | fmt: "Member is not byte aligned" ); |
3069 | return -EINVAL; |
3070 | } |
3071 | |
3072 | struct_size = struct_type->size; |
3073 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
3074 | if (struct_size - bytes_offset < member_type->size) { |
3075 | btf_verifier_log_member(env, struct_type, member, |
3076 | fmt: "Member exceeds struct_size" ); |
3077 | return -EINVAL; |
3078 | } |
3079 | |
3080 | return 0; |
3081 | } |
3082 | |
3083 | static s32 btf_struct_check_meta(struct btf_verifier_env *env, |
3084 | const struct btf_type *t, |
3085 | u32 meta_left) |
3086 | { |
3087 | bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; |
3088 | const struct btf_member *member; |
3089 | u32 meta_needed, last_offset; |
3090 | struct btf *btf = env->btf; |
3091 | u32 struct_size = t->size; |
3092 | u32 offset; |
3093 | u16 i; |
3094 | |
3095 | meta_needed = btf_type_vlen(t) * sizeof(*member); |
3096 | if (meta_left < meta_needed) { |
3097 | btf_verifier_log_basic(env, t, |
3098 | "meta_left:%u meta_needed:%u" , |
3099 | meta_left, meta_needed); |
3100 | return -EINVAL; |
3101 | } |
3102 | |
3103 | /* struct type either no name or a valid one */ |
3104 | if (t->name_off && |
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
3107 | return -EINVAL; |
3108 | } |
3109 | |
3110 | btf_verifier_log_type(env, t, NULL); |
3111 | |
3112 | last_offset = 0; |
3113 | for_each_member(i, t, member) { |
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
3118 | return -EINVAL; |
3119 | } |
3120 | |
3121 | /* struct member either no name or a valid one */ |
3122 | if (member->name_off && |
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
3125 | return -EINVAL; |
3126 | } |
3127 | /* A member cannot be in type void */ |
3128 | if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { |
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
3131 | return -EINVAL; |
3132 | } |
3133 | |
		offset = __btf_member_bit_offset(t, member);
		if (is_union && offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
3138 | return -EINVAL; |
3139 | } |
3140 | |
3141 | /* |
3142 | * ">" instead of ">=" because the last member could be |
3143 | * "char a[0];" |
3144 | */ |
3145 | if (last_offset > offset) { |
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
3148 | return -EINVAL; |
3149 | } |
3150 | |
3151 | if (BITS_ROUNDUP_BYTES(offset) > struct_size) { |
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
3154 | return -EINVAL; |
3155 | } |
3156 | |
		btf_verifier_log_member(env, t, member, NULL);
3158 | last_offset = offset; |
3159 | } |
3160 | |
3161 | return meta_needed; |
3162 | } |
3163 | |
3164 | static int btf_struct_resolve(struct btf_verifier_env *env, |
3165 | const struct resolve_vertex *v) |
3166 | { |
3167 | const struct btf_member *member; |
3168 | int err; |
3169 | u16 i; |
3170 | |
3171 | /* Before continue resolving the next_member, |
3172 | * ensure the last member is indeed resolved to a |
3173 | * type with size info. |
3174 | */ |
3175 | if (v->next_member) { |
3176 | const struct btf_type *last_member_type; |
3177 | const struct btf_member *last_member; |
3178 | u32 last_member_type_id; |
3179 | |
		last_member = btf_type_member(v->t) + v->next_member - 1;
3181 | last_member_type_id = last_member->type; |
3182 | if (WARN_ON_ONCE(!env_type_is_resolved(env, |
3183 | last_member_type_id))) |
3184 | return -EINVAL; |
3185 | |
3186 | last_member_type = btf_type_by_id(env->btf, |
3187 | last_member_type_id); |
		if (btf_type_kflag(v->t))
			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
								last_member,
								last_member_type);
		else
			err = btf_type_ops(last_member_type)->check_member(env, v->t,
								last_member,
								last_member_type);
3196 | if (err) |
3197 | return err; |
3198 | } |
3199 | |
3200 | for_each_member_from(i, v->next_member, v->t, member) { |
3201 | u32 member_type_id = member->type; |
3202 | const struct btf_type *member_type = btf_type_by_id(env->btf, |
3203 | member_type_id); |
3204 | |
		if (btf_type_nosize_or_null(member_type) ||
		    btf_type_is_resolve_source_only(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
3216 | } |
3217 | |
		if (btf_type_kflag(v->t))
			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
									    member,
									    member_type);
		else
			err = btf_type_ops(member_type)->check_member(env, v->t,
								      member,
								      member_type);
3226 | if (err) |
3227 | return err; |
3228 | } |
3229 | |
	env_stack_pop_resolved(env, 0, 0);
3231 | |
3232 | return 0; |
3233 | } |
3234 | |
3235 | static void btf_struct_log(struct btf_verifier_env *env, |
3236 | const struct btf_type *t) |
3237 | { |
3238 | btf_verifier_log(env, fmt: "size=%u vlen=%u" , t->size, btf_type_vlen(t)); |
3239 | } |
3240 | |
3241 | enum { |
3242 | BTF_FIELD_IGNORE = 0, |
3243 | BTF_FIELD_FOUND = 1, |
3244 | }; |
3245 | |
3246 | struct btf_field_info { |
3247 | enum btf_field_type type; |
3248 | u32 off; |
3249 | union { |
3250 | struct { |
3251 | u32 type_id; |
3252 | } kptr; |
3253 | struct { |
3254 | const char *node_name; |
3255 | u32 value_btf_id; |
3256 | } graph_root; |
3257 | }; |
3258 | }; |
3259 | |
3260 | static int btf_find_struct(const struct btf *btf, const struct btf_type *t, |
3261 | u32 off, int sz, enum btf_field_type field_type, |
3262 | struct btf_field_info *info) |
3263 | { |
3264 | if (!__btf_type_is_struct(t)) |
3265 | return BTF_FIELD_IGNORE; |
3266 | if (t->size != sz) |
3267 | return BTF_FIELD_IGNORE; |
3268 | info->type = field_type; |
3269 | info->off = off; |
3270 | return BTF_FIELD_FOUND; |
3271 | } |
3272 | |
3273 | static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, |
3274 | u32 off, int sz, struct btf_field_info *info) |
3275 | { |
3276 | enum btf_field_type type; |
3277 | u32 res_id; |
3278 | |
3279 | /* Permit modifiers on the pointer itself */ |
3280 | if (btf_type_is_volatile(t)) |
3281 | t = btf_type_by_id(btf, t->type); |
3282 | /* For PTR, sz is always == 8 */ |
3283 | if (!btf_type_is_ptr(t)) |
3284 | return BTF_FIELD_IGNORE; |
3285 | t = btf_type_by_id(btf, t->type); |
3286 | |
3287 | if (!btf_type_is_type_tag(t)) |
3288 | return BTF_FIELD_IGNORE; |
3289 | /* Reject extra tags */ |
	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3291 | return -EINVAL; |
3292 | if (!strcmp("kptr_untrusted" , __btf_name_by_offset(btf, offset: t->name_off))) |
3293 | type = BPF_KPTR_UNREF; |
3294 | else if (!strcmp("kptr" , __btf_name_by_offset(btf, offset: t->name_off))) |
3295 | type = BPF_KPTR_REF; |
3296 | else if (!strcmp("percpu_kptr" , __btf_name_by_offset(btf, offset: t->name_off))) |
3297 | type = BPF_KPTR_PERCPU; |
3298 | else |
3299 | return -EINVAL; |
3300 | |
3301 | /* Get the base type */ |
3302 | t = btf_type_skip_modifiers(btf, id: t->type, res_id: &res_id); |
3303 | /* Only pointer to struct is allowed */ |
3304 | if (!__btf_type_is_struct(t)) |
3305 | return -EINVAL; |
3306 | |
3307 | info->type = type; |
3308 | info->off = off; |
3309 | info->kptr.type_id = res_id; |
3310 | return BTF_FIELD_FOUND; |
3311 | } |
3312 | |
3313 | const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, |
3314 | int comp_idx, const char *tag_key) |
3315 | { |
3316 | const char *value = NULL; |
3317 | int i; |
3318 | |
3319 | for (i = 1; i < btf_nr_types(btf); i++) { |
3320 | const struct btf_type *t = btf_type_by_id(btf, i); |
3321 | int len = strlen(tag_key); |
3322 | |
3323 | if (!btf_type_is_decl_tag(t)) |
3324 | continue; |
3325 | if (pt != btf_type_by_id(btf, t->type) || |
3326 | btf_type_decl_tag(t)->component_idx != comp_idx) |
3327 | continue; |
if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
continue;
/* Prevent duplicate entries for same type */
if (value)
return ERR_PTR(-EEXIST);
value = __btf_name_by_offset(btf, t->name_off) + len;
}
if (!value)
return ERR_PTR(-ENOENT);
3337 | return value; |
3338 | } |
3339 | |
3340 | static int |
3341 | btf_find_graph_root(const struct btf *btf, const struct btf_type *pt, |
3342 | const struct btf_type *t, int comp_idx, u32 off, |
3343 | int sz, struct btf_field_info *info, |
3344 | enum btf_field_type head_type) |
3345 | { |
3346 | const char *node_field_name; |
3347 | const char *value_type; |
3348 | s32 id; |
3349 | |
3350 | if (!__btf_type_is_struct(t)) |
3351 | return BTF_FIELD_IGNORE; |
3352 | if (t->size != sz) |
3353 | return BTF_FIELD_IGNORE; |
value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
if (IS_ERR(value_type))
return -EINVAL;
node_field_name = strstr(value_type, ":");
if (!node_field_name)
return -EINVAL;
value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
if (!value_type)
return -ENOMEM;
id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
kfree(value_type);
if (id < 0)
return id;
node_field_name++;
if (str_is_empty(node_field_name))
return -EINVAL;
3369 | return -EINVAL; |
3370 | info->type = head_type; |
3371 | info->off = off; |
3372 | info->graph_root.value_btf_id = id; |
3373 | info->graph_root.node_name = node_field_name; |
3374 | return BTF_FIELD_FOUND; |
3375 | } |
3376 | |
3377 | #define field_mask_test_name(field_type, field_type_str) \ |
3378 | if (field_mask & field_type && !strcmp(name, field_type_str)) { \ |
3379 | type = field_type; \ |
3380 | goto end; \ |
3381 | } |
3382 | |
3383 | static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, |
3384 | int *align, int *sz) |
3385 | { |
3386 | int type = 0; |
3387 | |
3388 | if (field_mask & BPF_SPIN_LOCK) { |
if (!strcmp(name, "bpf_spin_lock")) {
3390 | if (*seen_mask & BPF_SPIN_LOCK) |
3391 | return -E2BIG; |
3392 | *seen_mask |= BPF_SPIN_LOCK; |
3393 | type = BPF_SPIN_LOCK; |
3394 | goto end; |
3395 | } |
3396 | } |
3397 | if (field_mask & BPF_TIMER) { |
if (!strcmp(name, "bpf_timer")) {
3399 | if (*seen_mask & BPF_TIMER) |
3400 | return -E2BIG; |
3401 | *seen_mask |= BPF_TIMER; |
3402 | type = BPF_TIMER; |
3403 | goto end; |
3404 | } |
3405 | } |
field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
field_mask_test_name(BPF_RB_NODE, "bpf_rb_node");
field_mask_test_name(BPF_REFCOUNT, "bpf_refcount");
3411 | |
3412 | /* Only return BPF_KPTR when all other types with matchable names fail */ |
3413 | if (field_mask & BPF_KPTR) { |
3414 | type = BPF_KPTR_REF; |
3415 | goto end; |
3416 | } |
3417 | return 0; |
3418 | end: |
3419 | *sz = btf_field_type_size(type); |
3420 | *align = btf_field_type_align(type); |
3421 | return type; |
3422 | } |
3423 | |
3424 | #undef field_mask_test_name |
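
/* Illustrative sketch (hypothetical map value): the matching above is
 * purely by type name, e.g.
 *
 *	struct map_value {
 *		struct bpf_spin_lock lock;	-> BPF_SPIN_LOCK
 *		struct bpf_timer t;		-> BPF_TIMER
 *		struct bpf_list_head head;	-> BPF_LIST_HEAD
 *		struct task_struct __kptr *p;	-> BPF_KPTR_REF
 *	};
 *
 * Only bpf_spin_lock and bpf_timer are limited to one instance each (a
 * duplicate yields -E2BIG). Members whose type name matches nothing
 * fall through to the BPF_KPTR case and are classified, or ignored, by
 * btf_find_kptr().
 */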
3425 | |
3426 | static int btf_find_struct_field(const struct btf *btf, |
3427 | const struct btf_type *t, u32 field_mask, |
3428 | struct btf_field_info *info, int info_cnt) |
3429 | { |
3430 | int ret, idx = 0, align, sz, field_type; |
3431 | const struct btf_member *member; |
3432 | struct btf_field_info tmp; |
3433 | u32 i, off, seen_mask = 0; |
3434 | |
3435 | for_each_member(i, t, member) { |
3436 | const struct btf_type *member_type = btf_type_by_id(btf, |
3437 | member->type); |
3438 | |
field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
field_mask, &seen_mask, &align, &sz);
3441 | if (field_type == 0) |
3442 | continue; |
3443 | if (field_type < 0) |
3444 | return field_type; |
3445 | |
off = __btf_member_bit_offset(t, member);
3447 | if (off % 8) |
3448 | /* valid C code cannot generate such BTF */ |
3449 | return -EINVAL; |
3450 | off /= 8; |
3451 | if (off % align) |
3452 | continue; |
3453 | |
3454 | switch (field_type) { |
3455 | case BPF_SPIN_LOCK: |
3456 | case BPF_TIMER: |
3457 | case BPF_LIST_NODE: |
3458 | case BPF_RB_NODE: |
3459 | case BPF_REFCOUNT: |
ret = btf_find_struct(btf, member_type, off, sz, field_type,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
return ret;
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
ret = btf_find_kptr(btf, member_type, off, sz,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
return ret;
break;
case BPF_LIST_HEAD:
case BPF_RB_ROOT:
ret = btf_find_graph_root(btf, t, member_type,
i, off, sz,
idx < info_cnt ? &info[idx] : &tmp,
field_type);
3479 | if (ret < 0) |
3480 | return ret; |
3481 | break; |
3482 | default: |
3483 | return -EFAULT; |
3484 | } |
3485 | |
3486 | if (ret == BTF_FIELD_IGNORE) |
3487 | continue; |
3488 | if (idx >= info_cnt) |
3489 | return -E2BIG; |
3490 | ++idx; |
3491 | } |
3492 | return idx; |
3493 | } |
3494 | |
3495 | static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, |
3496 | u32 field_mask, struct btf_field_info *info, |
3497 | int info_cnt) |
3498 | { |
3499 | int ret, idx = 0, align, sz, field_type; |
3500 | const struct btf_var_secinfo *vsi; |
3501 | struct btf_field_info tmp; |
3502 | u32 i, off, seen_mask = 0; |
3503 | |
3504 | for_each_vsi(i, t, vsi) { |
3505 | const struct btf_type *var = btf_type_by_id(btf, vsi->type); |
3506 | const struct btf_type *var_type = btf_type_by_id(btf, var->type); |
3507 | |
field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
field_mask, &seen_mask, &align, &sz);
3510 | if (field_type == 0) |
3511 | continue; |
3512 | if (field_type < 0) |
3513 | return field_type; |
3514 | |
3515 | off = vsi->offset; |
3516 | if (vsi->size != sz) |
3517 | continue; |
3518 | if (off % align) |
3519 | continue; |
3520 | |
3521 | switch (field_type) { |
3522 | case BPF_SPIN_LOCK: |
3523 | case BPF_TIMER: |
3524 | case BPF_LIST_NODE: |
3525 | case BPF_RB_NODE: |
3526 | case BPF_REFCOUNT: |
ret = btf_find_struct(btf, var_type, off, sz, field_type,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
return ret;
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
ret = btf_find_kptr(btf, var_type, off, sz,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
return ret;
break;
case BPF_LIST_HEAD:
case BPF_RB_ROOT:
ret = btf_find_graph_root(btf, var, var_type,
-1, off, sz,
idx < info_cnt ? &info[idx] : &tmp,
field_type);
3546 | if (ret < 0) |
3547 | return ret; |
3548 | break; |
3549 | default: |
3550 | return -EFAULT; |
3551 | } |
3552 | |
3553 | if (ret == BTF_FIELD_IGNORE) |
3554 | continue; |
3555 | if (idx >= info_cnt) |
3556 | return -E2BIG; |
3557 | ++idx; |
3558 | } |
3559 | return idx; |
3560 | } |
3561 | |
3562 | static int btf_find_field(const struct btf *btf, const struct btf_type *t, |
3563 | u32 field_mask, struct btf_field_info *info, |
3564 | int info_cnt) |
3565 | { |
3566 | if (__btf_type_is_struct(t)) |
3567 | return btf_find_struct_field(btf, t, field_mask, info, info_cnt); |
3568 | else if (btf_type_is_datasec(t)) |
3569 | return btf_find_datasec_var(btf, t, field_mask, info, info_cnt); |
3570 | return -EINVAL; |
3571 | } |
3572 | |
3573 | static int btf_parse_kptr(const struct btf *btf, struct btf_field *field, |
3574 | struct btf_field_info *info) |
3575 | { |
3576 | struct module *mod = NULL; |
3577 | const struct btf_type *t; |
/* If a matching BTF type is found in kernel or module BTFs, kptr_btf
 * points at that BTF; otherwise it is the program BTF.
 */
3581 | struct btf *kptr_btf; |
3582 | int ret; |
3583 | s32 id; |
3584 | |
3585 | /* Find type in map BTF, and use it to look up the matching type |
3586 | * in vmlinux or module BTFs, by name and kind. |
3587 | */ |
3588 | t = btf_type_by_id(btf, info->kptr.type_id); |
id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
&kptr_btf);
3591 | if (id == -ENOENT) { |
3592 | /* btf_parse_kptr should only be called w/ btf = program BTF */ |
3593 | WARN_ON_ONCE(btf_is_kernel(btf)); |
3594 | |
3595 | /* Type exists only in program BTF. Assume that it's a MEM_ALLOC |
3596 | * kptr allocated via bpf_obj_new |
3597 | */ |
3598 | field->kptr.dtor = NULL; |
3599 | id = info->kptr.type_id; |
3600 | kptr_btf = (struct btf *)btf; |
btf_get(kptr_btf);
3602 | goto found_dtor; |
3603 | } |
3604 | if (id < 0) |
3605 | return id; |
3606 | |
3607 | /* Find and stash the function pointer for the destruction function that |
3608 | * needs to be eventually invoked from the map free path. |
3609 | */ |
3610 | if (info->type == BPF_KPTR_REF) { |
3611 | const struct btf_type *dtor_func; |
3612 | const char *dtor_func_name; |
3613 | unsigned long addr; |
3614 | s32 dtor_btf_id; |
3615 | |
3616 | /* This call also serves as a whitelist of allowed objects that |
3617 | * can be used as a referenced pointer and be stored in a map at |
3618 | * the same time. |
3619 | */ |
dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3621 | if (dtor_btf_id < 0) { |
3622 | ret = dtor_btf_id; |
3623 | goto end_btf; |
3624 | } |
3625 | |
3626 | dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id); |
3627 | if (!dtor_func) { |
3628 | ret = -ENOENT; |
3629 | goto end_btf; |
3630 | } |
3631 | |
if (btf_is_module(kptr_btf)) {
mod = btf_try_get_module(kptr_btf);
3634 | if (!mod) { |
3635 | ret = -ENXIO; |
3636 | goto end_btf; |
3637 | } |
3638 | } |
3639 | |
3640 | /* We already verified dtor_func to be btf_type_is_func |
3641 | * in register_btf_id_dtor_kfuncs. |
3642 | */ |
dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
addr = kallsyms_lookup_name(dtor_func_name);
3645 | if (!addr) { |
3646 | ret = -EINVAL; |
3647 | goto end_mod; |
3648 | } |
3649 | field->kptr.dtor = (void *)addr; |
3650 | } |
3651 | |
3652 | found_dtor: |
3653 | field->kptr.btf_id = id; |
3654 | field->kptr.btf = kptr_btf; |
3655 | field->kptr.module = mod; |
3656 | return 0; |
3657 | end_mod: |
module_put(mod);
end_btf:
btf_put(kptr_btf);
3661 | return ret; |
3662 | } |
3663 | |
3664 | static int btf_parse_graph_root(const struct btf *btf, |
3665 | struct btf_field *field, |
3666 | struct btf_field_info *info, |
3667 | const char *node_type_name, |
3668 | size_t node_type_align) |
3669 | { |
3670 | const struct btf_type *t, *n = NULL; |
3671 | const struct btf_member *member; |
3672 | u32 offset; |
3673 | int i; |
3674 | |
3675 | t = btf_type_by_id(btf, info->graph_root.value_btf_id); |
/* We've already checked that value_btf_id is a struct type. We
 * just need to figure out the offset of the node field (e.g.
 * bpf_list_node or bpf_rb_node) and verify its type.
 */
3680 | for_each_member(i, t, member) { |
3681 | if (strcmp(info->graph_root.node_name, |
__btf_name_by_offset(btf, member->name_off)))
continue;
/* Invalid BTF, two members with same name */
if (n)
return -EINVAL;
n = btf_type_by_id(btf, member->type);
if (!__btf_type_is_struct(n))
return -EINVAL;
if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
return -EINVAL;
offset = __btf_member_bit_offset(n, member);
3693 | if (offset % 8) |
3694 | return -EINVAL; |
3695 | offset /= 8; |
3696 | if (offset % node_type_align) |
3697 | return -EINVAL; |
3698 | |
3699 | field->graph_root.btf = (struct btf *)btf; |
3700 | field->graph_root.value_btf_id = info->graph_root.value_btf_id; |
3701 | field->graph_root.node_offset = offset; |
3702 | } |
3703 | if (!n) |
3704 | return -ENOENT; |
3705 | return 0; |
3706 | } |
3707 | |
3708 | static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, |
3709 | struct btf_field_info *info) |
3710 | { |
return btf_parse_graph_root(btf, field, info, "bpf_list_node",
__alignof__(struct bpf_list_node));
3713 | } |
3714 | |
3715 | static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field, |
3716 | struct btf_field_info *info) |
3717 | { |
return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
__alignof__(struct bpf_rb_node));
3720 | } |
3721 | |
3722 | static int btf_field_cmp(const void *_a, const void *_b, const void *priv) |
3723 | { |
3724 | const struct btf_field *a = (const struct btf_field *)_a; |
3725 | const struct btf_field *b = (const struct btf_field *)_b; |
3726 | |
3727 | if (a->offset < b->offset) |
3728 | return -1; |
3729 | else if (a->offset > b->offset) |
3730 | return 1; |
3731 | return 0; |
3732 | } |
3733 | |
3734 | struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t, |
3735 | u32 field_mask, u32 value_size) |
3736 | { |
3737 | struct btf_field_info info_arr[BTF_FIELDS_MAX]; |
3738 | u32 next_off = 0, field_type_size; |
3739 | struct btf_record *rec; |
3740 | int ret, i, cnt; |
3741 | |
ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
if (ret < 0)
return ERR_PTR(ret);
3745 | if (!ret) |
3746 | return NULL; |
3747 | |
3748 | cnt = ret; |
3749 | /* This needs to be kzalloc to zero out padding and unused fields, see |
3750 | * comment in btf_record_equal. |
3751 | */ |
3752 | rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN); |
3753 | if (!rec) |
return ERR_PTR(-ENOMEM);
3755 | |
3756 | rec->spin_lock_off = -EINVAL; |
3757 | rec->timer_off = -EINVAL; |
3758 | rec->refcount_off = -EINVAL; |
3759 | for (i = 0; i < cnt; i++) { |
field_type_size = btf_field_type_size(info_arr[i].type);
if (info_arr[i].off + field_type_size > value_size) {
WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
3763 | ret = -EFAULT; |
3764 | goto end; |
3765 | } |
3766 | if (info_arr[i].off < next_off) { |
3767 | ret = -EEXIST; |
3768 | goto end; |
3769 | } |
3770 | next_off = info_arr[i].off + field_type_size; |
3771 | |
3772 | rec->field_mask |= info_arr[i].type; |
3773 | rec->fields[i].offset = info_arr[i].off; |
3774 | rec->fields[i].type = info_arr[i].type; |
3775 | rec->fields[i].size = field_type_size; |
3776 | |
3777 | switch (info_arr[i].type) { |
3778 | case BPF_SPIN_LOCK: |
3779 | WARN_ON_ONCE(rec->spin_lock_off >= 0); |
3780 | /* Cache offset for faster lookup at runtime */ |
3781 | rec->spin_lock_off = rec->fields[i].offset; |
3782 | break; |
3783 | case BPF_TIMER: |
3784 | WARN_ON_ONCE(rec->timer_off >= 0); |
3785 | /* Cache offset for faster lookup at runtime */ |
3786 | rec->timer_off = rec->fields[i].offset; |
3787 | break; |
3788 | case BPF_REFCOUNT: |
3789 | WARN_ON_ONCE(rec->refcount_off >= 0); |
3790 | /* Cache offset for faster lookup at runtime */ |
3791 | rec->refcount_off = rec->fields[i].offset; |
3792 | break; |
3793 | case BPF_KPTR_UNREF: |
3794 | case BPF_KPTR_REF: |
3795 | case BPF_KPTR_PERCPU: |
ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
if (ret < 0)
goto end;
break;
case BPF_LIST_HEAD:
ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
if (ret < 0)
goto end;
break;
case BPF_RB_ROOT:
ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
3807 | if (ret < 0) |
3808 | goto end; |
3809 | break; |
3810 | case BPF_LIST_NODE: |
3811 | case BPF_RB_NODE: |
3812 | break; |
3813 | default: |
3814 | ret = -EFAULT; |
3815 | goto end; |
3816 | } |
3817 | rec->cnt++; |
3818 | } |
3819 | |
/* bpf_{list_head, rb_root} require bpf_spin_lock */
if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) {
3823 | ret = -EINVAL; |
3824 | goto end; |
3825 | } |
3826 | |
3827 | if (rec->refcount_off < 0 && |
btf_record_has_field(rec, BPF_LIST_NODE) &&
btf_record_has_field(rec, BPF_RB_NODE)) {
3830 | ret = -EINVAL; |
3831 | goto end; |
3832 | } |
3833 | |
sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
NULL, rec);
3836 | |
3837 | return rec; |
3838 | end: |
3839 | btf_record_free(rec); |
return ERR_PTR(ret);
3841 | } |
3842 | |
3843 | #define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT) |
3844 | #define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE) |
3845 | |
3846 | int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec) |
3847 | { |
3848 | int i; |
3849 | |
3850 | /* There are three types that signify ownership of some other type: |
3851 | * kptr_ref, bpf_list_head, bpf_rb_root. |
3852 | * kptr_ref only supports storing kernel types, which can't store |
3853 | * references to program allocated local types. |
3854 | * |
3855 | * Hence we only need to ensure that bpf_{list_head,rb_root} ownership |
3856 | * does not form cycles. |
3857 | */ |
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK))
3859 | return 0; |
3860 | for (i = 0; i < rec->cnt; i++) { |
3861 | struct btf_struct_meta *meta; |
3862 | u32 btf_id; |
3863 | |
3864 | if (!(rec->fields[i].type & GRAPH_ROOT_MASK)) |
3865 | continue; |
3866 | btf_id = rec->fields[i].graph_root.value_btf_id; |
3867 | meta = btf_find_struct_meta(btf, btf_id); |
3868 | if (!meta) |
3869 | return -EFAULT; |
3870 | rec->fields[i].graph_root.value_rec = meta->record; |
3871 | |
3872 | /* We need to set value_rec for all root types, but no need |
3873 | * to check ownership cycle for a type unless it's also a |
3874 | * node type. |
3875 | */ |
3876 | if (!(rec->field_mask & GRAPH_NODE_MASK)) |
3877 | continue; |
3878 | |
3879 | /* We need to ensure ownership acyclicity among all types. The |
3880 | * proper way to do it would be to topologically sort all BTF |
3881 | * IDs based on the ownership edges, since there can be multiple |
3882 | * bpf_{list_head,rb_node} in a type. Instead, we use the |
* following reasoning:
3884 | * |
3885 | * - A type can only be owned by another type in user BTF if it |
3886 | * has a bpf_{list,rb}_node. Let's call these node types. |
3887 | * - A type can only _own_ another type in user BTF if it has a |
3888 | * bpf_{list_head,rb_root}. Let's call these root types. |
3889 | * |
3890 | * We ensure that if a type is both a root and node, its |
3891 | * element types cannot be root types. |
3892 | * |
3893 | * To ensure acyclicity: |
3894 | * |
* When A is a root type but not a node, its ownership
* chain can be:
* A -> B -> C
* Where:
* - A is a root, e.g. has bpf_rb_root.
* - B is both a root and node, e.g. has bpf_rb_node and
* bpf_list_head.
* - C is only a node, e.g. has bpf_list_node.
*
* When A is both a root and node, some other type already
* owns it in the BTF domain, hence it cannot own
* another root type through any of the ownership edges.
* A -> B
* Where:
* - A is both a root and node.
* - B is only a node.
3911 | */ |
3912 | if (meta->record->field_mask & GRAPH_ROOT_MASK) |
3913 | return -ELOOP; |
3914 | } |
3915 | return 0; |
3916 | } |
3917 | |
3918 | static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, |
3919 | u32 type_id, void *data, u8 bits_offset, |
3920 | struct btf_show *show) |
3921 | { |
3922 | const struct btf_member *member; |
3923 | void *safe_data; |
3924 | u32 i; |
3925 | |
3926 | safe_data = btf_show_start_struct_type(show, t, type_id, data); |
3927 | if (!safe_data) |
3928 | return; |
3929 | |
3930 | for_each_member(i, t, member) { |
3931 | const struct btf_type *member_type = btf_type_by_id(btf, |
3932 | member->type); |
3933 | const struct btf_kind_operations *ops; |
3934 | u32 member_offset, bitfield_size; |
3935 | u32 bytes_offset; |
3936 | u8 bits8_offset; |
3937 | |
btf_show_start_member(show, member);

member_offset = __btf_member_bit_offset(t, member);
bitfield_size = __btf_member_bitfield_size(t, member);
3942 | bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); |
3943 | bits8_offset = BITS_PER_BYTE_MASKED(member_offset); |
3944 | if (bitfield_size) { |
safe_data = btf_show_start_type(show, member_type,
member->type,
data + bytes_offset);
if (safe_data)
btf_bitfield_show(safe_data,
bits8_offset,
bitfield_size, show);
3952 | btf_show_end_type(show); |
3953 | } else { |
ops = btf_type_ops(member_type);
3955 | ops->show(btf, member_type, member->type, |
3956 | data + bytes_offset, bits8_offset, show); |
3957 | } |
3958 | |
3959 | btf_show_end_member(show); |
3960 | } |
3961 | |
3962 | btf_show_end_struct_type(show); |
3963 | } |
3964 | |
3965 | static void btf_struct_show(const struct btf *btf, const struct btf_type *t, |
3966 | u32 type_id, void *data, u8 bits_offset, |
3967 | struct btf_show *show) |
3968 | { |
3969 | const struct btf_member *m = show->state.member; |
3970 | |
3971 | /* |
3972 | * First check if any members would be shown (are non-zero). |
3973 | * See comments above "struct btf_show" definition for more |
3974 | * details on how this works at a high-level. |
3975 | */ |
3976 | if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { |
3977 | if (!show->state.depth_check) { |
3978 | show->state.depth_check = show->state.depth + 1; |
3979 | show->state.depth_to_show = 0; |
3980 | } |
3981 | __btf_struct_show(btf, t, type_id, data, bits_offset, show); |
3982 | /* Restore saved member data here */ |
3983 | show->state.member = m; |
3984 | if (show->state.depth_check != show->state.depth + 1) |
3985 | return; |
3986 | show->state.depth_check = 0; |
3987 | |
3988 | if (show->state.depth_to_show <= show->state.depth) |
3989 | return; |
3990 | /* |
3991 | * Reaching here indicates we have recursed and found |
3992 | * non-zero child values. |
3993 | */ |
3994 | } |
3995 | |
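/* Second (or only) pass: actually emit the members. The depth-check
 * pass above, when taken, only recorded whether any non-zero member
 * values exist.
 */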
3996 | __btf_struct_show(btf, t, type_id, data, bits_offset, show); |
3997 | } |
3998 | |
3999 | static struct btf_kind_operations struct_ops = { |
4000 | .check_meta = btf_struct_check_meta, |
4001 | .resolve = btf_struct_resolve, |
4002 | .check_member = btf_struct_check_member, |
4003 | .check_kflag_member = btf_generic_check_kflag_member, |
4004 | .log_details = btf_struct_log, |
4005 | .show = btf_struct_show, |
4006 | }; |
4007 | |
4008 | static int btf_enum_check_member(struct btf_verifier_env *env, |
4009 | const struct btf_type *struct_type, |
4010 | const struct btf_member *member, |
4011 | const struct btf_type *member_type) |
4012 | { |
4013 | u32 struct_bits_off = member->offset; |
4014 | u32 struct_size, bytes_offset; |
4015 | |
4016 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
btf_verifier_log_member(env, struct_type, member,
"Member is not byte aligned");
4019 | return -EINVAL; |
4020 | } |
4021 | |
4022 | struct_size = struct_type->size; |
4023 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
4024 | if (struct_size - bytes_offset < member_type->size) { |
btf_verifier_log_member(env, struct_type, member,
"Member exceeds struct_size");
4027 | return -EINVAL; |
4028 | } |
4029 | |
4030 | return 0; |
4031 | } |
4032 | |
4033 | static int btf_enum_check_kflag_member(struct btf_verifier_env *env, |
4034 | const struct btf_type *struct_type, |
4035 | const struct btf_member *member, |
4036 | const struct btf_type *member_type) |
4037 | { |
4038 | u32 struct_bits_off, nr_bits, bytes_end, struct_size; |
4039 | u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; |
4040 | |
4041 | struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); |
4042 | nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); |
4043 | if (!nr_bits) { |
4044 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
btf_verifier_log_member(env, struct_type, member,
"Member is not byte aligned");
4047 | return -EINVAL; |
4048 | } |
4049 | |
4050 | nr_bits = int_bitsize; |
4051 | } else if (nr_bits > int_bitsize) { |
btf_verifier_log_member(env, struct_type, member,
"Invalid member bitfield_size");
4054 | return -EINVAL; |
4055 | } |
4056 | |
4057 | struct_size = struct_type->size; |
4058 | bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); |
4059 | if (struct_size < bytes_end) { |
btf_verifier_log_member(env, struct_type, member,
"Member exceeds struct_size");
4062 | return -EINVAL; |
4063 | } |
4064 | |
4065 | return 0; |
4066 | } |
4067 | |
4068 | static s32 btf_enum_check_meta(struct btf_verifier_env *env, |
4069 | const struct btf_type *t, |
4070 | u32 meta_left) |
4071 | { |
4072 | const struct btf_enum *enums = btf_type_enum(t); |
4073 | struct btf *btf = env->btf; |
4074 | const char *fmt_str; |
4075 | u16 i, nr_enums; |
4076 | u32 meta_needed; |
4077 | |
4078 | nr_enums = btf_type_vlen(t); |
4079 | meta_needed = nr_enums * sizeof(*enums); |
4080 | |
4081 | if (meta_left < meta_needed) { |
4082 | btf_verifier_log_basic(env, t, |
4083 | "meta_left:%u meta_needed:%u" , |
4084 | meta_left, meta_needed); |
4085 | return -EINVAL; |
4086 | } |
4087 | |
if (t->size > 8 || !is_power_of_2(t->size)) {
btf_verifier_log_type(env, t, "Unexpected size");
4090 | return -EINVAL; |
4091 | } |
4092 | |
/* An enum type either has no name or a valid one */
if (t->name_off &&
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4097 | return -EINVAL; |
4098 | } |
4099 | |
4100 | btf_verifier_log_type(env, t, NULL); |
4101 | |
4102 | for (i = 0; i < nr_enums; i++) { |
if (!btf_name_offset_valid(btf, enums[i].name_off)) {
btf_verifier_log(env, "\tInvalid name_offset:%u",
4105 | enums[i].name_off); |
4106 | return -EINVAL; |
4107 | } |
4108 | |
4109 | /* enum member must have a valid name */ |
4110 | if (!enums[i].name_off || |
!btf_name_valid_identifier(btf, enums[i].name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4113 | return -EINVAL; |
4114 | } |
4115 | |
4116 | if (env->log.level == BPF_LOG_KERNEL) |
4117 | continue; |
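/* For enums, kind_flag encodes signedness, hence the signed vs
 * unsigned format string below.
 */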
fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
btf_verifier_log(env, fmt_str,
__btf_name_by_offset(btf, enums[i].name_off),
4121 | enums[i].val); |
4122 | } |
4123 | |
4124 | return meta_needed; |
4125 | } |
4126 | |
4127 | static void btf_enum_log(struct btf_verifier_env *env, |
4128 | const struct btf_type *t) |
4129 | { |
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4131 | } |
4132 | |
4133 | static void btf_enum_show(const struct btf *btf, const struct btf_type *t, |
4134 | u32 type_id, void *data, u8 bits_offset, |
4135 | struct btf_show *show) |
4136 | { |
4137 | const struct btf_enum *enums = btf_type_enum(t); |
4138 | u32 i, nr_enums = btf_type_vlen(t); |
4139 | void *safe_data; |
4140 | int v; |
4141 | |
4142 | safe_data = btf_show_start_type(show, t, type_id, data); |
4143 | if (!safe_data) |
4144 | return; |
4145 | |
4146 | v = *(int *)safe_data; |
4147 | |
4148 | for (i = 0; i < nr_enums; i++) { |
4149 | if (v != enums[i].val) |
4150 | continue; |
4151 | |
btf_show_type_value(show, "%s",
4153 | __btf_name_by_offset(btf, |
4154 | enums[i].name_off)); |
4155 | |
4156 | btf_show_end_type(show); |
4157 | return; |
4158 | } |
4159 | |
if (btf_type_kflag(t))
btf_show_type_value(show, "%d", v);
else
btf_show_type_value(show, "%u", v);
4164 | btf_show_end_type(show); |
4165 | } |
4166 | |
4167 | static struct btf_kind_operations enum_ops = { |
4168 | .check_meta = btf_enum_check_meta, |
4169 | .resolve = btf_df_resolve, |
4170 | .check_member = btf_enum_check_member, |
4171 | .check_kflag_member = btf_enum_check_kflag_member, |
4172 | .log_details = btf_enum_log, |
4173 | .show = btf_enum_show, |
4174 | }; |
4175 | |
4176 | static s32 btf_enum64_check_meta(struct btf_verifier_env *env, |
4177 | const struct btf_type *t, |
4178 | u32 meta_left) |
4179 | { |
4180 | const struct btf_enum64 *enums = btf_type_enum64(t); |
4181 | struct btf *btf = env->btf; |
4182 | const char *fmt_str; |
4183 | u16 i, nr_enums; |
4184 | u32 meta_needed; |
4185 | |
4186 | nr_enums = btf_type_vlen(t); |
4187 | meta_needed = nr_enums * sizeof(*enums); |
4188 | |
4189 | if (meta_left < meta_needed) { |
4190 | btf_verifier_log_basic(env, t, |
4191 | "meta_left:%u meta_needed:%u" , |
4192 | meta_left, meta_needed); |
4193 | return -EINVAL; |
4194 | } |
4195 | |
if (t->size > 8 || !is_power_of_2(t->size)) {
btf_verifier_log_type(env, t, "Unexpected size");
4198 | return -EINVAL; |
4199 | } |
4200 | |
/* An enum type either has no name or a valid one */
if (t->name_off &&
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4205 | return -EINVAL; |
4206 | } |
4207 | |
4208 | btf_verifier_log_type(env, t, NULL); |
4209 | |
4210 | for (i = 0; i < nr_enums; i++) { |
if (!btf_name_offset_valid(btf, enums[i].name_off)) {
btf_verifier_log(env, "\tInvalid name_offset:%u",
4213 | enums[i].name_off); |
4214 | return -EINVAL; |
4215 | } |
4216 | |
4217 | /* enum member must have a valid name */ |
4218 | if (!enums[i].name_off || |
!btf_name_valid_identifier(btf, enums[i].name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4221 | return -EINVAL; |
4222 | } |
4223 | |
4224 | if (env->log.level == BPF_LOG_KERNEL) |
4225 | continue; |
4226 | |
fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
btf_verifier_log(env, fmt_str,
__btf_name_by_offset(btf, enums[i].name_off),
btf_enum64_value(enums + i));
4231 | } |
4232 | |
4233 | return meta_needed; |
4234 | } |
4235 | |
4236 | static void btf_enum64_show(const struct btf *btf, const struct btf_type *t, |
4237 | u32 type_id, void *data, u8 bits_offset, |
4238 | struct btf_show *show) |
4239 | { |
4240 | const struct btf_enum64 *enums = btf_type_enum64(t); |
4241 | u32 i, nr_enums = btf_type_vlen(t); |
4242 | void *safe_data; |
4243 | s64 v; |
4244 | |
4245 | safe_data = btf_show_start_type(show, t, type_id, data); |
4246 | if (!safe_data) |
4247 | return; |
4248 | |
4249 | v = *(u64 *)safe_data; |
4250 | |
4251 | for (i = 0; i < nr_enums; i++) { |
if (v != btf_enum64_value(enums + i))
4253 | continue; |
4254 | |
btf_show_type_value(show, "%s",
4256 | __btf_name_by_offset(btf, |
4257 | enums[i].name_off)); |
4258 | |
4259 | btf_show_end_type(show); |
4260 | return; |
4261 | } |
4262 | |
if (btf_type_kflag(t))
btf_show_type_value(show, "%lld", v);
else
btf_show_type_value(show, "%llu", v);
4267 | btf_show_end_type(show); |
4268 | } |
4269 | |
4270 | static struct btf_kind_operations enum64_ops = { |
4271 | .check_meta = btf_enum64_check_meta, |
4272 | .resolve = btf_df_resolve, |
4273 | .check_member = btf_enum_check_member, |
4274 | .check_kflag_member = btf_enum_check_kflag_member, |
4275 | .log_details = btf_enum_log, |
4276 | .show = btf_enum64_show, |
4277 | }; |
4278 | |
4279 | static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, |
4280 | const struct btf_type *t, |
4281 | u32 meta_left) |
4282 | { |
4283 | u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); |
4284 | |
4285 | if (meta_left < meta_needed) { |
4286 | btf_verifier_log_basic(env, t, |
4287 | "meta_left:%u meta_needed:%u" , |
4288 | meta_left, meta_needed); |
4289 | return -EINVAL; |
4290 | } |
4291 | |
4292 | if (t->name_off) { |
btf_verifier_log_type(env, t, "Invalid name");
4294 | return -EINVAL; |
4295 | } |
4296 | |
4297 | if (btf_type_kflag(t)) { |
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4299 | return -EINVAL; |
4300 | } |
4301 | |
4302 | btf_verifier_log_type(env, t, NULL); |
4303 | |
4304 | return meta_needed; |
4305 | } |
4306 | |
4307 | static void btf_func_proto_log(struct btf_verifier_env *env, |
4308 | const struct btf_type *t) |
4309 | { |
4310 | const struct btf_param *args = (const struct btf_param *)(t + 1); |
4311 | u16 nr_args = btf_type_vlen(t), i; |
4312 | |
btf_verifier_log(env, "return=%u args=(", t->type);
if (!nr_args) {
btf_verifier_log(env, "void");
goto done;
}

if (nr_args == 1 && !args[0].type) {
/* Only one vararg */
btf_verifier_log(env, "vararg");
goto done;
}

btf_verifier_log(env, "%u %s", args[0].type,
__btf_name_by_offset(env->btf,
args[0].name_off));
for (i = 1; i < nr_args - 1; i++)
btf_verifier_log(env, ", %u %s", args[i].type,
__btf_name_by_offset(env->btf,
args[i].name_off));

if (nr_args > 1) {
const struct btf_param *last_arg = &args[nr_args - 1];

if (last_arg->type)
btf_verifier_log(env, ", %u %s", last_arg->type,
__btf_name_by_offset(env->btf,
last_arg->name_off));
else
btf_verifier_log(env, ", vararg");
}

done:
btf_verifier_log(env, ")");
4346 | } |
4347 | |
4348 | static struct btf_kind_operations func_proto_ops = { |
4349 | .check_meta = btf_func_proto_check_meta, |
4350 | .resolve = btf_df_resolve, |
4351 | /* |
4352 | * BTF_KIND_FUNC_PROTO cannot be directly referred by |
4353 | * a struct's member. |
4354 | * |
4355 | * It should be a function pointer instead. |
4356 | * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) |
4357 | * |
4358 | * Hence, there is no btf_func_check_member(). |
4359 | */ |
4360 | .check_member = btf_df_check_member, |
4361 | .check_kflag_member = btf_df_check_kflag_member, |
4362 | .log_details = btf_func_proto_log, |
4363 | .show = btf_df_show, |
4364 | }; |
4365 | |
4366 | static s32 btf_func_check_meta(struct btf_verifier_env *env, |
4367 | const struct btf_type *t, |
4368 | u32 meta_left) |
4369 | { |
4370 | if (!t->name_off || |
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4373 | return -EINVAL; |
4374 | } |
4375 | |
4376 | if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { |
btf_verifier_log_type(env, t, "Invalid func linkage");
4378 | return -EINVAL; |
4379 | } |
4380 | |
4381 | if (btf_type_kflag(t)) { |
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4383 | return -EINVAL; |
4384 | } |
4385 | |
4386 | btf_verifier_log_type(env, t, NULL); |
4387 | |
4388 | return 0; |
4389 | } |
4390 | |
4391 | static int btf_func_resolve(struct btf_verifier_env *env, |
4392 | const struct resolve_vertex *v) |
4393 | { |
4394 | const struct btf_type *t = v->t; |
4395 | u32 next_type_id = t->type; |
4396 | int err; |
4397 | |
4398 | err = btf_func_check(env, t); |
4399 | if (err) |
4400 | return err; |
4401 | |
env_stack_pop_resolved(env, next_type_id, 0);
4403 | return 0; |
4404 | } |
4405 | |
4406 | static struct btf_kind_operations func_ops = { |
4407 | .check_meta = btf_func_check_meta, |
4408 | .resolve = btf_func_resolve, |
4409 | .check_member = btf_df_check_member, |
4410 | .check_kflag_member = btf_df_check_kflag_member, |
4411 | .log_details = btf_ref_type_log, |
4412 | .show = btf_df_show, |
4413 | }; |
4414 | |
4415 | static s32 btf_var_check_meta(struct btf_verifier_env *env, |
4416 | const struct btf_type *t, |
4417 | u32 meta_left) |
4418 | { |
4419 | const struct btf_var *var; |
4420 | u32 meta_needed = sizeof(*var); |
4421 | |
4422 | if (meta_left < meta_needed) { |
4423 | btf_verifier_log_basic(env, t, |
4424 | "meta_left:%u meta_needed:%u" , |
4425 | meta_left, meta_needed); |
4426 | return -EINVAL; |
4427 | } |
4428 | |
4429 | if (btf_type_vlen(t)) { |
btf_verifier_log_type(env, t, "vlen != 0");
4431 | return -EINVAL; |
4432 | } |
4433 | |
4434 | if (btf_type_kflag(t)) { |
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4436 | return -EINVAL; |
4437 | } |
4438 | |
4439 | if (!t->name_off || |
!__btf_name_valid(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4442 | return -EINVAL; |
4443 | } |
4444 | |
4445 | /* A var cannot be in type void */ |
4446 | if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { |
btf_verifier_log_type(env, t, "Invalid type_id");
4448 | return -EINVAL; |
4449 | } |
4450 | |
4451 | var = btf_type_var(t); |
4452 | if (var->linkage != BTF_VAR_STATIC && |
4453 | var->linkage != BTF_VAR_GLOBAL_ALLOCATED) { |
btf_verifier_log_type(env, t, "Linkage not supported");
4455 | return -EINVAL; |
4456 | } |
4457 | |
4458 | btf_verifier_log_type(env, t, NULL); |
4459 | |
4460 | return meta_needed; |
4461 | } |
4462 | |
4463 | static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) |
4464 | { |
4465 | const struct btf_var *var = btf_type_var(t); |
4466 | |
btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4468 | } |
4469 | |
4470 | static const struct btf_kind_operations var_ops = { |
4471 | .check_meta = btf_var_check_meta, |
4472 | .resolve = btf_var_resolve, |
4473 | .check_member = btf_df_check_member, |
4474 | .check_kflag_member = btf_df_check_kflag_member, |
4475 | .log_details = btf_var_log, |
4476 | .show = btf_var_show, |
4477 | }; |
4478 | |
4479 | static s32 btf_datasec_check_meta(struct btf_verifier_env *env, |
4480 | const struct btf_type *t, |
4481 | u32 meta_left) |
4482 | { |
4483 | const struct btf_var_secinfo *vsi; |
4484 | u64 last_vsi_end_off = 0, sum = 0; |
4485 | u32 i, meta_needed; |
4486 | |
4487 | meta_needed = btf_type_vlen(t) * sizeof(*vsi); |
4488 | if (meta_left < meta_needed) { |
4489 | btf_verifier_log_basic(env, t, |
4490 | "meta_left:%u meta_needed:%u" , |
4491 | meta_left, meta_needed); |
4492 | return -EINVAL; |
4493 | } |
4494 | |
4495 | if (!t->size) { |
btf_verifier_log_type(env, t, "size == 0");
4497 | return -EINVAL; |
4498 | } |
4499 | |
4500 | if (btf_type_kflag(t)) { |
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4502 | return -EINVAL; |
4503 | } |
4504 | |
4505 | if (!t->name_off || |
!btf_name_valid_section(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
4508 | return -EINVAL; |
4509 | } |
4510 | |
4511 | btf_verifier_log_type(env, t, NULL); |
4512 | |
4513 | for_each_vsi(i, t, vsi) { |
4514 | /* A var cannot be in type void */ |
4515 | if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { |
btf_verifier_log_vsi(env, t, vsi,
"Invalid type_id");
return -EINVAL;
}

if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
btf_verifier_log_vsi(env, t, vsi,
"Invalid offset");
return -EINVAL;
}

if (!vsi->size || vsi->size > t->size) {
btf_verifier_log_vsi(env, t, vsi,
"Invalid size");
return -EINVAL;
}

last_vsi_end_off = vsi->offset + vsi->size;
if (last_vsi_end_off > t->size) {
btf_verifier_log_vsi(env, t, vsi,
"Invalid offset+size");
return -EINVAL;
}

btf_verifier_log_vsi(env, t, vsi, NULL);
sum += vsi->size;
}

if (t->size < sum) {
btf_verifier_log_type(env, t, "Invalid btf_info size");
4546 | return -EINVAL; |
4547 | } |
4548 | |
4549 | return meta_needed; |
4550 | } |
4551 | |
4552 | static int btf_datasec_resolve(struct btf_verifier_env *env, |
4553 | const struct resolve_vertex *v) |
4554 | { |
4555 | const struct btf_var_secinfo *vsi; |
4556 | struct btf *btf = env->btf; |
4557 | u16 i; |
4558 | |
4559 | env->resolve_mode = RESOLVE_TBD; |
4560 | for_each_vsi_from(i, v->next_member, v->t, vsi) { |
4561 | u32 var_type_id = vsi->type, type_id, type_size = 0; |
4562 | const struct btf_type *var_type = btf_type_by_id(env->btf, |
4563 | var_type_id); |
if (!var_type || !btf_type_is_var(var_type)) {
btf_verifier_log_vsi(env, v->t, vsi,
"Not a VAR kind member");
4567 | return -EINVAL; |
4568 | } |
4569 | |
if (!env_type_is_resolve_sink(env, var_type) &&
!env_type_is_resolved(env, var_type_id)) {
env_stack_set_next_member(env, i + 1);
return env_stack_push(env, var_type, var_type_id);
4574 | } |
4575 | |
4576 | type_id = var_type->type; |
if (!btf_type_id_size(btf, &type_id, &type_size)) {
btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
return -EINVAL;
}

if (vsi->size < type_size) {
btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4584 | return -EINVAL; |
4585 | } |
4586 | } |
4587 | |
env_stack_pop_resolved(env, 0, 0);
4589 | return 0; |
4590 | } |
4591 | |
4592 | static void btf_datasec_log(struct btf_verifier_env *env, |
4593 | const struct btf_type *t) |
4594 | { |
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4596 | } |
4597 | |
4598 | static void btf_datasec_show(const struct btf *btf, |
4599 | const struct btf_type *t, u32 type_id, |
4600 | void *data, u8 bits_offset, |
4601 | struct btf_show *show) |
4602 | { |
4603 | const struct btf_var_secinfo *vsi; |
4604 | const struct btf_type *var; |
4605 | u32 i; |
4606 | |
4607 | if (!btf_show_start_type(show, t, type_id, data)) |
4608 | return; |
4609 | |
btf_show_type_value(show, "section (\"%s\") = {",
__btf_name_by_offset(btf, t->name_off));
4612 | for_each_vsi(i, t, vsi) { |
4613 | var = btf_type_by_id(btf, vsi->type); |
4614 | if (i) |
btf_show(show, ",");
btf_type_ops(var)->show(btf, var, vsi->type,
4617 | data + vsi->offset, bits_offset, show); |
4618 | } |
4619 | btf_show_end_type(show); |
4620 | } |
4621 | |
4622 | static const struct btf_kind_operations datasec_ops = { |
4623 | .check_meta = btf_datasec_check_meta, |
4624 | .resolve = btf_datasec_resolve, |
4625 | .check_member = btf_df_check_member, |
4626 | .check_kflag_member = btf_df_check_kflag_member, |
4627 | .log_details = btf_datasec_log, |
4628 | .show = btf_datasec_show, |
4629 | }; |
4630 | |
4631 | static s32 btf_float_check_meta(struct btf_verifier_env *env, |
4632 | const struct btf_type *t, |
4633 | u32 meta_left) |
4634 | { |
4635 | if (btf_type_vlen(t)) { |
btf_verifier_log_type(env, t, "vlen != 0");
4637 | return -EINVAL; |
4638 | } |
4639 | |
4640 | if (btf_type_kflag(t)) { |
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4642 | return -EINVAL; |
4643 | } |
4644 | |
4645 | if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && |
4646 | t->size != 16) { |
btf_verifier_log_type(env, t, "Invalid type_size");
4648 | return -EINVAL; |
4649 | } |
4650 | |
4651 | btf_verifier_log_type(env, t, NULL); |
4652 | |
4653 | return 0; |
4654 | } |
4655 | |
4656 | static int btf_float_check_member(struct btf_verifier_env *env, |
4657 | const struct btf_type *struct_type, |
4658 | const struct btf_member *member, |
4659 | const struct btf_type *member_type) |
4660 | { |
4661 | u64 start_offset_bytes; |
4662 | u64 end_offset_bytes; |
4663 | u64 misalign_bits; |
4664 | u64 align_bytes; |
4665 | u64 align_bits; |
4666 | |
4667 | /* Different architectures have different alignment requirements, so |
4668 | * here we check only for the reasonable minimum. This way we ensure |
4669 | * that types after CO-RE can pass the kernel BTF verifier. |
4670 | */ |
4671 | align_bytes = min_t(u64, sizeof(void *), member_type->size); |
4672 | align_bits = align_bytes * BITS_PER_BYTE; |
div64_u64_rem(member->offset, align_bits, &misalign_bits);
if (misalign_bits) {
btf_verifier_log_member(env, struct_type, member,
"Member is not properly aligned");
4677 | return -EINVAL; |
4678 | } |
4679 | |
4680 | start_offset_bytes = member->offset / BITS_PER_BYTE; |
4681 | end_offset_bytes = start_offset_bytes + member_type->size; |
4682 | if (end_offset_bytes > struct_type->size) { |
btf_verifier_log_member(env, struct_type, member,
"Member exceeds struct_size");
4685 | return -EINVAL; |
4686 | } |
4687 | |
4688 | return 0; |
4689 | } |
4690 | |
4691 | static void btf_float_log(struct btf_verifier_env *env, |
4692 | const struct btf_type *t) |
4693 | { |
btf_verifier_log(env, "size=%u", t->size);
4695 | } |
4696 | |
4697 | static const struct btf_kind_operations float_ops = { |
4698 | .check_meta = btf_float_check_meta, |
4699 | .resolve = btf_df_resolve, |
4700 | .check_member = btf_float_check_member, |
4701 | .check_kflag_member = btf_generic_check_kflag_member, |
4702 | .log_details = btf_float_log, |
4703 | .show = btf_df_show, |
4704 | }; |
4705 | |
4706 | static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, |
4707 | const struct btf_type *t, |
4708 | u32 meta_left) |
4709 | { |
4710 | const struct btf_decl_tag *tag; |
4711 | u32 meta_needed = sizeof(*tag); |
4712 | s32 component_idx; |
4713 | const char *value; |
4714 | |
4715 | if (meta_left < meta_needed) { |
4716 | btf_verifier_log_basic(env, t, |
4717 | "meta_left:%u meta_needed:%u" , |
4718 | meta_left, meta_needed); |
4719 | return -EINVAL; |
4720 | } |
4721 | |
value = btf_name_by_offset(env->btf, t->name_off);
if (!value || !value[0]) {
btf_verifier_log_type(env, t, "Invalid value");
4725 | return -EINVAL; |
4726 | } |
4727 | |
4728 | if (btf_type_vlen(t)) { |
btf_verifier_log_type(env, t, "vlen != 0");
4730 | return -EINVAL; |
4731 | } |
4732 | |
4733 | if (btf_type_kflag(t)) { |
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4735 | return -EINVAL; |
4736 | } |
4737 | |
4738 | component_idx = btf_type_decl_tag(t)->component_idx; |
4739 | if (component_idx < -1) { |
btf_verifier_log_type(env, t, "Invalid component_idx");
4741 | return -EINVAL; |
4742 | } |
4743 | |
4744 | btf_verifier_log_type(env, t, NULL); |
4745 | |
4746 | return meta_needed; |
4747 | } |
4748 | |
4749 | static int btf_decl_tag_resolve(struct btf_verifier_env *env, |
4750 | const struct resolve_vertex *v) |
4751 | { |
4752 | const struct btf_type *next_type; |
4753 | const struct btf_type *t = v->t; |
4754 | u32 next_type_id = t->type; |
4755 | struct btf *btf = env->btf; |
4756 | s32 component_idx; |
4757 | u32 vlen; |
4758 | |
4759 | next_type = btf_type_by_id(btf, next_type_id); |
if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
btf_verifier_log_type(env, v->t, "Invalid type_id");
4762 | return -EINVAL; |
4763 | } |
4764 | |
4765 | if (!env_type_is_resolve_sink(env, next_type) && |
!env_type_is_resolved(env, next_type_id))
return env_stack_push(env, next_type, next_type_id);
4768 | |
4769 | component_idx = btf_type_decl_tag(t)->component_idx; |
4770 | if (component_idx != -1) { |
if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
btf_verifier_log_type(env, v->t, "Invalid component_idx");
4773 | return -EINVAL; |
4774 | } |
4775 | |
if (btf_type_is_struct(next_type)) {
vlen = btf_type_vlen(next_type);
} else {
/* next_type should be a function */
next_type = btf_type_by_id(btf, next_type->type);
vlen = btf_type_vlen(next_type);
4782 | } |
4783 | |
4784 | if ((u32)component_idx >= vlen) { |
btf_verifier_log_type(env, v->t, "Invalid component_idx");
4786 | return -EINVAL; |
4787 | } |
4788 | } |
4789 | |
env_stack_pop_resolved(env, next_type_id, 0);
4791 | |
4792 | return 0; |
4793 | } |
4794 | |
4795 | static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) |
4796 | { |
btf_verifier_log(env, "type=%u component_idx=%d", t->type,
4798 | btf_type_decl_tag(t)->component_idx); |
4799 | } |
4800 | |
4801 | static const struct btf_kind_operations decl_tag_ops = { |
4802 | .check_meta = btf_decl_tag_check_meta, |
4803 | .resolve = btf_decl_tag_resolve, |
4804 | .check_member = btf_df_check_member, |
4805 | .check_kflag_member = btf_df_check_kflag_member, |
4806 | .log_details = btf_decl_tag_log, |
4807 | .show = btf_df_show, |
4808 | }; |
4809 | |
4810 | static int btf_func_proto_check(struct btf_verifier_env *env, |
4811 | const struct btf_type *t) |
4812 | { |
4813 | const struct btf_type *ret_type; |
4814 | const struct btf_param *args; |
4815 | const struct btf *btf; |
4816 | u16 nr_args, i; |
4817 | int err; |
4818 | |
4819 | btf = env->btf; |
4820 | args = (const struct btf_param *)(t + 1); |
4821 | nr_args = btf_type_vlen(t); |
4822 | |
4823 | /* Check func return type which could be "void" (t->type == 0) */ |
4824 | if (t->type) { |
4825 | u32 ret_type_id = t->type; |
4826 | |
4827 | ret_type = btf_type_by_id(btf, ret_type_id); |
4828 | if (!ret_type) { |
btf_verifier_log_type(env, t, "Invalid return type");
return -EINVAL;
}

if (btf_type_is_resolve_source_only(ret_type)) {
btf_verifier_log_type(env, t, "Invalid return type");
return -EINVAL;
}

if (btf_type_needs_resolve(ret_type) &&
!env_type_is_resolved(env, ret_type_id)) {
err = btf_resolve(env, ret_type, ret_type_id);
if (err)
return err;
}

/* Ensure the return type is a type that has a size */
if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
btf_verifier_log_type(env, t, "Invalid return type");
4848 | return -EINVAL; |
4849 | } |
4850 | } |
4851 | |
4852 | if (!nr_args) |
4853 | return 0; |
4854 | |
4855 | /* Last func arg type_id could be 0 if it is a vararg */ |
4856 | if (!args[nr_args - 1].type) { |
4857 | if (args[nr_args - 1].name_off) { |
btf_verifier_log_type(env, t, "Invalid arg#%u",
4859 | nr_args); |
4860 | return -EINVAL; |
4861 | } |
4862 | nr_args--; |
4863 | } |
4864 | |
4865 | for (i = 0; i < nr_args; i++) { |
4866 | const struct btf_type *arg_type; |
4867 | u32 arg_type_id; |
4868 | |
4869 | arg_type_id = args[i].type; |
4870 | arg_type = btf_type_by_id(btf, arg_type_id); |
4871 | if (!arg_type) { |
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
return -EINVAL;
}

if (btf_type_is_resolve_source_only(arg_type)) {
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
return -EINVAL;
}

if (args[i].name_off &&
(!btf_name_offset_valid(btf, args[i].name_off) ||
!btf_name_valid_identifier(btf, args[i].name_off))) {
btf_verifier_log_type(env, t,
"Invalid arg#%u", i + 1);
return -EINVAL;
}

if (btf_type_needs_resolve(arg_type) &&
!env_type_is_resolved(env, arg_type_id)) {
err = btf_resolve(env, arg_type, arg_type_id);
if (err)
return err;
}

if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4898 | return -EINVAL; |
4899 | } |
4900 | } |
4901 | |
4902 | return 0; |
4903 | } |
4904 | |
4905 | static int btf_func_check(struct btf_verifier_env *env, |
4906 | const struct btf_type *t) |
4907 | { |
4908 | const struct btf_type *proto_type; |
4909 | const struct btf_param *args; |
4910 | const struct btf *btf; |
4911 | u16 nr_args, i; |
4912 | |
4913 | btf = env->btf; |
4914 | proto_type = btf_type_by_id(btf, t->type); |
4915 | |
if (!proto_type || !btf_type_is_func_proto(proto_type)) {
btf_verifier_log_type(env, t, "Invalid type_id");
4918 | return -EINVAL; |
4919 | } |
4920 | |
4921 | args = (const struct btf_param *)(proto_type + 1); |
nr_args = btf_type_vlen(proto_type);
4923 | for (i = 0; i < nr_args; i++) { |
4924 | if (!args[i].name_off && args[i].type) { |
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4926 | return -EINVAL; |
4927 | } |
4928 | } |
4929 | |
4930 | return 0; |
4931 | } |
4932 | |
4933 | static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { |
4934 | [BTF_KIND_INT] = &int_ops, |
4935 | [BTF_KIND_PTR] = &ptr_ops, |
4936 | [BTF_KIND_ARRAY] = &array_ops, |
4937 | [BTF_KIND_STRUCT] = &struct_ops, |
4938 | [BTF_KIND_UNION] = &struct_ops, |
4939 | [BTF_KIND_ENUM] = &enum_ops, |
4940 | [BTF_KIND_FWD] = &fwd_ops, |
4941 | [BTF_KIND_TYPEDEF] = &modifier_ops, |
4942 | [BTF_KIND_VOLATILE] = &modifier_ops, |
4943 | [BTF_KIND_CONST] = &modifier_ops, |
4944 | [BTF_KIND_RESTRICT] = &modifier_ops, |
4945 | [BTF_KIND_FUNC] = &func_ops, |
4946 | [BTF_KIND_FUNC_PROTO] = &func_proto_ops, |
4947 | [BTF_KIND_VAR] = &var_ops, |
4948 | [BTF_KIND_DATASEC] = &datasec_ops, |
4949 | [BTF_KIND_FLOAT] = &float_ops, |
4950 | [BTF_KIND_DECL_TAG] = &decl_tag_ops, |
4951 | [BTF_KIND_TYPE_TAG] = &modifier_ops, |
4952 | [BTF_KIND_ENUM64] = &enum64_ops, |
4953 | }; |
4954 | |
4955 | static s32 btf_check_meta(struct btf_verifier_env *env, |
4956 | const struct btf_type *t, |
4957 | u32 meta_left) |
4958 | { |
4959 | u32 saved_meta_left = meta_left; |
4960 | s32 var_meta_size; |
4961 | |
4962 | if (meta_left < sizeof(*t)) { |
btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
4964 | env->log_type_id, meta_left, sizeof(*t)); |
4965 | return -EINVAL; |
4966 | } |
4967 | meta_left -= sizeof(*t); |
4968 | |
4969 | if (t->info & ~BTF_INFO_MASK) { |
btf_verifier_log(env, "[%u] Invalid btf_info:%x",
4971 | env->log_type_id, t->info); |
4972 | return -EINVAL; |
4973 | } |
4974 | |
4975 | if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || |
4976 | BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { |
		btf_verifier_log(env, "[%u] Invalid kind:%u",
4978 | env->log_type_id, BTF_INFO_KIND(t->info)); |
4979 | return -EINVAL; |
4980 | } |
4981 | |
	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
4984 | env->log_type_id, t->name_off); |
4985 | return -EINVAL; |
4986 | } |
4987 | |
4988 | var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); |
4989 | if (var_meta_size < 0) |
4990 | return var_meta_size; |
4991 | |
4992 | meta_left -= var_meta_size; |
4993 | |
4994 | return saved_meta_left - meta_left; |
4995 | } |
4996 | |
4997 | static int btf_check_all_metas(struct btf_verifier_env *env) |
4998 | { |
4999 | struct btf *btf = env->btf; |
5000 | struct btf_header *hdr; |
5001 | void *cur, *end; |
5002 | |
5003 | hdr = &btf->hdr; |
5004 | cur = btf->nohdr_data + hdr->type_off; |
5005 | end = cur + hdr->type_len; |
5006 | |
5007 | env->log_type_id = btf->base_btf ? btf->start_id : 1; |
5008 | while (cur < end) { |
5009 | struct btf_type *t = cur; |
5010 | s32 meta_size; |
5011 | |
		meta_size = btf_check_meta(env, t, end - cur);
5013 | if (meta_size < 0) |
5014 | return meta_size; |
5015 | |
5016 | btf_add_type(env, t); |
5017 | cur += meta_size; |
5018 | env->log_type_id++; |
5019 | } |
5020 | |
5021 | return 0; |
5022 | } |
5023 | |
5024 | static bool btf_resolve_valid(struct btf_verifier_env *env, |
5025 | const struct btf_type *t, |
5026 | u32 type_id) |
5027 | { |
5028 | struct btf *btf = env->btf; |
5029 | |
5030 | if (!env_type_is_resolved(env, type_id)) |
5031 | return false; |
5032 | |
5033 | if (btf_type_is_struct(t) || btf_type_is_datasec(t)) |
5034 | return !btf_resolved_type_id(btf, type_id) && |
5035 | !btf_resolved_type_size(btf, type_id); |
5036 | |
5037 | if (btf_type_is_decl_tag(t) || btf_type_is_func(t)) |
5038 | return btf_resolved_type_id(btf, type_id) && |
5039 | !btf_resolved_type_size(btf, type_id); |
5040 | |
5041 | if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || |
5042 | btf_type_is_var(t)) { |
		t = btf_type_id_resolve(btf, &type_id);
5044 | return t && |
5045 | !btf_type_is_modifier(t) && |
5046 | !btf_type_is_var(t) && |
5047 | !btf_type_is_datasec(t); |
5048 | } |
5049 | |
5050 | if (btf_type_is_array(t)) { |
5051 | const struct btf_array *array = btf_type_array(t); |
5052 | const struct btf_type *elem_type; |
5053 | u32 elem_type_id = array->type; |
5054 | u32 elem_size; |
5055 | |
		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
5058 | (array->nelems * elem_size == |
5059 | btf_resolved_type_size(btf, type_id)); |
5060 | } |
5061 | |
5062 | return false; |
5063 | } |
5064 | |
5065 | static int btf_resolve(struct btf_verifier_env *env, |
5066 | const struct btf_type *t, u32 type_id) |
5067 | { |
5068 | u32 save_log_type_id = env->log_type_id; |
5069 | const struct resolve_vertex *v; |
5070 | int err = 0; |
5071 | |
5072 | env->resolve_mode = RESOLVE_TBD; |
5073 | env_stack_push(env, t, type_id); |
5074 | while (!err && (v = env_stack_peak(env))) { |
5075 | env->log_type_id = v->type_id; |
		err = btf_type_ops(v->t)->resolve(env, v);
5077 | } |
5078 | |
5079 | env->log_type_id = type_id; |
5080 | if (err == -E2BIG) { |
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
5086 | } |
5087 | |
5088 | /* Final sanity check */ |
5089 | if (!err && !btf_resolve_valid(env, t, type_id)) { |
		btf_verifier_log_type(env, t, "Invalid resolve state");
5091 | err = -EINVAL; |
5092 | } |
5093 | |
5094 | env->log_type_id = save_log_type_id; |
5095 | return err; |
5096 | } |
5097 | |
5098 | static int btf_check_all_types(struct btf_verifier_env *env) |
5099 | { |
5100 | struct btf *btf = env->btf; |
5101 | const struct btf_type *t; |
5102 | u32 type_id, i; |
5103 | int err; |
5104 | |
5105 | err = env_resolve_init(env); |
5106 | if (err) |
5107 | return err; |
5108 | |
5109 | env->phase++; |
5110 | for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) { |
5111 | type_id = btf->start_id + i; |
5112 | t = btf_type_by_id(btf, type_id); |
5113 | |
5114 | env->log_type_id = type_id; |
5115 | if (btf_type_needs_resolve(t) && |
5116 | !env_type_is_resolved(env, type_id)) { |
5117 | err = btf_resolve(env, t, type_id); |
5118 | if (err) |
5119 | return err; |
5120 | } |
5121 | |
5122 | if (btf_type_is_func_proto(t)) { |
5123 | err = btf_func_proto_check(env, t); |
5124 | if (err) |
5125 | return err; |
5126 | } |
5127 | } |
5128 | |
5129 | return 0; |
5130 | } |
5131 | |
5132 | static int btf_parse_type_sec(struct btf_verifier_env *env) |
5133 | { |
5134 | const struct btf_header *hdr = &env->btf->hdr; |
5135 | int err; |
5136 | |
5137 | /* Type section must align to 4 bytes */ |
5138 | if (hdr->type_off & (sizeof(u32) - 1)) { |
		btf_verifier_log(env, "Unaligned type_off");
5140 | return -EINVAL; |
5141 | } |
5142 | |
5143 | if (!env->btf->base_btf && !hdr->type_len) { |
		btf_verifier_log(env, "No type found");
5145 | return -EINVAL; |
5146 | } |
5147 | |
5148 | err = btf_check_all_metas(env); |
5149 | if (err) |
5150 | return err; |
5151 | |
5152 | return btf_check_all_types(env); |
5153 | } |
5154 | |
5155 | static int btf_parse_str_sec(struct btf_verifier_env *env) |
5156 | { |
5157 | const struct btf_header *hdr; |
5158 | struct btf *btf = env->btf; |
5159 | const char *start, *end; |
5160 | |
5161 | hdr = &btf->hdr; |
5162 | start = btf->nohdr_data + hdr->str_off; |
5163 | end = start + hdr->str_len; |
5164 | |
5165 | if (end != btf->data + btf->data_size) { |
		btf_verifier_log(env, "String section is not at the end");
5167 | return -EINVAL; |
5168 | } |
5169 | |
5170 | btf->strings = start; |
5171 | |
5172 | if (btf->base_btf && !hdr->str_len) |
5173 | return 0; |
5174 | if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) { |
		btf_verifier_log(env, "Invalid string section");
5176 | return -EINVAL; |
5177 | } |
5178 | if (!btf->base_btf && start[0]) { |
		btf_verifier_log(env, "Invalid string section");
5180 | return -EINVAL; |
5181 | } |
5182 | |
5183 | return 0; |
5184 | } |
5185 | |
5186 | static const size_t btf_sec_info_offset[] = { |
5187 | offsetof(struct btf_header, type_off), |
5188 | offsetof(struct btf_header, str_off), |
5189 | }; |
5190 | |
5191 | static int btf_sec_info_cmp(const void *a, const void *b) |
5192 | { |
5193 | const struct btf_sec_info *x = a; |
5194 | const struct btf_sec_info *y = b; |
5195 | |
5196 | return (int)(x->off - y->off) ? : (int)(x->len - y->len); |
5197 | } |
5198 | |
5199 | static int btf_check_sec_info(struct btf_verifier_env *env, |
5200 | u32 btf_data_size) |
5201 | { |
5202 | struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; |
5203 | u32 total, expected_total, i; |
5204 | const struct btf_header *hdr; |
5205 | const struct btf *btf; |
5206 | |
5207 | btf = env->btf; |
5208 | hdr = &btf->hdr; |
5209 | |
5210 | /* Populate the secs from hdr */ |
5211 | for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) |
5212 | secs[i] = *(struct btf_sec_info *)((void *)hdr + |
5213 | btf_sec_info_offset[i]); |
5214 | |
	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5217 | |
5218 | /* Check for gaps and overlap among sections */ |
5219 | total = 0; |
5220 | expected_total = btf_data_size - hdr->hdr_len; |
5221 | for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { |
5222 | if (expected_total < secs[i].off) { |
			btf_verifier_log(env, "Invalid section offset");
5224 | return -EINVAL; |
5225 | } |
5226 | if (total < secs[i].off) { |
5227 | /* gap */ |
			btf_verifier_log(env, "Unsupported section found");
5229 | return -EINVAL; |
5230 | } |
5231 | if (total > secs[i].off) { |
			btf_verifier_log(env, "Section overlap found");
5233 | return -EINVAL; |
5234 | } |
5235 | if (expected_total - total < secs[i].len) { |
			btf_verifier_log(env,
					 "Total section length too long");
5238 | return -EINVAL; |
5239 | } |
5240 | total += secs[i].len; |
5241 | } |
5242 | |
5243 | /* There is data other than hdr and known sections */ |
5244 | if (expected_total != total) { |
		btf_verifier_log(env, "Unsupported section found");
5246 | return -EINVAL; |
5247 | } |
5248 | |
5249 | return 0; |
5250 | } |
5251 | |
5252 | static int btf_parse_hdr(struct btf_verifier_env *env) |
5253 | { |
5254 | u32 hdr_len, hdr_copy, btf_data_size; |
5255 | const struct btf_header *hdr; |
5256 | struct btf *btf; |
5257 | |
5258 | btf = env->btf; |
5259 | btf_data_size = btf->data_size; |
5260 | |
5261 | if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { |
		btf_verifier_log(env, "hdr_len not found");
5263 | return -EINVAL; |
5264 | } |
5265 | |
5266 | hdr = btf->data; |
5267 | hdr_len = hdr->hdr_len; |
5268 | if (btf_data_size < hdr_len) { |
		btf_verifier_log(env, "btf_header not found");
5270 | return -EINVAL; |
5271 | } |
5272 | |
5273 | /* Ensure the unsupported header fields are zero */ |
5274 | if (hdr_len > sizeof(btf->hdr)) { |
5275 | u8 *expected_zero = btf->data + sizeof(btf->hdr); |
5276 | u8 *end = btf->data + hdr_len; |
5277 | |
5278 | for (; expected_zero < end; expected_zero++) { |
5279 | if (*expected_zero) { |
				btf_verifier_log(env, "Unsupported btf_header");
5281 | return -E2BIG; |
5282 | } |
5283 | } |
5284 | } |
5285 | |
5286 | hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); |
5287 | memcpy(&btf->hdr, btf->data, hdr_copy); |
5288 | |
5289 | hdr = &btf->hdr; |
5290 | |
5291 | btf_verifier_log_hdr(env, btf_data_size); |
5292 | |
5293 | if (hdr->magic != BTF_MAGIC) { |
		btf_verifier_log(env, "Invalid magic");
5295 | return -EINVAL; |
5296 | } |
5297 | |
5298 | if (hdr->version != BTF_VERSION) { |
		btf_verifier_log(env, "Unsupported version");
5300 | return -ENOTSUPP; |
5301 | } |
5302 | |
5303 | if (hdr->flags) { |
		btf_verifier_log(env, "Unsupported flags");
5305 | return -ENOTSUPP; |
5306 | } |
5307 | |
5308 | if (!btf->base_btf && btf_data_size == hdr->hdr_len) { |
		btf_verifier_log(env, "No data");
5310 | return -EINVAL; |
5311 | } |
5312 | |
5313 | return btf_check_sec_info(env, btf_data_size); |
5314 | } |
5315 | |
static const char *alloc_obj_fields[] = {
	"bpf_spin_lock",
	"bpf_list_head",
	"bpf_list_node",
	"bpf_rb_root",
	"bpf_rb_node",
	"bpf_refcount",
};
5324 | |
5325 | static struct btf_struct_metas * |
5326 | btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) |
5327 | { |
5328 | union { |
5329 | struct btf_id_set set; |
5330 | struct { |
5331 | u32 _cnt; |
5332 | u32 _ids[ARRAY_SIZE(alloc_obj_fields)]; |
5333 | } _arr; |
5334 | } aof; |
5335 | struct btf_struct_metas *tab = NULL; |
5336 | int i, n, id, ret; |
5337 | |
5338 | BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0); |
5339 | BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32)); |
5340 | |
5341 | memset(&aof, 0, sizeof(aof)); |
5342 | for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) { |
5343 | /* Try to find whether this special type exists in user BTF, and |
5344 | * if so remember its ID so we can easily find it among members |
5345 | * of structs that we iterate in the next loop. |
5346 | */ |
		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5348 | if (id < 0) |
5349 | continue; |
5350 | aof.set.ids[aof.set.cnt++] = id; |
5351 | } |
5352 | |
5353 | if (!aof.set.cnt) |
5354 | return NULL; |
	sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
5356 | |
5357 | n = btf_nr_types(btf); |
5358 | for (i = 1; i < n; i++) { |
5359 | struct btf_struct_metas *new_tab; |
5360 | const struct btf_member *member; |
5361 | struct btf_struct_meta *type; |
5362 | struct btf_record *record; |
5363 | const struct btf_type *t; |
5364 | int j, tab_cnt; |
5365 | |
5366 | t = btf_type_by_id(btf, i); |
5367 | if (!t) { |
5368 | ret = -EINVAL; |
5369 | goto free; |
5370 | } |
5371 | if (!__btf_type_is_struct(t)) |
5372 | continue; |
5373 | |
5374 | cond_resched(); |
5375 | |
5376 | for_each_member(j, t, member) { |
			if (btf_id_set_contains(&aof.set, member->type))
5378 | goto parse; |
5379 | } |
5380 | continue; |
5381 | parse: |
5382 | tab_cnt = tab ? tab->cnt : 0; |
		new_tab = krealloc(tab, offsetof(struct btf_struct_metas, types[tab_cnt + 1]),
				   GFP_KERNEL | __GFP_NOWARN);
5385 | if (!new_tab) { |
5386 | ret = -ENOMEM; |
5387 | goto free; |
5388 | } |
5389 | if (!tab) |
5390 | new_tab->cnt = 0; |
5391 | tab = new_tab; |
5392 | |
5393 | type = &tab->types[tab->cnt]; |
5394 | type->btf_id = i; |
		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
						  BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size);
		/* The record cannot be unset, treat it as an error if so */
		if (IS_ERR_OR_NULL(record)) {
			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5400 | goto free; |
5401 | } |
5402 | type->record = record; |
5403 | tab->cnt++; |
5404 | } |
5405 | return tab; |
5406 | free: |
5407 | btf_struct_metas_free(tab); |
	return ERR_PTR(ret);
5409 | } |
5410 | |
5411 | struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id) |
5412 | { |
5413 | struct btf_struct_metas *tab; |
5414 | |
5415 | BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0); |
5416 | tab = btf->struct_meta_tab; |
5417 | if (!tab) |
5418 | return NULL; |
	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5420 | } |
5421 | |
5422 | static int btf_check_type_tags(struct btf_verifier_env *env, |
5423 | struct btf *btf, int start_id) |
5424 | { |
5425 | int i, n, good_id = start_id - 1; |
5426 | bool in_tags; |
5427 | |
5428 | n = btf_nr_types(btf); |
5429 | for (i = start_id; i < n; i++) { |
5430 | const struct btf_type *t; |
5431 | int chain_limit = 32; |
5432 | u32 cur_id = i; |
5433 | |
5434 | t = btf_type_by_id(btf, i); |
5435 | if (!t) |
5436 | return -EINVAL; |
5437 | if (!btf_type_is_modifier(t)) |
5438 | continue; |
5439 | |
5440 | cond_resched(); |
5441 | |
5442 | in_tags = btf_type_is_type_tag(t); |
5443 | while (btf_type_is_modifier(t)) { |
5444 | if (!chain_limit--) { |
				btf_verifier_log(env, "Max chain length or cycle detected");
5446 | return -ELOOP; |
5447 | } |
5448 | if (btf_type_is_type_tag(t)) { |
5449 | if (!in_tags) { |
					btf_verifier_log(env, "Type tags don't precede modifiers");
5451 | return -EINVAL; |
5452 | } |
5453 | } else if (in_tags) { |
5454 | in_tags = false; |
5455 | } |
5456 | if (cur_id <= good_id) |
5457 | break; |
5458 | /* Move to next type */ |
5459 | cur_id = t->type; |
5460 | t = btf_type_by_id(btf, cur_id); |
5461 | if (!t) |
5462 | return -EINVAL; |
5463 | } |
5464 | good_id = i; |
5465 | } |
5466 | return 0; |
5467 | } |
5468 | |
5469 | static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size) |
5470 | { |
5471 | u32 log_true_size; |
5472 | int err; |
5473 | |
	err = bpf_vlog_finalize(log, &log_true_size);
5475 | |
5476 | if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) && |
	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
				  &log_true_size, sizeof(log_true_size)))
5479 | err = -EFAULT; |
5480 | |
5481 | return err; |
5482 | } |
5483 | |
5484 | static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) |
5485 | { |
	bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5487 | char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf); |
5488 | struct btf_struct_metas *struct_meta_tab; |
5489 | struct btf_verifier_env *env = NULL; |
5490 | struct btf *btf = NULL; |
5491 | u8 *data; |
5492 | int err, ret; |
5493 | |
5494 | if (attr->btf_size > BTF_MAX_SIZE) |
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);
5500 | |
5501 | /* user could have requested verbose verifier output |
5502 | * and supplied buffer to store the verification trace |
5503 | */ |
	err = bpf_vlog_init(&env->log, attr->btf_log_level,
			    log_ubuf, attr->btf_log_size);
5506 | if (err) |
5507 | goto errout_free; |
5508 | |
	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5510 | if (!btf) { |
5511 | err = -ENOMEM; |
5512 | goto errout; |
5513 | } |
5514 | env->btf = btf; |
5515 | |
	data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5517 | if (!data) { |
5518 | err = -ENOMEM; |
5519 | goto errout; |
5520 | } |
5521 | |
5522 | btf->data = data; |
5523 | btf->data_size = attr->btf_size; |
5524 | |
	if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5526 | err = -EFAULT; |
5527 | goto errout; |
5528 | } |
5529 | |
5530 | err = btf_parse_hdr(env); |
5531 | if (err) |
5532 | goto errout; |
5533 | |
5534 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
5535 | |
5536 | err = btf_parse_str_sec(env); |
5537 | if (err) |
5538 | goto errout; |
5539 | |
5540 | err = btf_parse_type_sec(env); |
5541 | if (err) |
5542 | goto errout; |
5543 | |
	err = btf_check_type_tags(env, btf, 1);
5545 | if (err) |
5546 | goto errout; |
5547 | |
	struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
	if (IS_ERR(struct_meta_tab)) {
		err = PTR_ERR(struct_meta_tab);
5551 | goto errout; |
5552 | } |
5553 | btf->struct_meta_tab = struct_meta_tab; |
5554 | |
5555 | if (struct_meta_tab) { |
5556 | int i; |
5557 | |
5558 | for (i = 0; i < struct_meta_tab->cnt; i++) { |
			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5560 | if (err < 0) |
5561 | goto errout_meta; |
5562 | } |
5563 | } |
5564 | |
	err = finalize_log(&env->log, uattr, uattr_size);
5566 | if (err) |
5567 | goto errout_free; |
5568 | |
5569 | btf_verifier_env_free(env); |
	refcount_set(&btf->refcnt, 1);
5571 | return btf; |
5572 | |
5573 | errout_meta: |
5574 | btf_free_struct_meta_tab(btf); |
5575 | errout: |
5576 | /* overwrite err with -ENOSPC or -EFAULT */ |
	ret = finalize_log(&env->log, uattr, uattr_size);
5578 | if (ret) |
5579 | err = ret; |
5580 | errout_free: |
5581 | btf_verifier_env_free(env); |
5582 | if (btf) |
5583 | btf_free(btf); |
	return ERR_PTR(err);
5585 | } |
5586 | |
5587 | extern char __weak __start_BTF[]; |
5588 | extern char __weak __stop_BTF[]; |
5589 | extern struct btf *btf_vmlinux; |
5590 | |
5591 | #define BPF_MAP_TYPE(_id, _ops) |
5592 | #define BPF_LINK_TYPE(_id, _name) |
5593 | static union { |
5594 | struct bpf_ctx_convert { |
5595 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5596 | prog_ctx_type _id##_prog; \ |
5597 | kern_ctx_type _id##_kern; |
5598 | #include <linux/bpf_types.h> |
5599 | #undef BPF_PROG_TYPE |
5600 | } *__t; |
5601 | /* 't' is written once under lock. Read many times. */ |
5602 | const struct btf_type *t; |
5603 | } bpf_ctx_convert; |
5604 | enum { |
5605 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5606 | __ctx_convert##_id, |
5607 | #include <linux/bpf_types.h> |
5608 | #undef BPF_PROG_TYPE |
5609 | __ctx_convert_unused, /* to avoid empty enum in extreme .config */ |
5610 | }; |
5611 | static u8 bpf_ctx_convert_map[] = { |
5612 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5613 | [_id] = __ctx_convert##_id, |
5614 | #include <linux/bpf_types.h> |
5615 | #undef BPF_PROG_TYPE |
5616 | 0, /* avoid empty array */ |
5617 | }; |
5618 | #undef BPF_MAP_TYPE |
5619 | #undef BPF_LINK_TYPE |
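
/* For one bpf_types.h entry, e.g. (shown for illustration)
 *
 *   BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
 *                 struct __sk_buff, struct sk_buff)
 *
 * the machinery above expands to a member pair inside bpf_ctx_convert:
 *
 *   struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
 *   struct sk_buff   BPF_PROG_TYPE_SOCKET_FILTER_kern;
 *
 * Member 2*i of the struct's BTF is thus the user-visible ctx type and
 * member 2*i+1 the kernel ctx type, which is how btf_get_prog_ctx_type()
 * and get_kern_ctx_btf_id() below index into it.
 */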
5620 | |
5621 | const struct btf_member * |
5622 | btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, |
5623 | const struct btf_type *t, enum bpf_prog_type prog_type, |
5624 | int arg) |
5625 | { |
5626 | const struct btf_type *conv_struct; |
5627 | const struct btf_type *ctx_struct; |
5628 | const struct btf_member *ctx_type; |
5629 | const char *tname, *ctx_tname; |
5630 | |
5631 | conv_struct = bpf_ctx_convert.t; |
5632 | if (!conv_struct) { |
		bpf_log(log, "btf_vmlinux is malformed\n");
5634 | return NULL; |
5635 | } |
5636 | t = btf_type_by_id(btf, t->type); |
5637 | while (btf_type_is_modifier(t)) |
5638 | t = btf_type_by_id(btf, t->type); |
5639 | if (!btf_type_is_struct(t)) { |
5640 | /* Only pointer to struct is supported for now. |
5641 | * That means that BPF_PROG_TYPE_TRACEPOINT with BTF |
5642 | * is not supported yet. |
5643 | * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. |
5644 | */ |
5645 | return NULL; |
5646 | } |
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5650 | return NULL; |
5651 | } |
5652 | /* prog_type is valid bpf program type. No need for bounds check. */ |
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5654 | /* ctx_struct is a pointer to prog_ctx_type in vmlinux. |
5655 | * Like 'struct __sk_buff' |
5656 | */ |
5657 | ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); |
5658 | if (!ctx_struct) |
5659 | /* should not happen */ |
5660 | return NULL; |
5661 | again: |
	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
5663 | if (!ctx_tname) { |
5664 | /* should not happen */ |
		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5666 | return NULL; |
5667 | } |
5668 | /* only compare that prog's ctx type name is the same as |
5669 | * kernel expects. No need to compare field by field. |
5670 | * It's ok for bpf prog to do: |
5671 | * struct __sk_buff {}; |
5672 | * int socket_filter_bpf_prog(struct __sk_buff *skb) |
5673 | * { // no fields of skb are ever used } |
5674 | */ |
	if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
		return ctx_type;
	if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
5678 | return ctx_type; |
5679 | if (strcmp(ctx_tname, tname)) { |
5680 | /* bpf_user_pt_regs_t is a typedef, so resolve it to |
5681 | * underlying struct and check name again |
5682 | */ |
		if (!btf_type_is_modifier(ctx_struct))
			return NULL;
		while (btf_type_is_modifier(ctx_struct))
5686 | ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type); |
5687 | goto again; |
5688 | } |
5689 | return ctx_type; |
5690 | } |
5691 | |
5692 | static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, |
5693 | struct btf *btf, |
5694 | const struct btf_type *t, |
5695 | enum bpf_prog_type prog_type, |
5696 | int arg) |
5697 | { |
5698 | const struct btf_member *prog_ctx_type, *kern_ctx_type; |
5699 | |
5700 | prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg); |
5701 | if (!prog_ctx_type) |
5702 | return -ENOENT; |
5703 | kern_ctx_type = prog_ctx_type + 1; |
5704 | return kern_ctx_type->type; |
5705 | } |
5706 | |
5707 | int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type) |
5708 | { |
5709 | const struct btf_member *kctx_member; |
5710 | const struct btf_type *conv_struct; |
5711 | const struct btf_type *kctx_type; |
5712 | u32 kctx_type_id; |
5713 | |
5714 | conv_struct = bpf_ctx_convert.t; |
5715 | /* get member for kernel ctx type */ |
	kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
5717 | kctx_type_id = kctx_member->type; |
5718 | kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id); |
	if (!btf_type_is_struct(kctx_type)) {
		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
5721 | return -EINVAL; |
5722 | } |
5723 | |
5724 | return kctx_type_id; |
5725 | } |
5726 | |
5727 | BTF_ID_LIST(bpf_ctx_convert_btf_id) |
5728 | BTF_ID(struct, bpf_ctx_convert) |
5729 | |
5730 | struct btf *btf_parse_vmlinux(void) |
5731 | { |
5732 | struct btf_verifier_env *env = NULL; |
5733 | struct bpf_verifier_log *log; |
5734 | struct btf *btf = NULL; |
5735 | int err; |
5736 | |
	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);
5740 | |
5741 | log = &env->log; |
5742 | log->level = BPF_LOG_KERNEL; |
5743 | |
	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5745 | if (!btf) { |
5746 | err = -ENOMEM; |
5747 | goto errout; |
5748 | } |
5749 | env->btf = btf; |
5750 | |
5751 | btf->data = __start_BTF; |
5752 | btf->data_size = __stop_BTF - __start_BTF; |
5753 | btf->kernel_btf = true; |
	snprintf(btf->name, sizeof(btf->name), "vmlinux");
5755 | |
5756 | err = btf_parse_hdr(env); |
5757 | if (err) |
5758 | goto errout; |
5759 | |
5760 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
5761 | |
5762 | err = btf_parse_str_sec(env); |
5763 | if (err) |
5764 | goto errout; |
5765 | |
5766 | err = btf_check_all_metas(env); |
5767 | if (err) |
5768 | goto errout; |
5769 | |
	err = btf_check_type_tags(env, btf, 1);
5771 | if (err) |
5772 | goto errout; |
5773 | |
5774 | /* btf_parse_vmlinux() runs under bpf_verifier_lock */ |
5775 | bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); |
5776 | |
5777 | bpf_struct_ops_init(btf, log); |
5778 | |
	refcount_set(&btf->refcnt, 1);
5780 | |
5781 | err = btf_alloc_id(btf); |
5782 | if (err) |
5783 | goto errout; |
5784 | |
5785 | btf_verifier_env_free(env); |
5786 | return btf; |
5787 | |
5788 | errout: |
5789 | btf_verifier_env_free(env); |
5790 | if (btf) { |
		kvfree(btf->types);
		kfree(btf);
5793 | } |
	return ERR_PTR(err);
5795 | } |
5796 | |
5797 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
5798 | |
5799 | static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) |
5800 | { |
5801 | struct btf_verifier_env *env = NULL; |
5802 | struct bpf_verifier_log *log; |
5803 | struct btf *btf = NULL, *base_btf; |
5804 | int err; |
5805 | |
5806 | base_btf = bpf_get_btf_vmlinux(); |
5807 | if (IS_ERR(base_btf)) |
5808 | return base_btf; |
5809 | if (!base_btf) |
5810 | return ERR_PTR(-EINVAL); |
5811 | |
5812 | env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); |
5813 | if (!env) |
5814 | return ERR_PTR(-ENOMEM); |
5815 | |
5816 | log = &env->log; |
5817 | log->level = BPF_LOG_KERNEL; |
5818 | |
5819 | btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); |
5820 | if (!btf) { |
5821 | err = -ENOMEM; |
5822 | goto errout; |
5823 | } |
5824 | env->btf = btf; |
5825 | |
5826 | btf->base_btf = base_btf; |
5827 | btf->start_id = base_btf->nr_types; |
5828 | btf->start_str_off = base_btf->hdr.str_len; |
5829 | btf->kernel_btf = true; |
	snprintf(btf->name, sizeof(btf->name), "%s", module_name);
5831 | |
5832 | btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); |
5833 | if (!btf->data) { |
5834 | err = -ENOMEM; |
5835 | goto errout; |
5836 | } |
5837 | memcpy(btf->data, data, data_size); |
5838 | btf->data_size = data_size; |
5839 | |
5840 | err = btf_parse_hdr(env); |
5841 | if (err) |
5842 | goto errout; |
5843 | |
5844 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
5845 | |
5846 | err = btf_parse_str_sec(env); |
5847 | if (err) |
5848 | goto errout; |
5849 | |
5850 | err = btf_check_all_metas(env); |
5851 | if (err) |
5852 | goto errout; |
5853 | |
5854 | err = btf_check_type_tags(env, btf, btf_nr_types(base_btf)); |
5855 | if (err) |
5856 | goto errout; |
5857 | |
5858 | btf_verifier_env_free(env); |
5859 | refcount_set(&btf->refcnt, 1); |
5860 | return btf; |
5861 | |
5862 | errout: |
5863 | btf_verifier_env_free(env); |
5864 | if (btf) { |
5865 | kvfree(btf->data); |
5866 | kvfree(btf->types); |
5867 | kfree(btf); |
5868 | } |
5869 | return ERR_PTR(err); |
5870 | } |
5871 | |
5872 | #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ |
5873 | |
5874 | struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) |
5875 | { |
5876 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
5877 | |
5878 | if (tgt_prog) |
5879 | return tgt_prog->aux->btf; |
5880 | else |
5881 | return prog->aux->attach_btf; |
5882 | } |
5883 | |
5884 | static bool is_int_ptr(struct btf *btf, const struct btf_type *t) |
5885 | { |
5886 | /* skip modifiers */ |
	t = btf_type_skip_modifiers(btf, t->type, NULL);
5888 | |
5889 | return btf_type_is_int(t); |
5890 | } |
5891 | |
5892 | static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, |
5893 | int off) |
5894 | { |
5895 | const struct btf_param *args; |
5896 | const struct btf_type *t; |
5897 | u32 offset = 0, nr_args; |
5898 | int i; |
5899 | |
5900 | if (!func_proto) |
5901 | return off / 8; |
5902 | |
	nr_args = btf_type_vlen(func_proto);
5904 | args = (const struct btf_param *)(func_proto + 1); |
5905 | for (i = 0; i < nr_args; i++) { |
		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
5907 | offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); |
5908 | if (off < offset) |
5909 | return i; |
5910 | } |
5911 | |
	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
5913 | offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); |
5914 | if (off < offset) |
5915 | return nr_args; |
5916 | |
5917 | return nr_args + 1; |
5918 | } |
5919 | |
5920 | static bool prog_args_trusted(const struct bpf_prog *prog) |
5921 | { |
5922 | enum bpf_attach_type atype = prog->expected_attach_type; |
5923 | |
5924 | switch (prog->type) { |
5925 | case BPF_PROG_TYPE_TRACING: |
5926 | return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER; |
5927 | case BPF_PROG_TYPE_LSM: |
5928 | return bpf_lsm_is_trusted(prog); |
5929 | case BPF_PROG_TYPE_STRUCT_OPS: |
5930 | return true; |
5931 | default: |
5932 | return false; |
5933 | } |
5934 | } |
5935 | |
5936 | bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
5937 | const struct bpf_prog *prog, |
5938 | struct bpf_insn_access_aux *info) |
5939 | { |
5940 | const struct btf_type *t = prog->aux->attach_func_proto; |
5941 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
5942 | struct btf *btf = bpf_prog_get_target_btf(prog); |
5943 | const char *tname = prog->aux->attach_func_name; |
5944 | struct bpf_verifier_log *log = info->log; |
5945 | const struct btf_param *args; |
5946 | const char *tag_value; |
5947 | u32 nr_args, arg; |
5948 | int i, ret; |
5949 | |
5950 | if (off % 8) { |
		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
5952 | tname, off); |
5953 | return false; |
5954 | } |
	arg = get_ctx_arg_idx(btf, t, off);
5956 | args = (const struct btf_param *)(t + 1); |
5957 | /* if (t == NULL) Fall back to default BPF prog with |
5958 | * MAX_BPF_FUNC_REG_ARGS u64 arguments. |
5959 | */ |
5960 | nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; |
5961 | if (prog->aux->attach_btf_trace) { |
5962 | /* skip first 'void *__data' argument in btf_trace_##name typedef */ |
5963 | args++; |
5964 | nr_args--; |
5965 | } |
5966 | |
5967 | if (arg > nr_args) { |
		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
5969 | tname, arg + 1); |
5970 | return false; |
5971 | } |
5972 | |
5973 | if (arg == nr_args) { |
5974 | switch (prog->expected_attach_type) { |
5975 | case BPF_LSM_CGROUP: |
5976 | case BPF_LSM_MAC: |
5977 | case BPF_TRACE_FEXIT: |
5978 | /* When LSM programs are attached to void LSM hooks |
5979 | * they use FEXIT trampolines and when attached to |
5980 | * int LSM hooks, they use MODIFY_RETURN trampolines. |
5981 | * |
5982 | * While the LSM programs are BPF_MODIFY_RETURN-like |
5983 | * the check: |
5984 | * |
5985 | * if (ret_type != 'int') |
5986 | * return -EINVAL; |
5987 | * |
5988 | * is _not_ done here. This is still safe as LSM hooks |
5989 | * have only void and int return types. |
5990 | */ |
5991 | if (!t) |
5992 | return true; |
5993 | t = btf_type_by_id(btf, t->type); |
5994 | break; |
5995 | case BPF_MODIFY_RETURN: |
5996 | /* For now the BPF_MODIFY_RETURN can only be attached to |
5997 | * functions that return an int. |
5998 | */ |
5999 | if (!t) |
6000 | return false; |
6001 | |
			t = btf_type_skip_modifiers(btf, t->type, NULL);
			if (!btf_type_is_small_int(t)) {
				bpf_log(log,
					"ret type %s not allowed for fmod_ret\n",
					btf_type_str(t));
6007 | return false; |
6008 | } |
6009 | break; |
6010 | default: |
			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6012 | tname, arg + 1); |
6013 | return false; |
6014 | } |
6015 | } else { |
6016 | if (!t) |
6017 | /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ |
6018 | return true; |
6019 | t = btf_type_by_id(btf, args[arg].type); |
6020 | } |
6021 | |
6022 | /* skip modifiers */ |
6023 | while (btf_type_is_modifier(t)) |
6024 | t = btf_type_by_id(btf, t->type); |
6025 | if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) |
6026 | /* accessing a scalar */ |
6027 | return true; |
6028 | if (!btf_type_is_ptr(t)) { |
6029 | bpf_log(log, |
			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
			tname, arg,
			__btf_name_by_offset(btf, t->name_off),
6033 | btf_type_str(t)); |
6034 | return false; |
6035 | } |
6036 | |
6037 | /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ |
6038 | for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { |
6039 | const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; |
6040 | u32 type, flag; |
6041 | |
		type = base_type(ctx_arg_info->reg_type);
		flag = type_flag(ctx_arg_info->reg_type);
6044 | if (ctx_arg_info->offset == off && type == PTR_TO_BUF && |
6045 | (flag & PTR_MAYBE_NULL)) { |
6046 | info->reg_type = ctx_arg_info->reg_type; |
6047 | return true; |
6048 | } |
6049 | } |
6050 | |
6051 | if (t->type == 0) |
6052 | /* This is a pointer to void. |
6053 | * It is the same as scalar from the verifier safety pov. |
6054 | * No further pointer walking is allowed. |
6055 | */ |
6056 | return true; |
6057 | |
6058 | if (is_int_ptr(btf, t)) |
6059 | return true; |
6060 | |
6061 | /* this is a pointer to another type */ |
6062 | for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { |
6063 | const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; |
6064 | |
6065 | if (ctx_arg_info->offset == off) { |
6066 | if (!ctx_arg_info->btf_id) { |
				bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
6068 | return false; |
6069 | } |
6070 | |
6071 | info->reg_type = ctx_arg_info->reg_type; |
6072 | info->btf = btf_vmlinux; |
6073 | info->btf_id = ctx_arg_info->btf_id; |
6074 | return true; |
6075 | } |
6076 | } |
6077 | |
6078 | info->reg_type = PTR_TO_BTF_ID; |
6079 | if (prog_args_trusted(prog)) |
6080 | info->reg_type |= PTR_TRUSTED; |
6081 | |
6082 | if (tgt_prog) { |
6083 | enum bpf_prog_type tgt_type; |
6084 | |
6085 | if (tgt_prog->type == BPF_PROG_TYPE_EXT) |
6086 | tgt_type = tgt_prog->aux->saved_dst_prog_type; |
6087 | else |
6088 | tgt_type = tgt_prog->type; |
6089 | |
		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6091 | if (ret > 0) { |
6092 | info->btf = btf_vmlinux; |
6093 | info->btf_id = ret; |
6094 | return true; |
6095 | } else { |
6096 | return false; |
6097 | } |
6098 | } |
6099 | |
6100 | info->btf = btf; |
6101 | info->btf_id = t->type; |
6102 | t = btf_type_by_id(btf, t->type); |
6103 | |
6104 | if (btf_type_is_type_tag(t)) { |
		tag_value = __btf_name_by_offset(btf, t->name_off);
		if (strcmp(tag_value, "user") == 0)
			info->reg_type |= MEM_USER;
		if (strcmp(tag_value, "percpu") == 0)
6109 | info->reg_type |= MEM_PERCPU; |
6110 | } |
6111 | |
6112 | /* skip modifiers */ |
6113 | while (btf_type_is_modifier(t)) { |
6114 | info->btf_id = t->type; |
6115 | t = btf_type_by_id(btf, t->type); |
6116 | } |
6117 | if (!btf_type_is_struct(t)) { |
6118 | bpf_log(log, |
			"func '%s' arg%d type %s is not a struct\n",
6120 | tname, arg, btf_type_str(t)); |
6121 | return false; |
6122 | } |
	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
		tname, arg, info->btf_id, btf_type_str(t),
		__btf_name_by_offset(btf, t->name_off));
6126 | return true; |
6127 | } |
6128 | |
6129 | enum bpf_struct_walk_result { |
6130 | /* < 0 error */ |
6131 | WALK_SCALAR = 0, |
6132 | WALK_PTR, |
6133 | WALK_STRUCT, |
6134 | }; |
6135 | |
6136 | static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, |
6137 | const struct btf_type *t, int off, int size, |
6138 | u32 *next_btf_id, enum bpf_type_flag *flag, |
6139 | const char **field_name) |
6140 | { |
6141 | u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; |
6142 | const struct btf_type *mtype, *elem_type = NULL; |
6143 | const struct btf_member *member; |
6144 | const char *tname, *mname, *tag_value; |
6145 | u32 vlen, elem_id, mid; |
6146 | |
6147 | again: |
6148 | if (btf_type_is_modifier(t)) |
		t = btf_type_skip_modifiers(btf, t->type, NULL);
	tname = __btf_name_by_offset(btf, t->name_off);
	if (!btf_type_is_struct(t)) {
		bpf_log(log, "Type '%s' is not a struct\n", tname);
6153 | return -EINVAL; |
6154 | } |
6155 | |
6156 | vlen = btf_type_vlen(t); |
6157 | if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED)) |
6158 | /* |
6159 | * walking unions yields untrusted pointers |
6160 | * with exception of __bpf_md_ptr and other |
6161 | * unions with a single member |
6162 | */ |
6163 | *flag |= PTR_UNTRUSTED; |
6164 | |
6165 | if (off + size > t->size) { |
6166 | /* If the last element is a variable size array, we may |
6167 | * need to relax the rule. |
6168 | */ |
6169 | struct btf_array *array_elem; |
6170 | |
6171 | if (vlen == 0) |
6172 | goto error; |
6173 | |
6174 | member = btf_type_member(t) + vlen - 1; |
		mtype = btf_type_skip_modifiers(btf, member->type,
						NULL);
		if (!btf_type_is_array(mtype))
6178 | goto error; |
6179 | |
6180 | array_elem = (struct btf_array *)(mtype + 1); |
6181 | if (array_elem->nelems != 0) |
6182 | goto error; |
6183 | |
		moff = __btf_member_bit_offset(t, member) / 8;
6185 | if (off < moff) |
6186 | goto error; |
6187 | |
6188 | /* allow structure and integer */ |
		t = btf_type_skip_modifiers(btf, array_elem->type,
					    NULL);
6191 | |
6192 | if (btf_type_is_int(t)) |
6193 | return WALK_SCALAR; |
6194 | |
6195 | if (!btf_type_is_struct(t)) |
6196 | goto error; |
6197 | |
6198 | off = (off - moff) % t->size; |
6199 | goto again; |
6200 | |
6201 | error: |
		bpf_log(log, "access beyond struct %s at off %u size %u\n",
6203 | tname, off, size); |
6204 | return -EACCES; |
6205 | } |
6206 | |
6207 | for_each_member(i, t, member) { |
6208 | /* offset of the field in bytes */ |
		moff = __btf_member_bit_offset(t, member) / 8;
6210 | if (off + size <= moff) |
6211 | /* won't find anything, field is already too far */ |
6212 | break; |
6213 | |
		if (__btf_member_bitfield_size(t, member)) {
			u32 end_bit = __btf_member_bit_offset(t, member) +
				__btf_member_bitfield_size(t, member);
6217 | |
6218 | /* off <= moff instead of off == moff because clang |
6219 | * does not generate a BTF member for anonymous |
6220 | * bitfield like the ":16" here: |
6221 | * struct { |
6222 | * int :16; |
6223 | * int x:8; |
6224 | * }; |
6225 | */ |
6226 | if (off <= moff && |
6227 | BITS_ROUNDUP_BYTES(end_bit) <= off + size) |
6228 | return WALK_SCALAR; |
6229 | |
6230 | /* off may be accessing a following member |
6231 | * |
6232 | * or |
6233 | * |
6234 | * Doing partial access at either end of this |
6235 | * bitfield. Continue on this case also to |
6236 | * treat it as not accessing this bitfield |
6237 | * and eventually error out as field not |
6238 | * found to keep it simple. |
6239 | * It could be relaxed if there was a legit |
6240 | * partial access case later. |
6241 | */ |
6242 | continue; |
6243 | } |
6244 | |
6245 | /* In case of "off" is pointing to holes of a struct */ |
6246 | if (off < moff) |
6247 | break; |
6248 | |
6249 | /* type of the field */ |
6250 | mid = member->type; |
6251 | mtype = btf_type_by_id(btf, member->type); |
		mname = __btf_name_by_offset(btf, member->name_off);

		mtype = __btf_resolve_size(btf, mtype, &msize,
					   &elem_type, &elem_id, &total_nelems,
					   &mid);
		if (IS_ERR(mtype)) {
			bpf_log(log, "field %s doesn't have size\n", mname);
6259 | return -EFAULT; |
6260 | } |
6261 | |
6262 | mtrue_end = moff + msize; |
6263 | if (off >= mtrue_end) |
6264 | /* no overlap with member, keep iterating */ |
6265 | continue; |
6266 | |
		if (btf_type_is_array(mtype)) {
6268 | u32 elem_idx; |
6269 | |
6270 | /* __btf_resolve_size() above helps to |
6271 | * linearize a multi-dimensional array. |
6272 | * |
6273 | * The logic here is treating an array |
6274 | * in a struct as the following way: |
6275 | * |
6276 | * struct outer { |
6277 | * struct inner array[2][2]; |
6278 | * }; |
6279 | * |
6280 | * looks like: |
6281 | * |
6282 | * struct outer { |
6283 | * struct inner array_elem0; |
6284 | * struct inner array_elem1; |
6285 | * struct inner array_elem2; |
6286 | * struct inner array_elem3; |
6287 | * }; |
6288 | * |
6289 | * When accessing outer->array[1][0], it moves |
6290 | * moff to "array_elem2", set mtype to |
6291 | * "struct inner", and msize also becomes |
6292 | * sizeof(struct inner). Then most of the |
6293 | * remaining logic will fall through without |
6294 | * caring the current member is an array or |
6295 | * not. |
6296 | * |
6297 | * Unlike mtype/msize/moff, mtrue_end does not |
6298 | * change. The naming difference ("_true") tells |
6299 | * that it is not always corresponding to |
6300 | * the current mtype/msize/moff. |
6301 | * It is the true end of the current |
6302 | * member (i.e. array in this case). That |
6303 | * will allow an int array to be accessed like |
6304 | * a scratch space, |
6305 | * i.e. allow access beyond the size of |
6306 | * the array's element as long as it is |
6307 | * within the mtrue_end boundary. |
6308 | */ |
6309 | |
6310 | /* skip empty array */ |
6311 | if (moff == mtrue_end) |
6312 | continue; |
6313 | |
6314 | msize /= total_nelems; |
6315 | elem_idx = (off - moff) / msize; |
6316 | moff += elem_idx * msize; |
6317 | mtype = elem_type; |
6318 | mid = elem_id; |
6319 | } |
6320 | |
6321 | /* the 'off' we're looking for is either equal to start |
6322 | * of this field or inside of this struct |
6323 | */ |
		if (btf_type_is_struct(mtype)) {
6325 | /* our field must be inside that union or struct */ |
6326 | t = mtype; |
6327 | |
6328 | /* return if the offset matches the member offset */ |
6329 | if (off == moff) { |
6330 | *next_btf_id = mid; |
6331 | return WALK_STRUCT; |
6332 | } |
6333 | |
6334 | /* adjust offset we're looking for */ |
6335 | off -= moff; |
6336 | goto again; |
6337 | } |
6338 | |
		if (btf_type_is_ptr(mtype)) {
6340 | const struct btf_type *stype, *t; |
6341 | enum bpf_type_flag tmp_flag = 0; |
6342 | u32 id; |
6343 | |
6344 | if (msize != size || off != moff) { |
6345 | bpf_log(log, |
					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
6347 | mname, moff, tname, off, size); |
6348 | return -EACCES; |
6349 | } |
6350 | |
6351 | /* check type tag */ |
6352 | t = btf_type_by_id(btf, mtype->type); |
6353 | if (btf_type_is_type_tag(t)) { |
				tag_value = __btf_name_by_offset(btf, t->name_off);
				/* check __user tag */
				if (strcmp(tag_value, "user") == 0)
					tmp_flag = MEM_USER;
				/* check __percpu tag */
				if (strcmp(tag_value, "percpu") == 0)
					tmp_flag = MEM_PERCPU;
				/* check __rcu tag */
				if (strcmp(tag_value, "rcu") == 0)
6363 | tmp_flag = MEM_RCU; |
6364 | } |
6365 | |
			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
			if (btf_type_is_struct(stype)) {
6368 | *next_btf_id = id; |
6369 | *flag |= tmp_flag; |
6370 | if (field_name) |
6371 | *field_name = mname; |
6372 | return WALK_PTR; |
6373 | } |
6374 | } |
6375 | |
6376 | /* Allow more flexible access within an int as long as |
6377 | * it is within mtrue_end. |
6378 | * Since mtrue_end could be the end of an array, |
6379 | * that also allows using an array of int as a scratch |
6380 | * space. e.g. skb->cb[]. |
6381 | */ |
6382 | if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) { |
6383 | bpf_log(log, |
				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
6385 | mname, mtrue_end, tname, off, size); |
6386 | return -EACCES; |
6387 | } |
6388 | |
6389 | return WALK_SCALAR; |
6390 | } |
	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
6392 | return -EINVAL; |
6393 | } |
6394 | |
6395 | int btf_struct_access(struct bpf_verifier_log *log, |
6396 | const struct bpf_reg_state *reg, |
6397 | int off, int size, enum bpf_access_type atype __maybe_unused, |
6398 | u32 *next_btf_id, enum bpf_type_flag *flag, |
6399 | const char **field_name) |
6400 | { |
6401 | const struct btf *btf = reg->btf; |
6402 | enum bpf_type_flag tmp_flag = 0; |
6403 | const struct btf_type *t; |
6404 | u32 id = reg->btf_id; |
6405 | int err; |
6406 | |
	while (type_is_alloc(reg->type)) {
6408 | struct btf_struct_meta *meta; |
6409 | struct btf_record *rec; |
6410 | int i; |
6411 | |
		meta = btf_find_struct_meta(btf, id);
6413 | if (!meta) |
6414 | break; |
6415 | rec = meta->record; |
6416 | for (i = 0; i < rec->cnt; i++) { |
6417 | struct btf_field *field = &rec->fields[i]; |
6418 | u32 offset = field->offset; |
			if (off < offset + btf_field_type_size(field->type) && offset < off + size) {
				bpf_log(log,
					"direct access to %s is disallowed\n",
					btf_field_type_name(field->type));
6423 | return -EACCES; |
6424 | } |
6425 | } |
6426 | break; |
6427 | } |
6428 | |
6429 | t = btf_type_by_id(btf, id); |
6430 | do { |
		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
6432 | |
6433 | switch (err) { |
6434 | case WALK_PTR: |
6435 | /* For local types, the destination register cannot |
6436 | * become a pointer again. |
6437 | */ |
			if (type_is_alloc(reg->type))
6439 | return SCALAR_VALUE; |
6440 | /* If we found the pointer or scalar on t+off, |
6441 | * we're done. |
6442 | */ |
6443 | *next_btf_id = id; |
6444 | *flag = tmp_flag; |
6445 | return PTR_TO_BTF_ID; |
6446 | case WALK_SCALAR: |
6447 | return SCALAR_VALUE; |
6448 | case WALK_STRUCT: |
6449 | /* We found nested struct, so continue the search |
6450 | * by diving in it. At this point the offset is |
6451 | * aligned with the new type, so set it to 0. |
6452 | */ |
6453 | t = btf_type_by_id(btf, id); |
6454 | off = 0; |
6455 | break; |
6456 | default: |
6457 | /* It's either error or unknown return value.. |
6458 | * scream and leave. |
6459 | */ |
			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
6461 | return -EINVAL; |
6462 | return err; |
6463 | } |
6464 | } while (t); |
6465 | |
6466 | return -EINVAL; |
6467 | } |
6468 | |
/* Check that two BTF types, each specified as a BTF object + id, are exactly
6470 | * the same. Trivial ID check is not enough due to module BTFs, because we can |
6471 | * end up with two different module BTFs, but IDs point to the common type in |
6472 | * vmlinux BTF. |
6473 | */ |
6474 | bool btf_types_are_same(const struct btf *btf1, u32 id1, |
6475 | const struct btf *btf2, u32 id2) |
6476 | { |
6477 | if (id1 != id2) |
6478 | return false; |
6479 | if (btf1 == btf2) |
6480 | return true; |
6481 | return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); |
6482 | } |
6483 | |
6484 | bool btf_struct_ids_match(struct bpf_verifier_log *log, |
6485 | const struct btf *btf, u32 id, int off, |
6486 | const struct btf *need_btf, u32 need_type_id, |
6487 | bool strict) |
6488 | { |
6489 | const struct btf_type *type; |
6490 | enum bpf_type_flag flag = 0; |
6491 | int err; |
6492 | |
6493 | /* Are we already done? */ |
	if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
6495 | return true; |
6496 | /* In case of strict type match, we do not walk struct, the top level |
6497 | * type match must succeed. When strict is true, off should have already |
6498 | * been 0. |
6499 | */ |
6500 | if (strict) |
6501 | return false; |
6502 | again: |
6503 | type = btf_type_by_id(btf, id); |
6504 | if (!type) |
6505 | return false; |
	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
6507 | if (err != WALK_STRUCT) |
6508 | return false; |
6509 | |
6510 | /* We found nested struct object. If it matches |
6511 | * the requested ID, we're done. Otherwise let's |
6512 | * continue the search with offset 0 in the new |
6513 | * type. |
6514 | */ |
	if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
6516 | off = 0; |
6517 | goto again; |
6518 | } |
6519 | |
6520 | return true; |
6521 | } |
6522 | |
6523 | static int __get_type_size(struct btf *btf, u32 btf_id, |
6524 | const struct btf_type **ret_type) |
6525 | { |
6526 | const struct btf_type *t; |
6527 | |
6528 | *ret_type = btf_type_by_id(btf, 0); |
6529 | if (!btf_id) |
6530 | /* void */ |
6531 | return 0; |
6532 | t = btf_type_by_id(btf, btf_id); |
6533 | while (t && btf_type_is_modifier(t)) |
6534 | t = btf_type_by_id(btf, t->type); |
6535 | if (!t) |
6536 | return -EINVAL; |
6537 | *ret_type = t; |
6538 | if (btf_type_is_ptr(t)) |
		/* kernel size of pointer. Not BPF's size of pointer */
6540 | return sizeof(void *); |
6541 | if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) |
6542 | return t->size; |
6543 | return -EINVAL; |
6544 | } |
6545 | |
6546 | static u8 __get_type_fmodel_flags(const struct btf_type *t) |
6547 | { |
6548 | u8 flags = 0; |
6549 | |
6550 | if (__btf_type_is_struct(t)) |
6551 | flags |= BTF_FMODEL_STRUCT_ARG; |
6552 | if (btf_type_is_signed_int(t)) |
6553 | flags |= BTF_FMODEL_SIGNED_ARG; |
6554 | |
6555 | return flags; |
6556 | } |
6557 | |
6558 | int btf_distill_func_proto(struct bpf_verifier_log *log, |
6559 | struct btf *btf, |
6560 | const struct btf_type *func, |
6561 | const char *tname, |
6562 | struct btf_func_model *m) |
6563 | { |
6564 | const struct btf_param *args; |
6565 | const struct btf_type *t; |
6566 | u32 i, nargs; |
6567 | int ret; |
6568 | |
6569 | if (!func) { |
6570 | /* BTF function prototype doesn't match the verifier types. |
6571 | * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. |
6572 | */ |
6573 | for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { |
6574 | m->arg_size[i] = 8; |
6575 | m->arg_flags[i] = 0; |
6576 | } |
6577 | m->ret_size = 8; |
6578 | m->ret_flags = 0; |
6579 | m->nr_args = MAX_BPF_FUNC_REG_ARGS; |
6580 | return 0; |
6581 | } |
6582 | args = (const struct btf_param *)(func + 1); |
	nargs = btf_type_vlen(func);
	if (nargs > MAX_BPF_FUNC_ARGS) {
		bpf_log(log,
			"The function %s has %d arguments. Too many.\n",
			tname, nargs);
6588 | return -EINVAL; |
6589 | } |
	ret = __get_type_size(btf, func->type, &t);
	if (ret < 0 || __btf_type_is_struct(t)) {
		bpf_log(log,
			"The function %s return type %s is unsupported.\n",
			tname, btf_type_str(t));
6595 | return -EINVAL; |
6596 | } |
6597 | m->ret_size = ret; |
6598 | m->ret_flags = __get_type_fmodel_flags(t); |
6599 | |
6600 | for (i = 0; i < nargs; i++) { |
6601 | if (i == nargs - 1 && args[i].type == 0) { |
6602 | bpf_log(log, |
				"The function %s with variable args is unsupported.\n",
6604 | tname); |
6605 | return -EINVAL; |
6606 | } |
		ret = __get_type_size(btf, args[i].type, &t);
6608 | |
6609 | /* No support of struct argument size greater than 16 bytes */ |
6610 | if (ret < 0 || ret > 16) { |
6611 | bpf_log(log, |
				"The function %s arg%d type %s is unsupported.\n",
6613 | tname, i, btf_type_str(t)); |
6614 | return -EINVAL; |
6615 | } |
6616 | if (ret == 0) { |
6617 | bpf_log(log, |
				"The function %s has malformed void argument.\n",
6619 | tname); |
6620 | return -EINVAL; |
6621 | } |
6622 | m->arg_size[i] = ret; |
6623 | m->arg_flags[i] = __get_type_fmodel_flags(t); |
6624 | } |
6625 | m->nr_args = nargs; |
6626 | return 0; |
6627 | } |
6628 | |
6629 | /* Compare BTFs of two functions assuming only scalars and pointers to context. |
6630 | * t1 points to BTF_KIND_FUNC in btf1 |
6631 | * t2 points to BTF_KIND_FUNC in btf2 |
6632 | * Returns: |
6633 | * EINVAL - function prototype mismatch |
6634 | * EFAULT - verifier bug |
6635 | * 0 - 99% match. The last 1% is validated by the verifier. |
6636 | */ |
6637 | static int btf_check_func_type_match(struct bpf_verifier_log *log, |
6638 | struct btf *btf1, const struct btf_type *t1, |
6639 | struct btf *btf2, const struct btf_type *t2) |
6640 | { |
6641 | const struct btf_param *args1, *args2; |
6642 | const char *fn1, *fn2, *s1, *s2; |
6643 | u32 nargs1, nargs2, i; |
6644 | |
	fn1 = btf_name_by_offset(btf1, t1->name_off);
	fn2 = btf_name_by_offset(btf2, t2->name_off);
6647 | |
	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
		bpf_log(log, "%s() is not a global function\n", fn1);
6650 | return -EINVAL; |
6651 | } |
	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
		bpf_log(log, "%s() is not a global function\n", fn2);
6654 | return -EINVAL; |
6655 | } |
6656 | |
6657 | t1 = btf_type_by_id(btf1, t1->type); |
	if (!t1 || !btf_type_is_func_proto(t1))
		return -EFAULT;
	t2 = btf_type_by_id(btf2, t2->type);
	if (!t2 || !btf_type_is_func_proto(t2))
6662 | return -EFAULT; |
6663 | |
6664 | args1 = (const struct btf_param *)(t1 + 1); |
	nargs1 = btf_type_vlen(t1);
	args2 = (const struct btf_param *)(t2 + 1);
	nargs2 = btf_type_vlen(t2);
6668 | |
6669 | if (nargs1 != nargs2) { |
		bpf_log(log, "%s() has %d args while %s() has %d args\n",
6671 | fn1, nargs1, fn2, nargs2); |
6672 | return -EINVAL; |
6673 | } |
6674 | |
	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
	if (t1->info != t2->info) {
		bpf_log(log,
			"Return type %s of %s() doesn't match type %s of %s()\n",
			btf_type_str(t1), fn1,
			btf_type_str(t2), fn2);
6682 | return -EINVAL; |
6683 | } |
6684 | |
6685 | for (i = 0; i < nargs1; i++) { |
		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);

		if (t1->info != t2->info) {
			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
				i, fn1, btf_type_str(t1),
				fn2, btf_type_str(t2));
6693 | return -EINVAL; |
6694 | } |
		if (btf_type_has_size(t1) && t1->size != t2->size) {
			bpf_log(log,
				"arg%d in %s() has size %d while %s() has %d\n",
6698 | i, fn1, t1->size, |
6699 | fn2, t2->size); |
6700 | return -EINVAL; |
6701 | } |
6702 | |
6703 | /* global functions are validated with scalars and pointers |
6704 | * to context only. And only global functions can be replaced. |
6705 | * Hence type check only those types. |
6706 | */ |
6707 | if (btf_type_is_int(t1) || btf_is_any_enum(t1)) |
6708 | continue; |
6709 | if (!btf_type_is_ptr(t1)) { |
6710 | bpf_log(log, |
6711 | "arg%d in %s() has unrecognized type\n", |
6712 | i, fn1); |
6713 | return -EINVAL; |
6714 | } |
6715 | t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); |
6716 | t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); |
6717 | if (!btf_type_is_struct(t1)) { |
6718 | bpf_log(log, |
6719 | "arg%d in %s() is not a pointer to context\n", |
6720 | i, fn1); |
6721 | return -EINVAL; |
6722 | } |
6723 | if (!btf_type_is_struct(t2)) { |
6724 | bpf_log(log, |
6725 | "arg%d in %s() is not a pointer to context\n", |
6726 | i, fn2); |
6727 | return -EINVAL; |
6728 | } |
6729 | /* This is an optional check to make program writing easier. |
6730 | * Compare names of structs and report an error to the user. |
6731 | * btf_prepare_func_args() already checked that t2 struct |
6732 | * is a context type. btf_prepare_func_args() will check |
6733 | * later that t1 struct is a context type as well. |
6734 | */ |
6735 | s1 = btf_name_by_offset(btf1, t1->name_off); |
6736 | s2 = btf_name_by_offset(btf2, t2->name_off); |
6737 | if (strcmp(s1, s2)) { |
6738 | bpf_log(log, |
6739 | "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", |
6740 | i, fn1, s1, fn2, s2); |
6741 | return -EINVAL; |
6742 | } |
6743 | } |
6744 | return 0; |
6745 | } |
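     | /* Editorial example (sketch, not part of the original source): for an |
     | * freplace program to pass this check, its global function must mirror |
     | * the target's prototype, e.g. both sides declaring |
     | * |
     | *     int handle(struct xdp_md *ctx, int flags); |
     | * |
     | * A differing arg count, scalar size, or pointed-to struct name (the |
     | * optional name check above) is rejected with -EINVAL. |
     | */ |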
6746 | |
6747 | /* Compare BTFs of given program with BTF of target program */ |
6748 | int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
6749 | struct btf *btf2, const struct btf_type *t2) |
6750 | { |
6751 | struct btf *btf1 = prog->aux->btf; |
6752 | const struct btf_type *t1; |
6753 | u32 btf_id = 0; |
6754 | |
6755 | if (!prog->aux->func_info) { |
6756 | bpf_log(log, fmt: "Program extension requires BTF\n" ); |
6757 | return -EINVAL; |
6758 | } |
6759 | |
6760 | btf_id = prog->aux->func_info[0].type_id; |
6761 | if (!btf_id) |
6762 | return -EFAULT; |
6763 | |
6764 | t1 = btf_type_by_id(btf1, btf_id); |
6765 | if (!t1 || !btf_type_is_func(t1)) |
6766 | return -EFAULT; |
6767 | |
6768 | return btf_check_func_type_match(log, btf1, t1, btf2, t2); |
6769 | } |
6770 | |
6771 | static int btf_check_func_arg_match(struct bpf_verifier_env *env, |
6772 | const struct btf *btf, u32 func_id, |
6773 | struct bpf_reg_state *regs, |
6774 | bool ptr_to_mem_ok, |
6775 | bool processing_call) |
6776 | { |
6777 | enum bpf_prog_type prog_type = resolve_prog_type(env->prog); |
6778 | struct bpf_verifier_log *log = &env->log; |
6779 | const char *func_name, *ref_tname; |
6780 | const struct btf_type *t, *ref_t; |
6781 | const struct btf_param *args; |
6782 | u32 i, nargs, ref_id; |
6783 | int ret; |
6784 | |
6785 | t = btf_type_by_id(btf, func_id); |
6786 | if (!t || !btf_type_is_func(t)) { |
6787 | /* These checks were already done by the verifier while loading |
6788 | * struct bpf_func_info or in add_kfunc_call(). |
6789 | */ |
6790 | bpf_log(log, fmt: "BTF of func_id %u doesn't point to KIND_FUNC\n" , |
6791 | func_id); |
6792 | return -EFAULT; |
6793 | } |
6794 | func_name = btf_name_by_offset(btf, t->name_off); |
6795 | |
6796 | t = btf_type_by_id(btf, t->type); |
6797 | if (!t || !btf_type_is_func_proto(t)) { |
6798 | bpf_log(log, fmt: "Invalid BTF of func %s\n" , func_name); |
6799 | return -EFAULT; |
6800 | } |
6801 | args = (const struct btf_param *)(t + 1); |
6802 | nargs = btf_type_vlen(t); |
6803 | if (nargs > MAX_BPF_FUNC_REG_ARGS) { |
6804 | bpf_log(log, fmt: "Function %s has %d > %d args\n" , func_name, nargs, |
6805 | MAX_BPF_FUNC_REG_ARGS); |
6806 | return -EINVAL; |
6807 | } |
6808 | |
6809 | /* check that BTF function arguments match actual types that the |
6810 | * verifier sees. |
6811 | */ |
6812 | for (i = 0; i < nargs; i++) { |
6813 | enum bpf_arg_type arg_type = ARG_DONTCARE; |
6814 | u32 regno = i + 1; |
6815 | struct bpf_reg_state *reg = ®s[regno]; |
6816 | |
6817 | t = btf_type_skip_modifiers(btf, args[i].type, NULL); |
6818 | if (btf_type_is_scalar(t)) { |
6819 | if (reg->type == SCALAR_VALUE) |
6820 | continue; |
6821 | bpf_log(log, "R%d is not a scalar\n", regno); |
6822 | return -EINVAL; |
6823 | } |
6824 | |
6825 | if (!btf_type_is_ptr(t)) { |
6826 | bpf_log(log, "Unrecognized arg#%d type %s\n", |
6827 | i, btf_type_str(t)); |
6828 | return -EINVAL; |
6829 | } |
6830 | |
6831 | ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); |
6832 | ref_tname = btf_name_by_offset(btf, ref_t->name_off); |
6833 | |
6834 | ret = check_func_arg_reg_off(env, reg, regno, arg_type); |
6835 | if (ret < 0) |
6836 | return ret; |
6837 | |
6838 | if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { |
6839 | /* If function expects ctx type in BTF check that caller |
6840 | * is passing PTR_TO_CTX. |
6841 | */ |
6842 | if (reg->type != PTR_TO_CTX) { |
6843 | bpf_log(log, |
6844 | "arg#%d expected pointer to ctx, but got %s\n", |
6845 | i, btf_type_str(t)); |
6846 | return -EINVAL; |
6847 | } |
6848 | } else if (ptr_to_mem_ok && processing_call) { |
6849 | const struct btf_type *resolve_ret; |
6850 | u32 type_size; |
6851 | |
6852 | resolve_ret = btf_resolve_size(btf, ref_t, &type_size); |
6853 | if (IS_ERR(resolve_ret)) { |
6854 | bpf_log(log, |
6855 | "arg#%d reference type('%s %s') size cannot be determined: %ld\n", |
6856 | i, btf_type_str(ref_t), ref_tname, |
6857 | PTR_ERR(resolve_ret)); |
6858 | return -EINVAL; |
6859 | } |
6860 | |
6861 | if (check_mem_reg(env, reg, regno, type_size)) |
6862 | return -EINVAL; |
6863 | } else { |
6864 | bpf_log(log, "reg type unsupported for arg#%d function %s#%d\n", i, |
6865 | func_name, func_id); |
6866 | return -EINVAL; |
6867 | } |
6868 | } |
6869 | |
6870 | return 0; |
6871 | } |
6872 | |
6873 | /* Compare BTF of a function declaration with given bpf_reg_state. |
6874 | * Returns: |
6875 | * EFAULT - there is a verifier bug. Abort verification. |
6876 | * EINVAL - there is a type mismatch or BTF is not available. |
6877 | * 0 - BTF matches with what bpf_reg_state expects. |
6878 | * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. |
6879 | */ |
6880 | int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, |
6881 | struct bpf_reg_state *regs) |
6882 | { |
6883 | struct bpf_prog *prog = env->prog; |
6884 | struct btf *btf = prog->aux->btf; |
6885 | bool is_global; |
6886 | u32 btf_id; |
6887 | int err; |
6888 | |
6889 | if (!prog->aux->func_info) |
6890 | return -EINVAL; |
6891 | |
6892 | btf_id = prog->aux->func_info[subprog].type_id; |
6893 | if (!btf_id) |
6894 | return -EFAULT; |
6895 | |
6896 | if (prog->aux->func_info_aux[subprog].unreliable) |
6897 | return -EINVAL; |
6898 | |
6899 | is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; |
6900 | err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, false); |
6901 | |
6902 | /* Compiler optimizations can remove arguments from static functions |
6903 | * or mismatched type can be passed into a global function. |
6904 | * In such cases mark the function as unreliable from BTF point of view. |
6905 | */ |
6906 | if (err) |
6907 | prog->aux->func_info_aux[subprog].unreliable = true; |
6908 | return err; |
6909 | } |
6910 | |
6911 | /* Compare BTF of a function call with given bpf_reg_state. |
6912 | * Returns: |
6913 | * EFAULT - there is a verifier bug. Abort verification. |
6914 | * EINVAL - there is a type mismatch or BTF is not available. |
6915 | * 0 - BTF matches with what bpf_reg_state expects. |
6916 | * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. |
6917 | * |
6918 | * NOTE: the code is duplicated from btf_check_subprog_arg_match() |
6919 | * because btf_check_func_arg_match() is still doing both. Once that |
6920 | * function is split in 2, we can call from here btf_check_subprog_arg_match() |
6921 | * first, and then treat the calling part in a new code path. |
6922 | */ |
6923 | int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, |
6924 | struct bpf_reg_state *regs) |
6925 | { |
6926 | struct bpf_prog *prog = env->prog; |
6927 | struct btf *btf = prog->aux->btf; |
6928 | bool is_global; |
6929 | u32 btf_id; |
6930 | int err; |
6931 | |
6932 | if (!prog->aux->func_info) |
6933 | return -EINVAL; |
6934 | |
6935 | btf_id = prog->aux->func_info[subprog].type_id; |
6936 | if (!btf_id) |
6937 | return -EFAULT; |
6938 | |
6939 | if (prog->aux->func_info_aux[subprog].unreliable) |
6940 | return -EINVAL; |
6941 | |
6942 | is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; |
6943 | err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, true); |
6944 | |
6945 | /* Compiler optimizations can remove arguments from static functions |
6946 | * or mismatched type can be passed into a global function. |
6947 | * In such cases mark the function as unreliable from BTF point of view. |
6948 | */ |
6949 | if (err) |
6950 | prog->aux->func_info_aux[subprog].unreliable = true; |
6951 | return err; |
6952 | } |
6953 | |
6954 | /* Convert BTF of a function into bpf_reg_state if possible |
6955 | * Returns: |
6956 | * EFAULT - there is a verifier bug. Abort verification. |
6957 | * EINVAL - cannot convert BTF. |
6958 | * 0 - Successfully converted BTF into bpf_reg_state |
6959 | * (either PTR_TO_CTX or SCALAR_VALUE). |
6960 | */ |
6961 | int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, |
6962 | struct bpf_reg_state *regs, bool is_ex_cb) |
6963 | { |
6964 | struct bpf_verifier_log *log = &env->log; |
6965 | struct bpf_prog *prog = env->prog; |
6966 | enum bpf_prog_type prog_type = prog->type; |
6967 | struct btf *btf = prog->aux->btf; |
6968 | const struct btf_param *args; |
6969 | const struct btf_type *t, *ref_t; |
6970 | u32 i, nargs, btf_id; |
6971 | const char *tname; |
6972 | |
6973 | if (!prog->aux->func_info || |
6974 | prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) { |
6975 | bpf_log(log, fmt: "Verifier bug\n" ); |
6976 | return -EFAULT; |
6977 | } |
6978 | |
6979 | btf_id = prog->aux->func_info[subprog].type_id; |
6980 | if (!btf_id) { |
6981 | bpf_log(log, fmt: "Global functions need valid BTF\n" ); |
6982 | return -EFAULT; |
6983 | } |
6984 | |
6985 | t = btf_type_by_id(btf, btf_id); |
6986 | if (!t || !btf_type_is_func(t)) { |
6987 | /* These checks were already done by the verifier while loading |
6988 | * struct bpf_func_info |
6989 | */ |
6990 | bpf_log(log, fmt: "BTF of func#%d doesn't point to KIND_FUNC\n" , |
6991 | subprog); |
6992 | return -EFAULT; |
6993 | } |
6994 | tname = btf_name_by_offset(btf, offset: t->name_off); |
6995 | |
6996 | if (log->level & BPF_LOG_LEVEL) |
6997 | bpf_log(log, fmt: "Validating %s() func#%d...\n" , |
6998 | tname, subprog); |
6999 | |
7000 | if (prog->aux->func_info_aux[subprog].unreliable) { |
7001 | bpf_log(log, fmt: "Verifier bug in function %s()\n" , tname); |
7002 | return -EFAULT; |
7003 | } |
7004 | if (prog_type == BPF_PROG_TYPE_EXT) |
7005 | prog_type = prog->aux->dst_prog->type; |
7006 | |
7007 | t = btf_type_by_id(btf, t->type); |
7008 | if (!t || !btf_type_is_func_proto(t)) { |
7009 | bpf_log(log, fmt: "Invalid type of function %s()\n" , tname); |
7010 | return -EFAULT; |
7011 | } |
7012 | args = (const struct btf_param *)(t + 1); |
7013 | nargs = btf_type_vlen(t); |
7014 | if (nargs > MAX_BPF_FUNC_REG_ARGS) { |
7015 | bpf_log(log, fmt: "Global function %s() with %d > %d args. Buggy compiler.\n" , |
7016 | tname, nargs, MAX_BPF_FUNC_REG_ARGS); |
7017 | return -EINVAL; |
7018 | } |
7019 | /* check that function returns int, exception cb also requires this */ |
7020 | t = btf_type_by_id(btf, t->type); |
7021 | while (btf_type_is_modifier(t)) |
7022 | t = btf_type_by_id(btf, t->type); |
7023 | if (!btf_type_is_int(t) && !btf_is_any_enum(t)) { |
7024 | bpf_log(log, |
7025 | fmt: "Global function %s() doesn't return scalar. Only those are supported.\n" , |
7026 | tname); |
7027 | return -EINVAL; |
7028 | } |
7029 | /* Convert BTF function arguments into verifier types. |
7030 | * Only PTR_TO_CTX and SCALAR are supported atm. |
7031 | */ |
7032 | for (i = 0; i < nargs; i++) { |
7033 | struct bpf_reg_state *reg = ®s[i + 1]; |
7034 | |
7035 | t = btf_type_by_id(btf, args[i].type); |
7036 | while (btf_type_is_modifier(t)) |
7037 | t = btf_type_by_id(btf, t->type); |
7038 | if (btf_type_is_int(t) || btf_is_any_enum(t)) { |
7039 | reg->type = SCALAR_VALUE; |
7040 | continue; |
7041 | } |
7042 | if (btf_type_is_ptr(t)) { |
7043 | if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { |
7044 | reg->type = PTR_TO_CTX; |
7045 | continue; |
7046 | } |
7047 | |
7048 | t = btf_type_skip_modifiers(btf, t->type, NULL); |
7049 | |
7050 | ref_t = btf_resolve_size(btf, t, &reg->mem_size); |
7051 | if (IS_ERR(ref_t)) { |
7052 | bpf_log(log, |
7053 | "arg#%d reference type('%s %s') size cannot be determined: %ld\n", |
7054 | i, btf_type_str(t), btf_name_by_offset(btf, t->name_off), |
7055 | PTR_ERR(ref_t)); |
7056 | return -EINVAL; |
7057 | } |
7058 | |
7059 | reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; |
7060 | reg->id = ++env->id_gen; |
7061 | |
7062 | continue; |
7063 | } |
7064 | bpf_log(log, fmt: "Arg#%d type %s in %s() is not supported yet.\n" , |
7065 | i, btf_type_str(t), tname); |
7066 | return -EINVAL; |
7067 | } |
7068 | /* We have already ensured that the callback returns an integer, just |
7069 | * like all global subprogs. We still need to check that it takes only |
7070 | * a single scalar argument. |
7071 | */ |
7072 | if (is_ex_cb && (nargs != 1 || regs[BPF_REG_1].type != SCALAR_VALUE)) { |
7073 | bpf_log(log, "exception cb only supports single integer argument\n"); |
7074 | return -EINVAL; |
7075 | } |
7076 | return 0; |
7077 | } |
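     | /* Editorial example (sketch): for a SCHED_CLS program with a global |
     | * subprog declared as |
     | * |
     | *     int foo(struct __sk_buff *skb, int len, __u64 *val); |
     | * |
     | * the loop above sets R1 to PTR_TO_CTX (skb is the prog's ctx type), |
     | * R2 to SCALAR_VALUE and R3 to PTR_TO_MEM | PTR_MAYBE_NULL with |
     | * mem_size taken from btf_resolve_size(). |
     | */ |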
7078 | |
7079 | static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, |
7080 | struct btf_show *show) |
7081 | { |
7082 | const struct btf_type *t = btf_type_by_id(btf, type_id); |
7083 | |
7084 | show->btf = btf; |
7085 | memset(&show->state, 0, sizeof(show->state)); |
7086 | memset(&show->obj, 0, sizeof(show->obj)); |
7087 | |
7088 | btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); |
7089 | } |
7090 | |
7091 | static void btf_seq_show(struct btf_show *show, const char *fmt, |
7092 | va_list args) |
7093 | { |
7094 | seq_vprintf((struct seq_file *)show->target, fmt, args); |
7095 | } |
7096 | |
7097 | int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, |
7098 | void *obj, struct seq_file *m, u64 flags) |
7099 | { |
7100 | struct btf_show sseq; |
7101 | |
7102 | sseq.target = m; |
7103 | sseq.showfn = btf_seq_show; |
7104 | sseq.flags = flags; |
7105 | |
7106 | btf_type_show(btf, type_id, obj, &sseq); |
7107 | |
7108 | return sseq.state.status; |
7109 | } |
7110 | |
7111 | void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, |
7112 | struct seq_file *m) |
7113 | { |
7114 | (void) btf_type_seq_show_flags(btf, type_id, obj, m, |
7115 | BTF_SHOW_NONAME | BTF_SHOW_COMPACT | |
7116 | BTF_SHOW_ZERO | BTF_SHOW_UNSAFE); |
7117 | } |
7118 | |
7119 | struct btf_show_snprintf { |
7120 | struct btf_show show; |
7121 | int len_left; /* space left in string */ |
7122 | int len; /* length we would have written */ |
7123 | }; |
7124 | |
7125 | static void btf_snprintf_show(struct btf_show *show, const char *fmt, |
7126 | va_list args) |
7127 | { |
7128 | struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; |
7129 | int len; |
7130 | |
7131 | len = vsnprintf(show->target, ssnprintf->len_left, fmt, args); |
7132 | |
7133 | if (len < 0) { |
7134 | ssnprintf->len_left = 0; |
7135 | ssnprintf->len = len; |
7136 | } else if (len >= ssnprintf->len_left) { |
7137 | /* no space, drive on to get length we would have written */ |
7138 | ssnprintf->len_left = 0; |
7139 | ssnprintf->len += len; |
7140 | } else { |
7141 | ssnprintf->len_left -= len; |
7142 | ssnprintf->len += len; |
7143 | show->target += len; |
7144 | } |
7145 | } |
7146 | |
7147 | int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj, |
7148 | char *buf, int len, u64 flags) |
7149 | { |
7150 | struct btf_show_snprintf ssnprintf; |
7151 | |
7152 | ssnprintf.show.target = buf; |
7153 | ssnprintf.show.flags = flags; |
7154 | ssnprintf.show.showfn = btf_snprintf_show; |
7155 | ssnprintf.len_left = len; |
7156 | ssnprintf.len = 0; |
7157 | |
7158 | btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf); |
7159 | |
7160 | /* If we encountered an error, return it. */ |
7161 | if (ssnprintf.show.state.status) |
7162 | return ssnprintf.show.state.status; |
7163 | |
7164 | /* Otherwise return length we would have written */ |
7165 | return ssnprintf.len; |
7166 | } |
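     | /* Usage sketch (editorial addition): a caller with a valid btf, type_id |
     | * and object can do |
     | * |
     | *     char buf[256]; |
     | *     int n = btf_type_snprintf_show(btf, type_id, obj, buf, |
     | *                                    sizeof(buf), BTF_SHOW_COMPACT); |
     | * |
     | * As with snprintf(), n >= sizeof(buf) means the output was truncated; |
     | * a negative n is an error reported by the show machinery. |
     | */ |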
7167 | |
7168 | #ifdef CONFIG_PROC_FS |
7169 | static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) |
7170 | { |
7171 | const struct btf *btf = filp->private_data; |
7172 | |
7173 | seq_printf(m, fmt: "btf_id:\t%u\n" , btf->id); |
7174 | } |
7175 | #endif |
7176 | |
7177 | static int btf_release(struct inode *inode, struct file *filp) |
7178 | { |
7179 | btf_put(filp->private_data); |
7180 | return 0; |
7181 | } |
7182 | |
7183 | const struct file_operations btf_fops = { |
7184 | #ifdef CONFIG_PROC_FS |
7185 | .show_fdinfo = bpf_btf_show_fdinfo, |
7186 | #endif |
7187 | .release = btf_release, |
7188 | }; |
7189 | |
7190 | static int __btf_new_fd(struct btf *btf) |
7191 | { |
7192 | return anon_inode_getfd(name: "btf" , fops: &btf_fops, priv: btf, O_RDONLY | O_CLOEXEC); |
7193 | } |
7194 | |
7195 | int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) |
7196 | { |
7197 | struct btf *btf; |
7198 | int ret; |
7199 | |
7200 | btf = btf_parse(attr, uattr, uattr_size); |
7201 | if (IS_ERR(btf)) |
7202 | return PTR_ERR(btf); |
7203 | |
7204 | ret = btf_alloc_id(btf); |
7205 | if (ret) { |
7206 | btf_free(btf); |
7207 | return ret; |
7208 | } |
7209 | |
7210 | /* |
7211 | * The BTF ID is published to the userspace. |
7212 | * All BTF free must go through call_rcu() from |
7213 | * now on (i.e. free by calling btf_put()). |
7214 | */ |
7215 | |
7216 | ret = __btf_new_fd(btf); |
7217 | if (ret < 0) |
7218 | btf_put(btf); |
7219 | |
7220 | return ret; |
7221 | } |
7222 | |
7223 | struct btf *btf_get_by_fd(int fd) |
7224 | { |
7225 | struct btf *btf; |
7226 | struct fd f; |
7227 | |
7228 | f = fdget(fd); |
7229 | |
7230 | if (!f.file) |
7231 | return ERR_PTR(-EBADF); |
7232 | |
7233 | if (f.file->f_op != &btf_fops) { |
7234 | fdput(f); |
7235 | return ERR_PTR(-EINVAL); |
7236 | } |
7237 | |
7238 | btf = f.file->private_data; |
7239 | refcount_inc(&btf->refcnt); |
7240 | fdput(f); |
7241 | |
7242 | return btf; |
7243 | } |
7244 | |
7245 | int btf_get_info_by_fd(const struct btf *btf, |
7246 | const union bpf_attr *attr, |
7247 | union bpf_attr __user *uattr) |
7248 | { |
7249 | struct bpf_btf_info __user *uinfo; |
7250 | struct bpf_btf_info info; |
7251 | u32 info_copy, btf_copy; |
7252 | void __user *ubtf; |
7253 | char __user *uname; |
7254 | u32 uinfo_len, uname_len, name_len; |
7255 | int ret = 0; |
7256 | |
7257 | uinfo = u64_to_user_ptr(attr->info.info); |
7258 | uinfo_len = attr->info.info_len; |
7259 | |
7260 | info_copy = min_t(u32, uinfo_len, sizeof(info)); |
7261 | memset(&info, 0, sizeof(info)); |
7262 | if (copy_from_user(&info, uinfo, info_copy)) |
7263 | return -EFAULT; |
7264 | |
7265 | info.id = btf->id; |
7266 | ubtf = u64_to_user_ptr(info.btf); |
7267 | btf_copy = min_t(u32, btf->data_size, info.btf_size); |
7268 | if (copy_to_user(ubtf, btf->data, btf_copy)) |
7269 | return -EFAULT; |
7270 | info.btf_size = btf->data_size; |
7271 | |
7272 | info.kernel_btf = btf->kernel_btf; |
7273 | |
7274 | uname = u64_to_user_ptr(info.name); |
7275 | uname_len = info.name_len; |
7276 | if (!uname ^ !uname_len) |
7277 | return -EINVAL; |
7278 | |
7279 | name_len = strlen(btf->name); |
7280 | info.name_len = name_len; |
7281 | |
7282 | if (uname) { |
7283 | if (uname_len >= name_len + 1) { |
7284 | if (copy_to_user(uname, btf->name, name_len + 1)) |
7285 | return -EFAULT; |
7286 | } else { |
7287 | char zero = '\0'; |
7288 | |
7289 | if (copy_to_user(uname, btf->name, uname_len - 1)) |
7290 | return -EFAULT; |
7291 | if (put_user(zero, uname + uname_len - 1)) |
7292 | return -EFAULT; |
7293 | /* let user-space know about too short buffer */ |
7294 | ret = -ENOSPC; |
7295 | } |
7296 | } |
7297 | |
7298 | if (copy_to_user(uinfo, &info, info_copy) || |
7299 | put_user(info_copy, &uattr->info.info_len)) |
7300 | return -EFAULT; |
7301 | |
7302 | return ret; |
7303 | } |
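     | /* Editorial note: userspace typically performs this BPF_OBJ_GET_INFO_BY_FD |
     | * call twice: first with btf_size and name_len of zero to learn the sizes |
     | * written back into bpf_btf_info, then again with buffers large enough |
     | * for the raw BTF data and the name. |
     | */ |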
7304 | |
7305 | int btf_get_fd_by_id(u32 id) |
7306 | { |
7307 | struct btf *btf; |
7308 | int fd; |
7309 | |
7310 | rcu_read_lock(); |
7311 | btf = idr_find(&btf_idr, id); |
7312 | if (!btf || !refcount_inc_not_zero(&btf->refcnt)) |
7313 | btf = ERR_PTR(-ENOENT); |
7314 | rcu_read_unlock(); |
7315 | |
7316 | if (IS_ERR(btf)) |
7317 | return PTR_ERR(btf); |
7318 | |
7319 | fd = __btf_new_fd(btf); |
7320 | if (fd < 0) |
7321 | btf_put(btf); |
7322 | |
7323 | return fd; |
7324 | } |
7325 | |
7326 | u32 btf_obj_id(const struct btf *btf) |
7327 | { |
7328 | return btf->id; |
7329 | } |
7330 | |
7331 | bool btf_is_kernel(const struct btf *btf) |
7332 | { |
7333 | return btf->kernel_btf; |
7334 | } |
7335 | |
7336 | bool btf_is_module(const struct btf *btf) |
7337 | { |
7338 | return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0; |
7339 | } |
7340 | |
7341 | enum { |
7342 | BTF_MODULE_F_LIVE = (1 << 0), |
7343 | }; |
7344 | |
7345 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7346 | struct btf_module { |
7347 | struct list_head list; |
7348 | struct module *module; |
7349 | struct btf *btf; |
7350 | struct bin_attribute *sysfs_attr; |
7351 | int flags; |
7352 | }; |
7353 | |
7354 | static LIST_HEAD(btf_modules); |
7355 | static DEFINE_MUTEX(btf_module_mutex); |
7356 | |
7357 | static ssize_t |
7358 | btf_module_read(struct file *file, struct kobject *kobj, |
7359 | struct bin_attribute *bin_attr, |
7360 | char *buf, loff_t off, size_t len) |
7361 | { |
7362 | const struct btf *btf = bin_attr->private; |
7363 | |
7364 | memcpy(buf, btf->data + off, len); |
7365 | return len; |
7366 | } |
7367 | |
7368 | static void purge_cand_cache(struct btf *btf); |
7369 | |
7370 | static int btf_module_notify(struct notifier_block *nb, unsigned long op, |
7371 | void *module) |
7372 | { |
7373 | struct btf_module *btf_mod, *tmp; |
7374 | struct module *mod = module; |
7375 | struct btf *btf; |
7376 | int err = 0; |
7377 | |
7378 | if (mod->btf_data_size == 0 || |
7379 | (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE && |
7380 | op != MODULE_STATE_GOING)) |
7381 | goto out; |
7382 | |
7383 | switch (op) { |
7384 | case MODULE_STATE_COMING: |
7385 | btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); |
7386 | if (!btf_mod) { |
7387 | err = -ENOMEM; |
7388 | goto out; |
7389 | } |
7390 | btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); |
7391 | if (IS_ERR(btf)) { |
7392 | kfree(btf_mod); |
7393 | if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) { |
7394 | pr_warn("failed to validate module [%s] BTF: %ld\n" , |
7395 | mod->name, PTR_ERR(btf)); |
7396 | err = PTR_ERR(btf); |
7397 | } else { |
7398 | pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n" ); |
7399 | } |
7400 | goto out; |
7401 | } |
7402 | err = btf_alloc_id(btf); |
7403 | if (err) { |
7404 | btf_free(btf); |
7405 | kfree(btf_mod); |
7406 | goto out; |
7407 | } |
7408 | |
7409 | purge_cand_cache(NULL); |
7410 | mutex_lock(&btf_module_mutex); |
7411 | btf_mod->module = module; |
7412 | btf_mod->btf = btf; |
7413 | list_add(&btf_mod->list, &btf_modules); |
7414 | mutex_unlock(&btf_module_mutex); |
7415 | |
7416 | if (IS_ENABLED(CONFIG_SYSFS)) { |
7417 | struct bin_attribute *attr; |
7418 | |
7419 | attr = kzalloc(sizeof(*attr), GFP_KERNEL); |
7420 | if (!attr) |
7421 | goto out; |
7422 | |
7423 | sysfs_bin_attr_init(attr); |
7424 | attr->attr.name = btf->name; |
7425 | attr->attr.mode = 0444; |
7426 | attr->size = btf->data_size; |
7427 | attr->private = btf; |
7428 | attr->read = btf_module_read; |
7429 | |
7430 | err = sysfs_create_bin_file(btf_kobj, attr); |
7431 | if (err) { |
7432 | pr_warn("failed to register module [%s] BTF in sysfs: %d\n" , |
7433 | mod->name, err); |
7434 | kfree(attr); |
7435 | err = 0; |
7436 | goto out; |
7437 | } |
7438 | |
7439 | btf_mod->sysfs_attr = attr; |
7440 | } |
7441 | |
7442 | break; |
7443 | case MODULE_STATE_LIVE: |
7444 | mutex_lock(&btf_module_mutex); |
7445 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7446 | if (btf_mod->module != module) |
7447 | continue; |
7448 | |
7449 | btf_mod->flags |= BTF_MODULE_F_LIVE; |
7450 | break; |
7451 | } |
7452 | mutex_unlock(&btf_module_mutex); |
7453 | break; |
7454 | case MODULE_STATE_GOING: |
7455 | mutex_lock(&btf_module_mutex); |
7456 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7457 | if (btf_mod->module != module) |
7458 | continue; |
7459 | |
7460 | list_del(&btf_mod->list); |
7461 | if (btf_mod->sysfs_attr) |
7462 | sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr); |
7463 | purge_cand_cache(btf_mod->btf); |
7464 | btf_put(btf_mod->btf); |
7465 | kfree(btf_mod->sysfs_attr); |
7466 | kfree(btf_mod); |
7467 | break; |
7468 | } |
7469 | mutex_unlock(&btf_module_mutex); |
7470 | break; |
7471 | } |
7472 | out: |
7473 | return notifier_from_errno(err); |
7474 | } |
7475 | |
7476 | static struct notifier_block btf_module_nb = { |
7477 | .notifier_call = btf_module_notify, |
7478 | }; |
7479 | |
7480 | static int __init btf_module_init(void) |
7481 | { |
7482 | register_module_notifier(&btf_module_nb); |
7483 | return 0; |
7484 | } |
7485 | |
7486 | fs_initcall(btf_module_init); |
7487 | #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ |
7488 | |
7489 | struct module *btf_try_get_module(const struct btf *btf) |
7490 | { |
7491 | struct module *res = NULL; |
7492 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7493 | struct btf_module *btf_mod, *tmp; |
7494 | |
7495 | mutex_lock(&btf_module_mutex); |
7496 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7497 | if (btf_mod->btf != btf) |
7498 | continue; |
7499 | |
7500 | /* We must only consider modules whose __init routine has |
7501 | * finished, hence we must check for BTF_MODULE_F_LIVE flag, |
7502 | * which is set from the notifier callback for |
7503 | * MODULE_STATE_LIVE. |
7504 | */ |
7505 | if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module)) |
7506 | res = btf_mod->module; |
7507 | |
7508 | break; |
7509 | } |
7510 | mutex_unlock(&btf_module_mutex); |
7511 | #endif |
7512 | |
7513 | return res; |
7514 | } |
7515 | |
7516 | /* Returns struct btf corresponding to the struct module. |
7517 | * This function can return NULL or ERR_PTR. |
7518 | */ |
7519 | static struct btf *btf_get_module_btf(const struct module *module) |
7520 | { |
7521 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7522 | struct btf_module *btf_mod, *tmp; |
7523 | #endif |
7524 | struct btf *btf = NULL; |
7525 | |
7526 | if (!module) { |
7527 | btf = bpf_get_btf_vmlinux(); |
7528 | if (!IS_ERR_OR_NULL(btf)) |
7529 | btf_get(btf); |
7530 | return btf; |
7531 | } |
7532 | |
7533 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7534 | mutex_lock(&btf_module_mutex); |
7535 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7536 | if (btf_mod->module != module) |
7537 | continue; |
7538 | |
7539 | btf_get(btf_mod->btf); |
7540 | btf = btf_mod->btf; |
7541 | break; |
7542 | } |
7543 | mutex_unlock(&btf_module_mutex); |
7544 | #endif |
7545 | |
7546 | return btf; |
7547 | } |
7548 | |
7549 | BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) |
7550 | { |
7551 | struct btf *btf = NULL; |
7552 | int btf_obj_fd = 0; |
7553 | long ret; |
7554 | |
7555 | if (flags) |
7556 | return -EINVAL; |
7557 | |
7558 | if (name_sz <= 1 || name[name_sz - 1]) |
7559 | return -EINVAL; |
7560 | |
7561 | ret = bpf_find_btf_id(name, kind, &btf); |
7562 | if (ret > 0 && btf_is_module(btf)) { |
7563 | btf_obj_fd = __btf_new_fd(btf); |
7564 | if (btf_obj_fd < 0) { |
7565 | btf_put(btf); |
7566 | return btf_obj_fd; |
7567 | } |
7568 | return ret | (((u64)btf_obj_fd) << 32); |
7569 | } |
7570 | if (ret > 0) |
7571 | btf_put(btf); |
7572 | return ret; |
7573 | } |
7574 | |
7575 | const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { |
7576 | .func = bpf_btf_find_by_name_kind, |
7577 | .gpl_only = false, |
7578 | .ret_type = RET_INTEGER, |
7579 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
7580 | .arg2_type = ARG_CONST_SIZE, |
7581 | .arg3_type = ARG_ANYTHING, |
7582 | .arg4_type = ARG_ANYTHING, |
7583 | }; |
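     | /* Editorial note: on success the helper's return value packs the matching |
     | * BTF type id in the lower 32 bits and, for module BTF, a newly created |
     | * fd for that module's BTF object in the upper 32 bits (see the shift |
     | * above); the caller owns and must eventually close that fd. |
     | */ |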
7584 | |
7585 | BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) |
7586 | #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type) |
7587 | BTF_TRACING_TYPE_xxx |
7588 | #undef BTF_TRACING_TYPE |
7589 | |
7590 | static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name, |
7591 | const struct btf_type *func, u32 func_flags) |
7592 | { |
7593 | u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); |
7594 | const char *name, *sfx, *iter_name; |
7595 | const struct btf_param *arg; |
7596 | const struct btf_type *t; |
7597 | char exp_name[128]; |
7598 | u32 nr_args; |
7599 | |
7600 | /* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */ |
7601 | if (!flags || (flags & (flags - 1))) |
7602 | return -EINVAL; |
7603 | |
7604 | /* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */ |
7605 | nr_args = btf_type_vlen(func); |
7606 | if (nr_args < 1) |
7607 | return -EINVAL; |
7608 | |
7609 | arg = &btf_params(func)[0]; |
7610 | t = btf_type_skip_modifiers(btf, arg->type, NULL); |
7611 | if (!t || !btf_type_is_ptr(t)) |
7612 | return -EINVAL; |
7613 | t = btf_type_skip_modifiers(btf, t->type, NULL); |
7614 | if (!t || !__btf_type_is_struct(t)) |
7615 | return -EINVAL; |
7616 | |
7617 | name = btf_name_by_offset(btf, t->name_off); |
7618 | if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1)) |
7619 | return -EINVAL; |
7620 | |
7621 | /* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to |
7622 | * fit nicely in stack slots |
7623 | */ |
7624 | if (t->size == 0 || (t->size % 8)) |
7625 | return -EINVAL; |
7626 | |
7627 | /* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *) |
7628 | * naming pattern |
7629 | */ |
7630 | iter_name = name + sizeof(ITER_PREFIX) - 1; |
7631 | if (flags & KF_ITER_NEW) |
7632 | sfx = "new"; |
7633 | else if (flags & KF_ITER_NEXT) |
7634 | sfx = "next"; |
7635 | else /* (flags & KF_ITER_DESTROY) */ |
7636 | sfx = "destroy"; |
7637 | |
7638 | snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx); |
7639 | if (strcmp(func_name, exp_name)) |
7640 | return -EINVAL; |
7641 | |
7642 | /* only iter constructor should have extra arguments */ |
7643 | if (!(flags & KF_ITER_NEW) && nr_args != 1) |
7644 | return -EINVAL; |
7645 | |
7646 | if (flags & KF_ITER_NEXT) { |
7647 | /* bpf_iter_<type>_next() should return pointer */ |
7648 | t = btf_type_skip_modifiers(btf, func->type, NULL); |
7649 | if (!t || !btf_type_is_ptr(t)) |
7650 | return -EINVAL; |
7651 | } |
7652 | |
7653 | if (flags & KF_ITER_DESTROY) { |
7654 | /* bpf_iter_<type>_destroy() should return void */ |
7655 | t = btf_type_by_id(btf, func->type); |
7656 | if (!t || !btf_type_is_void(t)) |
7657 | return -EINVAL; |
7658 | } |
7659 | |
7660 | return 0; |
7661 | } |
7662 | |
7663 | static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags) |
7664 | { |
7665 | const struct btf_type *func; |
7666 | const char *func_name; |
7667 | int err; |
7668 | |
7669 | /* any kfunc should be FUNC -> FUNC_PROTO */ |
7670 | func = btf_type_by_id(btf, func_id); |
7671 | if (!func || !btf_type_is_func(func)) |
7672 | return -EINVAL; |
7673 | |
7674 | /* sanity check kfunc name */ |
7675 | func_name = btf_name_by_offset(btf, func->name_off); |
7676 | if (!func_name || !func_name[0]) |
7677 | return -EINVAL; |
7678 | |
7679 | func = btf_type_by_id(btf, func->type); |
7680 | if (!func || !btf_type_is_func_proto(func)) |
7681 | return -EINVAL; |
7682 | |
7683 | if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) { |
7684 | err = btf_check_iter_kfuncs(btf, func_name, func, func_flags); |
7685 | if (err) |
7686 | return err; |
7687 | } |
7688 | |
7689 | return 0; |
7690 | } |
7691 | |
7692 | /* Kernel Function (kfunc) BTF ID set registration API */ |
7693 | |
7694 | static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, |
7695 | const struct btf_kfunc_id_set *kset) |
7696 | { |
7697 | struct btf_kfunc_hook_filter *hook_filter; |
7698 | struct btf_id_set8 *add_set = kset->set; |
7699 | bool vmlinux_set = !btf_is_module(btf); |
7700 | bool add_filter = !!kset->filter; |
7701 | struct btf_kfunc_set_tab *tab; |
7702 | struct btf_id_set8 *set; |
7703 | u32 set_cnt; |
7704 | int ret; |
7705 | |
7706 | if (hook >= BTF_KFUNC_HOOK_MAX) { |
7707 | ret = -EINVAL; |
7708 | goto end; |
7709 | } |
7710 | |
7711 | if (!add_set->cnt) |
7712 | return 0; |
7713 | |
7714 | tab = btf->kfunc_set_tab; |
7715 | |
7716 | if (tab && add_filter) { |
7717 | u32 i; |
7718 | |
7719 | hook_filter = &tab->hook_filters[hook]; |
7720 | for (i = 0; i < hook_filter->nr_filters; i++) { |
7721 | if (hook_filter->filters[i] == kset->filter) { |
7722 | add_filter = false; |
7723 | break; |
7724 | } |
7725 | } |
7726 | |
7727 | if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) { |
7728 | ret = -E2BIG; |
7729 | goto end; |
7730 | } |
7731 | } |
7732 | |
7733 | if (!tab) { |
7734 | tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN); |
7735 | if (!tab) |
7736 | return -ENOMEM; |
7737 | btf->kfunc_set_tab = tab; |
7738 | } |
7739 | |
7740 | set = tab->sets[hook]; |
7741 | /* Warn when register_btf_kfunc_id_set is called twice for the same hook |
7742 | * for module sets. |
7743 | */ |
7744 | if (WARN_ON_ONCE(set && !vmlinux_set)) { |
7745 | ret = -EINVAL; |
7746 | goto end; |
7747 | } |
7748 | |
7749 | /* We don't need to allocate, concatenate, and sort module sets, because |
7750 | * only one is allowed per hook. Hence, we can directly assign the |
7751 | * pointer and return. |
7752 | */ |
7753 | if (!vmlinux_set) { |
7754 | tab->sets[hook] = add_set; |
7755 | goto do_add_filter; |
7756 | } |
7757 | |
7758 | /* In case of vmlinux sets, there may be more than one set being |
7759 | * registered per hook. To create a unified set, we allocate a new set |
7760 | * and concatenate all individual sets being registered. While each set |
7761 | * is individually sorted, they may become unsorted when concatenated, |
7762 | * hence re-sorting the final set again is required to make binary |
7763 | * searching the set using btf_id_set8_contains function work. |
7764 | */ |
7765 | set_cnt = set ? set->cnt : 0; |
7766 | |
7767 | if (set_cnt > U32_MAX - add_set->cnt) { |
7768 | ret = -EOVERFLOW; |
7769 | goto end; |
7770 | } |
7771 | |
7772 | if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { |
7773 | ret = -E2BIG; |
7774 | goto end; |
7775 | } |
7776 | |
7777 | /* Grow set */ |
7778 | set = krealloc(tab->sets[hook], |
7779 | offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]), |
7780 | GFP_KERNEL | __GFP_NOWARN); |
7781 | if (!set) { |
7782 | ret = -ENOMEM; |
7783 | goto end; |
7784 | } |
7785 | |
7786 | /* For newly allocated set, initialize set->cnt to 0 */ |
7787 | if (!tab->sets[hook]) |
7788 | set->cnt = 0; |
7789 | tab->sets[hook] = set; |
7790 | |
7791 | /* Concatenate the two sets */ |
7792 | memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); |
7793 | set->cnt += add_set->cnt; |
7794 | |
7795 | sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL); |
7796 | |
7797 | do_add_filter: |
7798 | if (add_filter) { |
7799 | hook_filter = &tab->hook_filters[hook]; |
7800 | hook_filter->filters[hook_filter->nr_filters++] = kset->filter; |
7801 | } |
7802 | return 0; |
7803 | end: |
7804 | btf_free_kfunc_set_tab(btf); |
7805 | return ret; |
7806 | } |
7807 | |
7808 | static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, |
7809 | enum btf_kfunc_hook hook, |
7810 | u32 kfunc_btf_id, |
7811 | const struct bpf_prog *prog) |
7812 | { |
7813 | struct btf_kfunc_hook_filter *hook_filter; |
7814 | struct btf_id_set8 *set; |
7815 | u32 *id, i; |
7816 | |
7817 | if (hook >= BTF_KFUNC_HOOK_MAX) |
7818 | return NULL; |
7819 | if (!btf->kfunc_set_tab) |
7820 | return NULL; |
7821 | hook_filter = &btf->kfunc_set_tab->hook_filters[hook]; |
7822 | for (i = 0; i < hook_filter->nr_filters; i++) { |
7823 | if (hook_filter->filters[i](prog, kfunc_btf_id)) |
7824 | return NULL; |
7825 | } |
7826 | set = btf->kfunc_set_tab->sets[hook]; |
7827 | if (!set) |
7828 | return NULL; |
7829 | id = btf_id_set8_contains(set, kfunc_btf_id); |
7830 | if (!id) |
7831 | return NULL; |
7832 | /* The flags for BTF ID are located next to it */ |
7833 | return id + 1; |
7834 | } |
7835 | |
7836 | static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) |
7837 | { |
7838 | switch (prog_type) { |
7839 | case BPF_PROG_TYPE_UNSPEC: |
7840 | return BTF_KFUNC_HOOK_COMMON; |
7841 | case BPF_PROG_TYPE_XDP: |
7842 | return BTF_KFUNC_HOOK_XDP; |
7843 | case BPF_PROG_TYPE_SCHED_CLS: |
7844 | return BTF_KFUNC_HOOK_TC; |
7845 | case BPF_PROG_TYPE_STRUCT_OPS: |
7846 | return BTF_KFUNC_HOOK_STRUCT_OPS; |
7847 | case BPF_PROG_TYPE_TRACING: |
7848 | case BPF_PROG_TYPE_LSM: |
7849 | return BTF_KFUNC_HOOK_TRACING; |
7850 | case BPF_PROG_TYPE_SYSCALL: |
7851 | return BTF_KFUNC_HOOK_SYSCALL; |
7852 | case BPF_PROG_TYPE_CGROUP_SKB: |
7853 | case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: |
7854 | return BTF_KFUNC_HOOK_CGROUP_SKB; |
7855 | case BPF_PROG_TYPE_SCHED_ACT: |
7856 | return BTF_KFUNC_HOOK_SCHED_ACT; |
7857 | case BPF_PROG_TYPE_SK_SKB: |
7858 | return BTF_KFUNC_HOOK_SK_SKB; |
7859 | case BPF_PROG_TYPE_SOCKET_FILTER: |
7860 | return BTF_KFUNC_HOOK_SOCKET_FILTER; |
7861 | case BPF_PROG_TYPE_LWT_OUT: |
7862 | case BPF_PROG_TYPE_LWT_IN: |
7863 | case BPF_PROG_TYPE_LWT_XMIT: |
7864 | case BPF_PROG_TYPE_LWT_SEG6LOCAL: |
7865 | return BTF_KFUNC_HOOK_LWT; |
7866 | case BPF_PROG_TYPE_NETFILTER: |
7867 | return BTF_KFUNC_HOOK_NETFILTER; |
7868 | default: |
7869 | return BTF_KFUNC_HOOK_MAX; |
7870 | } |
7871 | } |
7872 | |
7873 | /* Caution: |
7874 | * Reference to the module (obtained using btf_try_get_module) corresponding to |
7875 | * the struct btf *MUST* be held when calling this function from verifier |
7876 | * context. This is usually true as we stash references in prog's kfunc_btf_tab; |
7877 | * keeping the reference for the duration of the call provides the necessary |
7878 | * protection for looking up a well-formed btf->kfunc_set_tab. |
7879 | */ |
7880 | u32 *btf_kfunc_id_set_contains(const struct btf *btf, |
7881 | u32 kfunc_btf_id, |
7882 | const struct bpf_prog *prog) |
7883 | { |
7884 | enum bpf_prog_type prog_type = resolve_prog_type(prog); |
7885 | enum btf_kfunc_hook hook; |
7886 | u32 *kfunc_flags; |
7887 | |
7888 | kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog); |
7889 | if (kfunc_flags) |
7890 | return kfunc_flags; |
7891 | |
7892 | hook = bpf_prog_type_to_kfunc_hook(prog_type); |
7893 | return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog); |
7894 | } |
7895 | |
7896 | u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id, |
7897 | const struct bpf_prog *prog) |
7898 | { |
7899 | return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog); |
7900 | } |
7901 | |
7902 | static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, |
7903 | const struct btf_kfunc_id_set *kset) |
7904 | { |
7905 | struct btf *btf; |
7906 | int ret, i; |
7907 | |
7908 | btf = btf_get_module_btf(kset->owner); |
7909 | if (!btf) { |
7910 | if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { |
7911 | pr_err("missing vmlinux BTF, cannot register kfuncs\n"); |
7912 | return -ENOENT; |
7913 | } |
7914 | if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) |
7915 | pr_warn("missing module BTF, cannot register kfuncs\n"); |
7916 | return 0; |
7917 | } |
7918 | if (IS_ERR(btf)) |
7919 | return PTR_ERR(btf); |
7920 | |
7921 | for (i = 0; i < kset->set->cnt; i++) { |
7922 | ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id, |
7923 | kset->set->pairs[i].flags); |
7924 | if (ret) |
7925 | goto err_out; |
7926 | } |
7927 | |
7928 | ret = btf_populate_kfunc_set(btf, hook, kset); |
7929 | |
7930 | err_out: |
7931 | btf_put(btf); |
7932 | return ret; |
7933 | } |
7934 | |
7935 | /* This function must be invoked only from initcalls/module init functions */ |
7936 | int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, |
7937 | const struct btf_kfunc_id_set *kset) |
7938 | { |
7939 | enum btf_kfunc_hook hook; |
7940 | |
7941 | hook = bpf_prog_type_to_kfunc_hook(prog_type); |
7942 | return __register_btf_kfunc_id_set(hook, kset); |
7943 | } |
7944 | EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); |
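     | /* Registration sketch (editorial addition, hypothetical kfunc name): |
     | * callers build a BTF ID set with the macros from <linux/btf_ids.h> and |
     | * register it from an initcall or module init function: |
     | * |
     | *     BTF_SET8_START(my_kfunc_ids) |
     | *     BTF_ID_FLAGS(func, bpf_my_kfunc, KF_TRUSTED_ARGS) |
     | *     BTF_SET8_END(my_kfunc_ids) |
     | * |
     | *     static const struct btf_kfunc_id_set my_kfunc_set = { |
     | *             .owner = THIS_MODULE, |
     | *             .set   = &my_kfunc_ids, |
     | *     }; |
     | * |
     | *     err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set); |
     | */ |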
7945 | |
7946 | /* This function must be invoked only from initcalls/module init functions */ |
7947 | int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset) |
7948 | { |
7949 | return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset); |
7950 | } |
7951 | EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set); |
7952 | |
7953 | s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id) |
7954 | { |
7955 | struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; |
7956 | struct btf_id_dtor_kfunc *dtor; |
7957 | |
7958 | if (!tab) |
7959 | return -ENOENT; |
7960 | /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need |
7961 | * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func. |
7962 | */ |
7963 | BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0); |
7964 | dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func); |
7965 | if (!dtor) |
7966 | return -ENOENT; |
7967 | return dtor->kfunc_btf_id; |
7968 | } |
7969 | |
7970 | static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt) |
7971 | { |
7972 | const struct btf_type *dtor_func, *dtor_func_proto, *t; |
7973 | const struct btf_param *args; |
7974 | s32 dtor_btf_id; |
7975 | u32 nr_args, i; |
7976 | |
7977 | for (i = 0; i < cnt; i++) { |
7978 | dtor_btf_id = dtors[i].kfunc_btf_id; |
7979 | |
7980 | dtor_func = btf_type_by_id(btf, dtor_btf_id); |
7981 | if (!dtor_func || !btf_type_is_func(dtor_func)) |
7982 | return -EINVAL; |
7983 | |
7984 | dtor_func_proto = btf_type_by_id(btf, dtor_func->type); |
7985 | if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto)) |
7986 | return -EINVAL; |
7987 | |
7988 | /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */ |
7989 | t = btf_type_by_id(btf, dtor_func_proto->type); |
7990 | if (!t || !btf_type_is_void(t)) |
7991 | return -EINVAL; |
7992 | |
7993 | nr_args = btf_type_vlen(dtor_func_proto); |
7994 | if (nr_args != 1) |
7995 | return -EINVAL; |
7996 | args = btf_params(dtor_func_proto); |
7997 | t = btf_type_by_id(btf, args[0].type); |
7998 | /* Allow any pointer type, as width on targets Linux supports |
7999 | * will be same for all pointer types (i.e. sizeof(void *)) |
8000 | */ |
8001 | if (!t || !btf_type_is_ptr(t)) |
8002 | return -EINVAL; |
8003 | } |
8004 | return 0; |
8005 | } |
8006 | |
8007 | /* This function must be invoked only from initcalls/module init functions */ |
8008 | int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, |
8009 | struct module *owner) |
8010 | { |
8011 | struct btf_id_dtor_kfunc_tab *tab; |
8012 | struct btf *btf; |
8013 | u32 tab_cnt; |
8014 | int ret; |
8015 | |
8016 | btf = btf_get_module_btf(owner); |
8017 | if (!btf) { |
8018 | if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { |
8019 | pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n"); |
8020 | return -ENOENT; |
8021 | } |
8022 | if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { |
8023 | pr_err("missing module BTF, cannot register dtor kfuncs\n"); |
8024 | return -ENOENT; |
8025 | } |
8026 | return 0; |
8027 | } |
8028 | if (IS_ERR(btf)) |
8029 | return PTR_ERR(btf); |
8030 | |
8031 | if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { |
8032 | pr_err("cannot register more than %d kfunc destructors\n" , BTF_DTOR_KFUNC_MAX_CNT); |
8033 | ret = -E2BIG; |
8034 | goto end; |
8035 | } |
8036 | |
8037 | /* Ensure that the prototype of dtor kfuncs being registered is sane */ |
8038 | ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt); |
8039 | if (ret < 0) |
8040 | goto end; |
8041 | |
8042 | tab = btf->dtor_kfunc_tab; |
8043 | /* Only one call allowed for modules */ |
8044 | if (WARN_ON_ONCE(tab && btf_is_module(btf))) { |
8045 | ret = -EINVAL; |
8046 | goto end; |
8047 | } |
8048 | |
8049 | tab_cnt = tab ? tab->cnt : 0; |
8050 | if (tab_cnt > U32_MAX - add_cnt) { |
8051 | ret = -EOVERFLOW; |
8052 | goto end; |
8053 | } |
8054 | if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { |
8055 | pr_err("cannot register more than %d kfunc destructors\n" , BTF_DTOR_KFUNC_MAX_CNT); |
8056 | ret = -E2BIG; |
8057 | goto end; |
8058 | } |
8059 | |
8060 | tab = krealloc(btf->dtor_kfunc_tab, |
8061 | offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]), |
8062 | GFP_KERNEL | __GFP_NOWARN); |
8063 | if (!tab) { |
8064 | ret = -ENOMEM; |
8065 | goto end; |
8066 | } |
8067 | |
8068 | if (!btf->dtor_kfunc_tab) |
8069 | tab->cnt = 0; |
8070 | btf->dtor_kfunc_tab = tab; |
8071 | |
8072 | memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); |
8073 | tab->cnt += add_cnt; |
8074 | |
8075 | sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL); |
8076 | |
8077 | end: |
8078 | if (ret) |
8079 | btf_free_dtor_kfunc_tab(btf); |
8080 | btf_put(btf); |
8081 | return ret; |
8082 | } |
8083 | EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs); |
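     | /* Registration sketch (editorial addition, hypothetical names): a |
     | * destructor kfunc is registered by pairing the BTF id of the object |
     | * type with the BTF id of its release function: |
     | * |
     | *     BTF_ID_LIST(my_dtor_ids) |
     | *     BTF_ID(struct, my_obj) |
     | *     BTF_ID(func, bpf_my_obj_release) |
     | * |
     | *     const struct btf_id_dtor_kfunc my_dtors[] = { |
     | *             { .btf_id = my_dtor_ids[0], .kfunc_btf_id = my_dtor_ids[1] }, |
     | *     }; |
     | * |
     | *     err = register_btf_id_dtor_kfuncs(my_dtors, ARRAY_SIZE(my_dtors), |
     | *                                       THIS_MODULE); |
     | */ |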
8084 | |
8085 | #define MAX_TYPES_ARE_COMPAT_DEPTH 2 |
8086 | |
8087 | /* Check local and target types for compatibility. This check is used for |
8088 | * type-based CO-RE relocations and follow slightly different rules than |
8089 | * field-based relocations. This function assumes that root types were already |
8090 | * checked for name match. Beyond that initial root-level name check, names |
8091 | * are completely ignored. Compatibility rules are as follows: |
8092 | * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but |
8093 | * kind should match for local and target types (i.e., STRUCT is not |
8094 | * compatible with UNION); |
8095 | * - for ENUMs/ENUM64s, the size is ignored; |
8096 | * - for INT, size and signedness are ignored; |
8097 | * - for ARRAY, dimensionality is ignored, element types are checked for |
8098 | * compatibility recursively; |
8099 | * - CONST/VOLATILE/RESTRICT modifiers are ignored; |
8100 | * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; |
8101 | * - FUNC_PROTOs are compatible if they have compatible signature: same |
8102 | * number of input args and compatible return and argument types. |
8103 | * These rules are not set in stone and probably will be adjusted as we get |
8104 | * more experience with using BPF CO-RE relocations. |
8105 | */ |
8106 | int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, |
8107 | const struct btf *targ_btf, __u32 targ_id) |
8108 | { |
8109 | return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, |
8110 | MAX_TYPES_ARE_COMPAT_DEPTH); |
8111 | } |
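     | /* Editorial example: under the rules above, a local 'enum E' is |
     | * compatible with a target 'enum E' of a different byte size, and an |
     | * 'unsigned int' argument is compatible with a plain 'int' in an |
     | * otherwise matching FUNC_PROTO, while a struct on one side and a union |
     | * on the other are incompatible even if their names match. |
     | */ |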
8112 | |
8113 | #define MAX_TYPES_MATCH_DEPTH 2 |
8114 | |
8115 | int bpf_core_types_match(const struct btf *local_btf, u32 local_id, |
8116 | const struct btf *targ_btf, u32 targ_id) |
8117 | { |
8118 | return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, |
8119 | MAX_TYPES_MATCH_DEPTH); |
8120 | } |
8121 | |
8122 | static bool bpf_core_is_flavor_sep(const char *s) |
8123 | { |
8124 | /* check X___Y name pattern, where X and Y are not underscores */ |
8125 | return s[0] != '_' && /* X */ |
8126 | s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ |
8127 | s[4] != '_'; /* Y */ |
8128 | } |
8129 | |
8130 | size_t bpf_core_essential_name_len(const char *name) |
8131 | { |
8132 | size_t n = strlen(name); |
8133 | int i; |
8134 | |
8135 | for (i = n - 5; i >= 0; i--) { |
8136 | if (bpf_core_is_flavor_sep(name + i)) |
8137 | return i + 1; |
8138 | } |
8139 | return n; |
8140 | } |
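     | /* Editorial example: "___" separates a CO-RE "flavor" suffix, so |
     | * bpf_core_essential_name_len("task_struct___v2") is the length of |
     | * "task_struct"; a name without the separator is returned in full. |
     | */ |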
8141 | |
8142 | struct bpf_cand_cache { |
8143 | const char *name; |
8144 | u32 name_len; |
8145 | u16 kind; |
8146 | u16 cnt; |
8147 | struct { |
8148 | const struct btf *btf; |
8149 | u32 id; |
8150 | } cands[]; |
8151 | }; |
8152 | |
8153 | static void bpf_free_cands(struct bpf_cand_cache *cands) |
8154 | { |
8155 | if (!cands->cnt) |
8156 | /* empty candidate array was allocated on stack */ |
8157 | return; |
8158 | kfree(cands); |
8159 | } |
8160 | |
8161 | static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) |
8162 | { |
8163 | kfree(cands->name); |
8164 | kfree(cands); |
8165 | } |
8166 | |
8167 | #define VMLINUX_CAND_CACHE_SIZE 31 |
8168 | static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; |
8169 | |
8170 | #define MODULE_CAND_CACHE_SIZE 31 |
8171 | static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; |
8172 | |
8173 | static DEFINE_MUTEX(cand_cache_mutex); |
8174 | |
8175 | static void __print_cand_cache(struct bpf_verifier_log *log, |
8176 | struct bpf_cand_cache **cache, |
8177 | int cache_size) |
8178 | { |
8179 | struct bpf_cand_cache *cc; |
8180 | int i, j; |
8181 | |
8182 | for (i = 0; i < cache_size; i++) { |
8183 | cc = cache[i]; |
8184 | if (!cc) |
8185 | continue; |
8186 | bpf_log(log, fmt: "[%d]%s(" , i, cc->name); |
8187 | for (j = 0; j < cc->cnt; j++) { |
8188 | bpf_log(log, fmt: "%d" , cc->cands[j].id); |
8189 | if (j < cc->cnt - 1) |
8190 | bpf_log(log, fmt: " " ); |
8191 | } |
8192 | bpf_log(log, fmt: "), " ); |
8193 | } |
8194 | } |
8195 | |
8196 | static void print_cand_cache(struct bpf_verifier_log *log) |
8197 | { |
8198 | mutex_lock(&cand_cache_mutex); |
8199 | bpf_log(log, fmt: "vmlinux_cand_cache:" ); |
8200 | __print_cand_cache(log, cache: vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); |
8201 | bpf_log(log, fmt: "\nmodule_cand_cache:" ); |
8202 | __print_cand_cache(log, cache: module_cand_cache, MODULE_CAND_CACHE_SIZE); |
8203 | bpf_log(log, fmt: "\n" ); |
8204 | mutex_unlock(lock: &cand_cache_mutex); |
8205 | } |
8206 | |
8207 | static u32 hash_cands(struct bpf_cand_cache *cands) |
8208 | { |
8209 | return jhash(cands->name, cands->name_len, 0); |
8210 | } |
8211 | |
8212 | static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, |
8213 | struct bpf_cand_cache **cache, |
8214 | int cache_size) |
8215 | { |
8216 | struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; |
8217 | |
8218 | if (cc && cc->name_len == cands->name_len && |
8219 | !strncmp(cc->name, cands->name, cands->name_len)) |
8220 | return cc; |
8221 | return NULL; |
8222 | } |
8223 | |
8224 | static size_t sizeof_cands(int cnt) |
8225 | { |
8226 | return offsetof(struct bpf_cand_cache, cands[cnt]); |
8227 | } |
8228 | |
8229 | static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, |
8230 | struct bpf_cand_cache **cache, |
8231 | int cache_size) |
8232 | { |
8233 | struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; |
8234 | |
8235 | if (*cc) { |
8236 | bpf_free_cands_from_cache(*cc); |
8237 | *cc = NULL; |
8238 | } |
8239 | new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL); |
8240 | if (!new_cands) { |
8241 | bpf_free_cands(cands); |
8242 | return ERR_PTR(-ENOMEM); |
8243 | } |
8244 | /* strdup the name, since it will stay in cache. |
8245 | * the cands->name points to strings in prog's BTF and the prog can be unloaded. |
8246 | */ |
8247 | new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL); |
8248 | bpf_free_cands(cands); |
8249 | if (!new_cands->name) { |
8250 | kfree(new_cands); |
8251 | return ERR_PTR(-ENOMEM); |
8252 | } |
8253 | *cc = new_cands; |
8254 | return new_cands; |
8255 | } |
8256 | |
8257 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
8258 | static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache, |
8259 | int cache_size) |
8260 | { |
8261 | struct bpf_cand_cache *cc; |
8262 | int i, j; |
8263 | |
8264 | for (i = 0; i < cache_size; i++) { |
8265 | cc = cache[i]; |
8266 | if (!cc) |
8267 | continue; |
8268 | if (!btf) { |
8269 | /* when new module is loaded purge all of module_cand_cache, |
8270 | * since new module might have candidates with the name |
8271 | * that matches cached cands. |
8272 | */ |
8273 | bpf_free_cands_from_cache(cc); |
8274 | cache[i] = NULL; |
8275 | continue; |
8276 | } |
8277 | /* when module is unloaded purge cache entries |
8278 | * that match module's btf |
8279 | */ |
8280 | for (j = 0; j < cc->cnt; j++) |
8281 | if (cc->cands[j].btf == btf) { |
8282 | bpf_free_cands_from_cache(cc); |
8283 | cache[i] = NULL; |
8284 | break; |
8285 | } |
8286 | } |
8287 | |
8288 | } |
8289 | |
8290 | static void purge_cand_cache(struct btf *btf) |
8291 | { |
8292 | mutex_lock(&cand_cache_mutex); |
8293 | __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE); |
8294 | mutex_unlock(&cand_cache_mutex); |
8295 | } |
8296 | #endif |
8297 | |
8298 | static struct bpf_cand_cache * |
8299 | bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf, |
8300 | int targ_start_id) |
8301 | { |
8302 | struct bpf_cand_cache *new_cands; |
8303 | const struct btf_type *t; |
8304 | const char *targ_name; |
8305 | size_t targ_essent_len; |
8306 | int n, i; |
8307 | |
8308 | n = btf_nr_types(targ_btf); |
8309 | for (i = targ_start_id; i < n; i++) { |
8310 | t = btf_type_by_id(targ_btf, i); |
8311 | if (btf_kind(t) != cands->kind) |
8312 | continue; |
8313 | |
8314 | targ_name = btf_name_by_offset(targ_btf, t->name_off); |
8315 | if (!targ_name) |
8316 | continue; |
8317 | |
8318 | /* the resched point is before strncmp to make sure that search |
8319 | * for non-existing name will have a chance to schedule(). |
8320 | */ |
8321 | cond_resched(); |
8322 | |
8323 | if (strncmp(cands->name, targ_name, cands->name_len) != 0) |
8324 | continue; |
8325 | |
8326 | targ_essent_len = bpf_core_essential_name_len(targ_name); |
8327 | if (targ_essent_len != cands->name_len) |
8328 | continue; |
8329 | |
8330 | /* most of the time there is only one candidate for a given kind+name pair */ |
8331 | new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL); |
8332 | if (!new_cands) { |
8333 | bpf_free_cands(cands); |
8334 | return ERR_PTR(-ENOMEM); |
8335 | } |
8336 | |
8337 | memcpy(new_cands, cands, sizeof_cands(cands->cnt)); |
8338 | bpf_free_cands(cands); |
8339 | cands = new_cands; |
8340 | cands->cands[cands->cnt].btf = targ_btf; |
8341 | cands->cands[cands->cnt].id = i; |
8342 | cands->cnt++; |
8343 | } |
8344 | return cands; |
8345 | } |
8346 | |
8347 | static struct bpf_cand_cache * |
8348 | bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id) |
8349 | { |
8350 | struct bpf_cand_cache *cands, *cc, local_cand = {}; |
8351 | const struct btf *local_btf = ctx->btf; |
8352 | const struct btf_type *local_type; |
8353 | const struct btf *main_btf; |
8354 | size_t local_essent_len; |
8355 | struct btf *mod_btf; |
8356 | const char *name; |
8357 | int id; |
8358 | |
8359 | main_btf = bpf_get_btf_vmlinux(); |
8360 | if (IS_ERR(main_btf)) |
8361 | return ERR_CAST(main_btf); |
8362 | if (!main_btf) |
8363 | return ERR_PTR(-EINVAL); |
8364 | |
8365 | local_type = btf_type_by_id(local_btf, local_type_id); |
8366 | if (!local_type) |
8367 | return ERR_PTR(-EINVAL); |
8368 | |
8369 | name = btf_name_by_offset(local_btf, local_type->name_off); |
8370 | if (str_is_empty(name)) |
8371 | return ERR_PTR(-EINVAL); |
8372 | local_essent_len = bpf_core_essential_name_len(name); |
8373 | |
8374 | cands = &local_cand; |
8375 | cands->name = name; |
8376 | cands->kind = btf_kind(t: local_type); |
8377 | cands->name_len = local_essent_len; |
8378 | |
8379 | cc = check_cand_cache(cands, cache: vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); |
8380 | /* cands is a pointer to stack here */ |
8381 | if (cc) { |
8382 | if (cc->cnt) |
8383 | return cc; |
8384 | goto check_modules; |
8385 | } |
8386 | |
8387 | /* Attempt to find target candidates in vmlinux BTF first */ |
8388 | cands = bpf_core_add_cands(cands, targ_btf: main_btf, targ_start_id: 1); |
8389 | if (IS_ERR(ptr: cands)) |
8390 | return ERR_CAST(ptr: cands); |
8391 | |
8392 | /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */ |
8393 | |
8394 | /* populate cache even when cands->cnt == 0 */ |
8395 | cc = populate_cand_cache(cands, cache: vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); |
8396 | if (IS_ERR(ptr: cc)) |
8397 | return ERR_CAST(ptr: cc); |
8398 | |
8399 | /* if vmlinux BTF has any candidate, don't go for module BTFs */ |
8400 | if (cc->cnt) |
8401 | return cc; |
8402 | |
8403 | check_modules: |
8404 | /* cands is a pointer to stack here and cands->cnt == 0 */ |
8405 | cc = check_cand_cache(cands, cache: module_cand_cache, MODULE_CAND_CACHE_SIZE); |
8406 | if (cc) |
8407 | /* if cache has it return it even if cc->cnt == 0 */ |
8408 | return cc; |
8409 | |
8410 | /* If candidate is not found in vmlinux's BTF then search in module's BTFs */ |
8411 | spin_lock_bh(lock: &btf_idr_lock); |
8412 | idr_for_each_entry(&btf_idr, mod_btf, id) { |
8413 | if (!btf_is_module(btf: mod_btf)) |
8414 | continue; |
8415 | /* linear search could be slow hence unlock/lock |
8416 | * the IDR to avoiding holding it for too long |
8417 | */ |
8418 | btf_get(btf: mod_btf); |
8419 | spin_unlock_bh(lock: &btf_idr_lock); |
8420 | cands = bpf_core_add_cands(cands, targ_btf: mod_btf, targ_start_id: btf_nr_types(btf: main_btf)); |
8421 | btf_put(btf: mod_btf); |
8422 | if (IS_ERR(ptr: cands)) |
8423 | return ERR_CAST(ptr: cands); |
8424 | spin_lock_bh(lock: &btf_idr_lock); |
8425 | } |
8426 | spin_unlock_bh(lock: &btf_idr_lock); |
8427 | /* cands is a pointer to kmalloced memory here if cands->cnt > 0 |
8428 | * or pointer to stack if cands->cnd == 0. |
8429 | * Copy it into the cache even when cands->cnt == 0 and |
8430 | * return the result. |
8431 | */ |
8432 | return populate_cand_cache(cands, cache: module_cand_cache, MODULE_CAND_CACHE_SIZE); |
8433 | } |
8434 | |
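/* Apply CO-RE relocation @relo (the @relo_idx-th one) to the instruction at
 * @insn. Relocation kinds other than BPF_CORE_TYPE_ID_LOCAL need target
 * candidates; cand_cache_mutex is held from candidate lookup until patching
 * is done so that a module whose BTF is referenced by a candidate cannot be
 * unloaded in the middle of the relocation.
 */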
int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn)
{
	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
	struct bpf_core_cand_list cands = {};
	struct bpf_core_relo_res targ_res;
	struct bpf_core_spec *specs;
	int err;

	/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
	 * into arrays of btf_ids of struct fields and array indices.
	 */
	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
	if (!specs)
		return -ENOMEM;

	if (need_cands) {
		struct bpf_cand_cache *cc;
		int i;

		mutex_lock(&cand_cache_mutex);
		cc = bpf_core_find_cands(ctx, relo->type_id);
		if (IS_ERR(cc)) {
			bpf_log(ctx->log, "target candidate search failed for %d\n",
				relo->type_id);
			err = PTR_ERR(cc);
			goto out;
		}
		if (cc->cnt) {
			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
			if (!cands.cands) {
				err = -ENOMEM;
				goto out;
			}
		}
		for (i = 0; i < cc->cnt; i++) {
			bpf_log(ctx->log,
				"CO-RE relocating %s %s: found target candidate [%d]\n",
				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
			cands.cands[i].btf = cc->cands[i].btf;
			cands.cands[i].id = cc->cands[i].id;
		}
		cands.len = cc->cnt;
		/* cand_cache_mutex needs to span the cache lookup and
		 * copy of btf pointer into bpf_core_cand_list,
		 * since a module can be unloaded while bpf_core_calc_relo_insn
		 * is working with the module's btf.
		 */
	}

	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
				      &targ_res);
	if (err)
		goto out;

	/* relo->insn_off is in bytes; each BPF instruction is 8 bytes */
	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
				  &targ_res);

out:
	kfree(specs);
	if (need_cands) {
		kfree(cands.cands);
		mutex_unlock(&cand_cache_mutex);
		if (ctx->log->level & BPF_LOG_LEVEL2)
			print_cand_cache(ctx->log);
	}
	return err;
}

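/* Report whether the pointer field @field_name of @reg's BTF type is
 * trusted. The field is trusted if a companion type named
 * "<type name><suffix>" exists in the same BTF and has a pointer member with
 * the same name and (modifier-stripped) pointee type @btf_id. For example,
 * with a hypothetical suffix "__safe_fields", a member of struct foo would be
 * trusted if struct foo__safe_fields declares a matching pointer member.
 */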
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
				const struct bpf_reg_state *reg,
				const char *field_name, u32 btf_id, const char *suffix)
{
	struct btf *btf = reg->btf;
	const struct btf_type *walk_type, *safe_type;
	const char *tname;
	char safe_tname[64];
	long ret, safe_id;
	const struct btf_member *member;
	u32 i;

	walk_type = btf_type_by_id(btf, reg->btf_id);
	if (!walk_type)
		return false;

	tname = btf_name_by_offset(btf, walk_type->name_off);

	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
	if (ret >= sizeof(safe_tname))
		return false;

	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
	if (safe_id < 0)
		return false;

	safe_type = btf_type_by_id(btf, safe_id);
	if (!safe_type)
		return false;

	for_each_member(i, safe_type, member) {
		const char *m_name = __btf_name_by_offset(btf, member->name_off);
		const struct btf_type *mtype = btf_type_by_id(btf, member->type);
		u32 id;

		if (!btf_type_is_ptr(mtype))
			continue;

		btf_type_skip_modifiers(btf, mtype->type, &id);
		/* If we match on both type and name, the field is considered trusted. */
		if (btf_id == id && !strcmp(field_name, m_name))
			return true;
	}

	return false;
}

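/* Report whether the two named types are "no-cast aliases" of one another,
 * i.e. their names are identical except that exactly one of them carries the
 * NOCAST_ALIAS_SUFFIX ("___init"), e.g. nf_conn vs nf_conn___init.
 */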
bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
			       const struct btf *reg_btf, u32 reg_id,
			       const struct btf *arg_btf, u32 arg_id)
{
	const char *reg_name, *arg_name, *search_needle;
	const struct btf_type *reg_type, *arg_type;
	int reg_len, arg_len, cmp_len;
	size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);

	reg_type = btf_type_by_id(reg_btf, reg_id);
	if (!reg_type)
		return false;

	arg_type = btf_type_by_id(arg_btf, arg_id);
	if (!arg_type)
		return false;

	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);

	reg_len = strlen(reg_name);
	arg_len = strlen(arg_name);

	/* Exactly one of the two type names may be suffixed with ___init, so
	 * if the strings are the same size, they can't possibly be no-cast
	 * aliases of one another. If you have two of the same type names, e.g.
	 * they're both nf_conn___init, it would be improper to return true
	 * because they are _not_ no-cast aliases, they are the same type.
	 */
	if (reg_len == arg_len)
		return false;

	/* Either of the two names must be the other name, suffixed with ___init. */
	if ((reg_len != arg_len + pattern_len) &&
	    (arg_len != reg_len + pattern_len))
		return false;

	if (reg_len < arg_len) {
		search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
		cmp_len = reg_len;
	} else {
		search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
		cmp_len = arg_len;
	}

	if (!search_needle)
		return false;

	/* ___init suffix must come at the end of the name */
	if (*(search_needle + pattern_len) != '\0')
		return false;

	return !strncmp(reg_name, arg_name, cmp_len);
}
