1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
2 | /* Copyright (c) 2019 Facebook */ |
3 | |
4 | #ifdef __KERNEL__ |
5 | #include <linux/bpf.h> |
6 | #include <linux/btf.h> |
7 | #include <linux/string.h> |
8 | #include <linux/bpf_verifier.h> |
9 | #include "relo_core.h" |
10 | |
11 | static const char *btf_kind_str(const struct btf_type *t) |
12 | { |
13 | return btf_type_str(t); |
14 | } |
15 | |
16 | static bool is_ldimm64_insn(struct bpf_insn *insn) |
17 | { |
18 | return insn->code == (BPF_LD | BPF_IMM | BPF_DW); |
19 | } |
20 | |
21 | static const struct btf_type * |
22 | skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id) |
23 | { |
24 | return btf_type_skip_modifiers(btf, id, res_id); |
25 | } |
26 | |
27 | static const char *btf__name_by_offset(const struct btf *btf, u32 offset) |
28 | { |
29 | return btf_name_by_offset(btf, offset); |
30 | } |
31 | |
32 | static s64 btf__resolve_size(const struct btf *btf, u32 type_id) |
33 | { |
34 | const struct btf_type *t; |
35 | int size; |
36 | |
37 | t = btf_type_by_id(btf, type_id); |
        t = btf_resolve_size(btf, t, &size);
        if (IS_ERR(t))
                return PTR_ERR(t);
41 | return size; |
42 | } |
43 | |
44 | enum libbpf_print_level { |
45 | LIBBPF_WARN, |
46 | LIBBPF_INFO, |
47 | LIBBPF_DEBUG, |
48 | }; |
49 | |
50 | #undef pr_warn |
51 | #undef pr_info |
52 | #undef pr_debug |
53 | #define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) |
54 | #define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) |
55 | #define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) |
56 | #define libbpf_print(level, fmt, ...) bpf_log((void *)prog_name, fmt, ##__VA_ARGS__) |
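
/* Note: in the __KERNEL__ build these wrappers forward libbpf-style logging
 * to bpf_log(). This relies on the convention (an assumption noted here, not
 * enforced by the types) that the 'prog_name' value threaded through this
 * file is really a pointer to the verifier log, and the spare "" argument
 * satisfies the leading %s that would otherwise print the program name.
 */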
57 | #else |
58 | #include <stdio.h> |
59 | #include <string.h> |
60 | #include <errno.h> |
61 | #include <ctype.h> |
62 | #include <linux/err.h> |
63 | |
64 | #include "libbpf.h" |
65 | #include "bpf.h" |
66 | #include "btf.h" |
67 | #include "str_error.h" |
68 | #include "libbpf_internal.h" |
69 | #endif |
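
/* Everything below this point is shared between the in-kernel BPF verifier
 * and userspace libbpf; the #ifdef block above only maps the few helpers and
 * logging macros that the two environments provide differently.
 */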
70 | |
71 | static bool is_flex_arr(const struct btf *btf, |
72 | const struct bpf_core_accessor *acc, |
73 | const struct btf_array *arr) |
74 | { |
75 | const struct btf_type *t; |
76 | |
77 | /* not a flexible array, if not inside a struct or has non-zero size */ |
78 | if (!acc->name || arr->nelems > 0) |
79 | return false; |
80 | |
81 | /* has to be the last member of enclosing struct */ |
        t = btf_type_by_id(btf, acc->type_id);
83 | return acc->idx == btf_vlen(t) - 1; |
84 | } |
85 | |
86 | static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) |
87 | { |
88 | switch (kind) { |
        case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
        case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
        case BPF_CORE_FIELD_EXISTS: return "field_exists";
        case BPF_CORE_FIELD_SIGNED: return "signed";
        case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
        case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
        case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
        case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
        case BPF_CORE_TYPE_EXISTS: return "type_exists";
        case BPF_CORE_TYPE_MATCHES: return "type_matches";
        case BPF_CORE_TYPE_SIZE: return "type_size";
        case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
        case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
        default: return "unknown";
103 | } |
104 | } |
105 | |
106 | static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) |
107 | { |
108 | switch (kind) { |
109 | case BPF_CORE_FIELD_BYTE_OFFSET: |
110 | case BPF_CORE_FIELD_BYTE_SIZE: |
111 | case BPF_CORE_FIELD_EXISTS: |
112 | case BPF_CORE_FIELD_SIGNED: |
113 | case BPF_CORE_FIELD_LSHIFT_U64: |
114 | case BPF_CORE_FIELD_RSHIFT_U64: |
115 | return true; |
116 | default: |
117 | return false; |
118 | } |
119 | } |
120 | |
121 | static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) |
122 | { |
123 | switch (kind) { |
124 | case BPF_CORE_TYPE_ID_LOCAL: |
125 | case BPF_CORE_TYPE_ID_TARGET: |
126 | case BPF_CORE_TYPE_EXISTS: |
127 | case BPF_CORE_TYPE_MATCHES: |
128 | case BPF_CORE_TYPE_SIZE: |
129 | return true; |
130 | default: |
131 | return false; |
132 | } |
133 | } |
134 | |
135 | static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) |
136 | { |
137 | switch (kind) { |
138 | case BPF_CORE_ENUMVAL_EXISTS: |
139 | case BPF_CORE_ENUMVAL_VALUE: |
140 | return true; |
141 | default: |
142 | return false; |
143 | } |
144 | } |
145 | |
146 | int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, |
147 | const struct btf *targ_btf, __u32 targ_id, int level) |
148 | { |
149 | const struct btf_type *local_type, *targ_type; |
150 | int depth = 32; /* max recursion depth */ |
151 | |
152 | /* caller made sure that names match (ignoring flavor suffix) */ |
        local_type = btf_type_by_id(local_btf, local_id);
        targ_type = btf_type_by_id(targ_btf, targ_id);
        if (!btf_kind_core_compat(local_type, targ_type))
156 | return 0; |
157 | |
158 | recur: |
159 | depth--; |
160 | if (depth < 0) |
161 | return -EINVAL; |
162 | |
        local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
        targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
        if (!local_type || !targ_type)
                return -EINVAL;

        if (!btf_kind_core_compat(local_type, targ_type))
                return 0;

        switch (btf_kind(local_type)) {
172 | case BTF_KIND_UNKN: |
173 | case BTF_KIND_STRUCT: |
174 | case BTF_KIND_UNION: |
175 | case BTF_KIND_ENUM: |
176 | case BTF_KIND_FWD: |
177 | case BTF_KIND_ENUM64: |
178 | return 1; |
179 | case BTF_KIND_INT: |
180 | /* just reject deprecated bitfield-like integers; all other |
181 | * integers are by default compatible between each other |
182 | */ |
                return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
184 | case BTF_KIND_PTR: |
185 | local_id = local_type->type; |
186 | targ_id = targ_type->type; |
187 | goto recur; |
188 | case BTF_KIND_ARRAY: |
                local_id = btf_array(local_type)->type;
                targ_id = btf_array(targ_type)->type;
                goto recur;
        case BTF_KIND_FUNC_PROTO: {
                struct btf_param *local_p = btf_params(local_type);
                struct btf_param *targ_p = btf_params(targ_type);
                __u16 local_vlen = btf_vlen(local_type);
                __u16 targ_vlen = btf_vlen(targ_type);
197 | int i, err; |
198 | |
199 | if (local_vlen != targ_vlen) |
200 | return 0; |
201 | |
202 | for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { |
203 | if (level <= 0) |
204 | return -EINVAL; |
205 | |
                        skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
                        skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
                        err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
                                                          level - 1);
210 | if (err <= 0) |
211 | return err; |
212 | } |
213 | |
214 | /* tail recurse for return type check */ |
                skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
                skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
217 | goto recur; |
218 | } |
219 | default: |
                pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
                        btf_kind_str(local_type), local_id, targ_id);
222 | return 0; |
223 | } |
224 | } |
225 | |
226 | /* |
227 | * Turn bpf_core_relo into a low- and high-level spec representation, |
228 | * validating correctness along the way, as well as calculating resulting |
229 | * field bit offset, specified by accessor string. Low-level spec captures |
230 | * every single level of nestedness, including traversing anonymous |
231 | * struct/union members. High-level one only captures semantically meaningful |
* "turning points": named fields and array indices.
233 | * E.g., for this case: |
234 | * |
235 | * struct sample { |
236 | * int __unimportant; |
237 | * struct { |
238 | * int __1; |
239 | * int __2; |
240 | * int a[7]; |
241 | * }; |
242 | * }; |
243 | * |
244 | * struct sample *s = ...; |
245 | * |
* int *x = &s->a[3]; // access string = '0:1:2:3'
247 | * |
248 | * Low-level spec has 1:1 mapping with each element of access string (it's |
249 | * just a parsed access string representation): [0, 1, 2, 3]. |
250 | * |
251 | * High-level spec will capture only 3 points: |
252 | * - initial zero-index access by pointer (&s->... is the same as &s[0]...); |
253 | * - field 'a' access (corresponds to '2' in low-level spec); |
254 | * - array element #3 access (corresponds to '3' in low-level spec). |
255 | * |
256 | * Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE, |
257 | * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their |
258 | * spec and raw_spec are kept empty. |
259 | * |
260 | * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access |
* string to specify enumerator's value index that needs to be relocated.
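*
* E.g., for 'enum E { A, B, C };' a relocation against enumerator C uses
* access string "2" (the enumerator's index), not its numeric value.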
262 | */ |
263 | int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, |
264 | const struct bpf_core_relo *relo, |
265 | struct bpf_core_spec *spec) |
266 | { |
267 | int access_idx, parsed_len, i; |
268 | struct bpf_core_accessor *acc; |
269 | const struct btf_type *t; |
270 | const char *name, *spec_str; |
271 | __u32 id, name_off; |
272 | __s64 sz; |
273 | |
        spec_str = btf__name_by_offset(btf, relo->access_str_off);
        if (str_is_empty(spec_str) || *spec_str == ':')
276 | return -EINVAL; |
277 | |
278 | memset(spec, 0, sizeof(*spec)); |
279 | spec->btf = btf; |
280 | spec->root_type_id = relo->type_id; |
281 | spec->relo_kind = relo->kind; |
282 | |
283 | /* type-based relocations don't have a field access string */ |
        if (core_relo_is_type_based(relo->kind)) {
                if (strcmp(spec_str, "0"))
286 | return -EINVAL; |
287 | return 0; |
288 | } |
289 | |
290 | /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */ |
291 | while (*spec_str) { |
292 | if (*spec_str == ':') |
293 | ++spec_str; |
                if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
295 | return -EINVAL; |
296 | if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) |
297 | return -E2BIG; |
298 | spec_str += parsed_len; |
299 | spec->raw_spec[spec->raw_len++] = access_idx; |
300 | } |
301 | |
302 | if (spec->raw_len == 0) |
303 | return -EINVAL; |
304 | |
        t = skip_mods_and_typedefs(btf, relo->type_id, &id);
306 | if (!t) |
307 | return -EINVAL; |
308 | |
309 | access_idx = spec->raw_spec[0]; |
310 | acc = &spec->spec[0]; |
311 | acc->type_id = id; |
312 | acc->idx = access_idx; |
313 | spec->len++; |
314 | |
        if (core_relo_is_enumval_based(relo->kind)) {
316 | if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) |
317 | return -EINVAL; |
318 | |
319 | /* record enumerator name in a first accessor */ |
320 | name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off |
321 | : btf_enum64(t)[access_idx].name_off; |
                acc->name = btf__name_by_offset(btf, name_off);
323 | return 0; |
324 | } |
325 | |
        if (!core_relo_is_field_based(relo->kind))
327 | return -EINVAL; |
328 | |
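        /* the first spec element is a dereference of the root (pointer) type,
         * so it contributes access_idx * sizeof(root type) to the bit offset;
         * for a plain 's->field' style access access_idx is simply 0
         */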
        sz = btf__resolve_size(btf, id);
330 | if (sz < 0) |
331 | return sz; |
332 | spec->bit_offset = access_idx * sz * 8; |
333 | |
334 | for (i = 1; i < spec->raw_len; i++) { |
                t = skip_mods_and_typedefs(btf, id, &id);
336 | if (!t) |
337 | return -EINVAL; |
338 | |
339 | access_idx = spec->raw_spec[i]; |
340 | acc = &spec->spec[spec->len]; |
341 | |
342 | if (btf_is_composite(t)) { |
343 | const struct btf_member *m; |
344 | __u32 bit_offset; |
345 | |
346 | if (access_idx >= btf_vlen(t)) |
347 | return -EINVAL; |
348 | |
                        bit_offset = btf_member_bit_offset(t, access_idx);
350 | spec->bit_offset += bit_offset; |
351 | |
352 | m = btf_members(t) + access_idx; |
353 | if (m->name_off) { |
                                name = btf__name_by_offset(btf, m->name_off);
                                if (str_is_empty(name))
356 | return -EINVAL; |
357 | |
358 | acc->type_id = id; |
359 | acc->idx = access_idx; |
360 | acc->name = name; |
361 | spec->len++; |
362 | } |
363 | |
364 | id = m->type; |
365 | } else if (btf_is_array(t)) { |
366 | const struct btf_array *a = btf_array(t); |
367 | bool flex; |
368 | |
                        t = skip_mods_and_typedefs(btf, a->type, &id);
                        if (!t)
                                return -EINVAL;

                        flex = is_flex_arr(btf, acc - 1, a);
374 | if (!flex && access_idx >= a->nelems) |
375 | return -EINVAL; |
376 | |
377 | spec->spec[spec->len].type_id = id; |
378 | spec->spec[spec->len].idx = access_idx; |
379 | spec->len++; |
380 | |
                        sz = btf__resolve_size(btf, id);
382 | if (sz < 0) |
383 | return sz; |
384 | spec->bit_offset += access_idx * sz * 8; |
385 | } else { |
                        pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
                                prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
388 | return -EINVAL; |
389 | } |
390 | } |
391 | |
392 | return 0; |
393 | } |
394 | |
395 | /* Check two types for compatibility for the purpose of field access |
396 | * relocation. const/volatile/restrict and typedefs are skipped to ensure we |
397 | * are relocating semantically compatible entities: |
398 | * - any two STRUCTs/UNIONs are compatible and can be mixed; |
399 | * - any two FWDs are compatible, if their names match (modulo flavor suffix); |
400 | * - any two PTRs are always compatible; |
401 | * - for ENUMs, names should be the same (ignoring flavor suffix) or at |
402 | * least one of enums should be anonymous; |
403 | * - for ENUMs, check sizes, names are ignored; |
404 | * - for INT, size and signedness are ignored; |
405 | * - any two FLOATs are always compatible; |
406 | * - for ARRAY, dimensionality is ignored, element types are checked for |
407 | * compatibility recursively; |
408 | * - everything else shouldn't be ever a target of relocation. |
409 | * These rules are not set in stone and probably will be adjusted as we get |
410 | * more experience with using BPF CO-RE relocations. |
411 | */ |
412 | static int bpf_core_fields_are_compat(const struct btf *local_btf, |
413 | __u32 local_id, |
414 | const struct btf *targ_btf, |
415 | __u32 targ_id) |
416 | { |
417 | const struct btf_type *local_type, *targ_type; |
418 | |
419 | recur: |
        local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
        targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
        if (!local_type || !targ_type)
                return -EINVAL;

        if (btf_is_composite(local_type) && btf_is_composite(targ_type))
                return 1;
        if (!btf_kind_core_compat(local_type, targ_type))
                return 0;

        switch (btf_kind(local_type)) {
431 | case BTF_KIND_PTR: |
432 | case BTF_KIND_FLOAT: |
433 | return 1; |
434 | case BTF_KIND_FWD: |
435 | case BTF_KIND_ENUM64: |
436 | case BTF_KIND_ENUM: { |
437 | const char *local_name, *targ_name; |
438 | size_t local_len, targ_len; |
439 | |
                local_name = btf__name_by_offset(local_btf,
                                                 local_type->name_off);
                targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
                local_len = bpf_core_essential_name_len(local_name);
                targ_len = bpf_core_essential_name_len(targ_name);
445 | /* one of them is anonymous or both w/ same flavor-less names */ |
446 | return local_len == 0 || targ_len == 0 || |
447 | (local_len == targ_len && |
448 | strncmp(local_name, targ_name, local_len) == 0); |
449 | } |
450 | case BTF_KIND_INT: |
451 | /* just reject deprecated bitfield-like integers; all other |
452 | * integers are by default compatible between each other |
453 | */ |
                return btf_int_offset(local_type) == 0 &&
                       btf_int_offset(targ_type) == 0;
        case BTF_KIND_ARRAY:
                local_id = btf_array(local_type)->type;
                targ_id = btf_array(targ_type)->type;
459 | goto recur; |
460 | default: |
461 | return 0; |
462 | } |
463 | } |
464 | |
465 | /* |
466 | * Given single high-level named field accessor in local type, find |
467 | * corresponding high-level accessor for a target type. Along the way, |
468 | * maintain low-level spec for target as well. Also keep updating target |
469 | * bit offset. |
470 | * |
471 | * Searching is performed through recursive exhaustive enumeration of all |
472 | * fields of a struct/union. If there are any anonymous (embedded) |
473 | * structs/unions, they are recursively searched as well. If field with |
474 | * desired name is found, check compatibility between local and target types, |
475 | * before returning result. |
476 | * |
477 | * 1 is returned, if field is found. |
478 | * 0 is returned if no compatible field is found. |
479 | * <0 is returned on error. |
480 | */ |
481 | static int bpf_core_match_member(const struct btf *local_btf, |
482 | const struct bpf_core_accessor *local_acc, |
483 | const struct btf *targ_btf, |
484 | __u32 targ_id, |
485 | struct bpf_core_spec *spec, |
486 | __u32 *next_targ_id) |
487 | { |
488 | const struct btf_type *local_type, *targ_type; |
489 | const struct btf_member *local_member, *m; |
490 | const char *local_name, *targ_name; |
491 | __u32 local_id; |
492 | int i, n, found; |
493 | |
        targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
        if (!targ_type)
                return -EINVAL;
        if (!btf_is_composite(targ_type))
498 | return 0; |
499 | |
500 | local_id = local_acc->type_id; |
        local_type = btf_type_by_id(local_btf, local_id);
        local_member = btf_members(local_type) + local_acc->idx;
        local_name = btf__name_by_offset(local_btf, local_member->name_off);

        n = btf_vlen(targ_type);
        m = btf_members(targ_type);
507 | for (i = 0; i < n; i++, m++) { |
508 | __u32 bit_offset; |
509 | |
                bit_offset = btf_member_bit_offset(targ_type, i);
511 | |
512 | /* too deep struct/union/array nesting */ |
513 | if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) |
514 | return -E2BIG; |
515 | |
516 | /* speculate this member will be the good one */ |
517 | spec->bit_offset += bit_offset; |
518 | spec->raw_spec[spec->raw_len++] = i; |
519 | |
                targ_name = btf__name_by_offset(targ_btf, m->name_off);
                if (str_is_empty(targ_name)) {
                        /* embedded struct/union, we need to go deeper */
                        found = bpf_core_match_member(local_btf, local_acc,
                                                      targ_btf, m->type,
                                                      spec, next_targ_id);
526 | if (found) /* either found or error */ |
527 | return found; |
528 | } else if (strcmp(local_name, targ_name) == 0) { |
529 | /* matching named field */ |
530 | struct bpf_core_accessor *targ_acc; |
531 | |
532 | targ_acc = &spec->spec[spec->len++]; |
533 | targ_acc->type_id = targ_id; |
534 | targ_acc->idx = i; |
535 | targ_acc->name = targ_name; |
536 | |
537 | *next_targ_id = m->type; |
                        found = bpf_core_fields_are_compat(local_btf,
                                                           local_member->type,
                                                           targ_btf, m->type);
541 | if (!found) |
542 | spec->len--; /* pop accessor */ |
543 | return found; |
544 | } |
545 | /* member turned out not to be what we looked for */ |
546 | spec->bit_offset -= bit_offset; |
547 | spec->raw_len--; |
548 | } |
549 | |
550 | return 0; |
551 | } |
552 | |
553 | /* |
554 | * Try to match local spec to a target type and, if successful, produce full |
555 | * target spec (high-level, low-level + bit offset). |
556 | */ |
557 | static int bpf_core_spec_match(struct bpf_core_spec *local_spec, |
558 | const struct btf *targ_btf, __u32 targ_id, |
559 | struct bpf_core_spec *targ_spec) |
560 | { |
561 | const struct btf_type *targ_type; |
562 | const struct bpf_core_accessor *local_acc; |
563 | struct bpf_core_accessor *targ_acc; |
564 | int i, sz, matched; |
565 | __u32 name_off; |
566 | |
567 | memset(targ_spec, 0, sizeof(*targ_spec)); |
568 | targ_spec->btf = targ_btf; |
569 | targ_spec->root_type_id = targ_id; |
570 | targ_spec->relo_kind = local_spec->relo_kind; |
571 | |
        if (core_relo_is_type_based(local_spec->relo_kind)) {
                if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
                        return bpf_core_types_match(local_spec->btf,
                                                    local_spec->root_type_id,
                                                    targ_btf, targ_id);
                else
                        return bpf_core_types_are_compat(local_spec->btf,
                                                         local_spec->root_type_id,
                                                         targ_btf, targ_id);
581 | } |
582 | |
583 | local_acc = &local_spec->spec[0]; |
584 | targ_acc = &targ_spec->spec[0]; |
585 | |
        if (core_relo_is_enumval_based(local_spec->relo_kind)) {
587 | size_t local_essent_len, targ_essent_len; |
588 | const char *targ_name; |
589 | |
590 | /* has to resolve to an enum */ |
                targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
                if (!btf_is_any_enum(targ_type))
593 | return 0; |
594 | |
                local_essent_len = bpf_core_essential_name_len(local_acc->name);
596 | |
                for (i = 0; i < btf_vlen(targ_type); i++) {
                        if (btf_is_enum(targ_type))
                                name_off = btf_enum(targ_type)[i].name_off;
                        else
                                name_off = btf_enum64(targ_type)[i].name_off;

                        targ_name = btf__name_by_offset(targ_spec->btf, name_off);
                        targ_essent_len = bpf_core_essential_name_len(targ_name);
605 | if (targ_essent_len != local_essent_len) |
606 | continue; |
607 | if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { |
608 | targ_acc->type_id = targ_id; |
609 | targ_acc->idx = i; |
610 | targ_acc->name = targ_name; |
611 | targ_spec->len++; |
612 | targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; |
613 | targ_spec->raw_len++; |
614 | return 1; |
615 | } |
616 | } |
617 | return 0; |
618 | } |
619 | |
        if (!core_relo_is_field_based(local_spec->relo_kind))
621 | return -EINVAL; |
622 | |
623 | for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { |
                targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
                                                   &targ_id);
626 | if (!targ_type) |
627 | return -EINVAL; |
628 | |
629 | if (local_acc->name) { |
                        matched = bpf_core_match_member(local_spec->btf,
                                                        local_acc,
                                                        targ_btf, targ_id,
                                                        targ_spec, &targ_id);
634 | if (matched <= 0) |
635 | return matched; |
636 | } else { |
637 | /* for i=0, targ_id is already treated as array element |
638 | * type (because it's the original struct), for others |
639 | * we should find array element type first |
640 | */ |
641 | if (i > 0) { |
642 | const struct btf_array *a; |
643 | bool flex; |
644 | |
                                if (!btf_is_array(targ_type))
                                        return 0;

                                a = btf_array(targ_type);
                                flex = is_flex_arr(targ_btf, targ_acc - 1, a);
                                if (!flex && local_acc->idx >= a->nelems)
                                        return 0;
                                if (!skip_mods_and_typedefs(targ_btf, a->type,
                                                            &targ_id))
654 | return -EINVAL; |
655 | } |
656 | |
657 | /* too deep struct/union/array nesting */ |
658 | if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) |
659 | return -E2BIG; |
660 | |
661 | targ_acc->type_id = targ_id; |
662 | targ_acc->idx = local_acc->idx; |
663 | targ_acc->name = NULL; |
664 | targ_spec->len++; |
665 | targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; |
666 | targ_spec->raw_len++; |
667 | |
                        sz = btf__resolve_size(targ_btf, targ_id);
669 | if (sz < 0) |
670 | return sz; |
671 | targ_spec->bit_offset += local_acc->idx * sz * 8; |
672 | } |
673 | } |
674 | |
675 | return 1; |
676 | } |
677 | |
678 | static int bpf_core_calc_field_relo(const char *prog_name, |
679 | const struct bpf_core_relo *relo, |
680 | const struct bpf_core_spec *spec, |
681 | __u64 *val, __u32 *field_sz, __u32 *type_id, |
682 | bool *validate) |
683 | { |
684 | const struct bpf_core_accessor *acc; |
685 | const struct btf_type *t; |
686 | __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; |
687 | const struct btf_member *m; |
688 | const struct btf_type *mt; |
689 | bool bitfield; |
690 | __s64 sz; |
691 | |
692 | *field_sz = 0; |
693 | |
694 | if (relo->kind == BPF_CORE_FIELD_EXISTS) { |
695 | *val = spec ? 1 : 0; |
696 | return 0; |
697 | } |
698 | |
699 | if (!spec) |
700 | return -EUCLEAN; /* request instruction poisoning */ |
701 | |
702 | acc = &spec->spec[spec->len - 1]; |
        t = btf_type_by_id(spec->btf, acc->type_id);
704 | |
705 | /* a[n] accessor needs special handling */ |
706 | if (!acc->name) { |
707 | if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { |
708 | *val = spec->bit_offset / 8; |
709 | /* remember field size for load/store mem size */ |
                        sz = btf__resolve_size(spec->btf, acc->type_id);
711 | if (sz < 0) |
712 | return -EINVAL; |
713 | *field_sz = sz; |
714 | *type_id = acc->type_id; |
715 | } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) { |
                        sz = btf__resolve_size(spec->btf, acc->type_id);
717 | if (sz < 0) |
718 | return -EINVAL; |
719 | *val = sz; |
720 | } else { |
                        pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
                                prog_name, relo->kind, relo->insn_off / 8);
723 | return -EINVAL; |
724 | } |
725 | if (validate) |
726 | *validate = true; |
727 | return 0; |
728 | } |
729 | |
730 | m = btf_members(t) + acc->idx; |
        mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
        bit_off = spec->bit_offset;
        bit_sz = btf_member_bitfield_size(t, acc->idx);
734 | |
735 | bitfield = bit_sz > 0; |
736 | if (bitfield) { |
737 | byte_sz = mt->size; |
738 | byte_off = bit_off / 8 / byte_sz * byte_sz; |
739 | /* figure out smallest int size necessary for bitfield load */ |
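                /* e.g., a 4-bit field at bit offset 30 of a 4-byte int spills
                 * across two 32-bit words, so byte_sz doubles to 8 and
                 * byte_off re-aligns to 0, yielding a single 8-byte load
                 */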
740 | while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { |
741 | if (byte_sz >= 8) { |
742 | /* bitfield can't be read with 64-bit read */ |
                                pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
                                        prog_name, relo->kind, relo->insn_off / 8);
745 | return -E2BIG; |
746 | } |
747 | byte_sz *= 2; |
748 | byte_off = bit_off / 8 / byte_sz * byte_sz; |
749 | } |
750 | } else { |
                sz = btf__resolve_size(spec->btf, field_type_id);
752 | if (sz < 0) |
753 | return -EINVAL; |
754 | byte_sz = sz; |
755 | byte_off = spec->bit_offset / 8; |
756 | bit_sz = byte_sz * 8; |
757 | } |
758 | |
759 | /* for bitfields, all the relocatable aspects are ambiguous and we |
760 | * might disagree with compiler, so turn off validation of expected |
761 | * value, except for signedness |
762 | */ |
763 | if (validate) |
764 | *validate = !bitfield; |
765 | |
766 | switch (relo->kind) { |
767 | case BPF_CORE_FIELD_BYTE_OFFSET: |
768 | *val = byte_off; |
769 | if (!bitfield) { |
770 | *field_sz = byte_sz; |
771 | *type_id = field_type_id; |
772 | } |
773 | break; |
774 | case BPF_CORE_FIELD_BYTE_SIZE: |
775 | *val = byte_sz; |
776 | break; |
777 | case BPF_CORE_FIELD_SIGNED: |
                *val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
                       (btf_is_int(mt) && (btf_int_encoding(mt) & BTF_INT_SIGNED));
780 | if (validate) |
781 | *validate = true; /* signedness is never ambiguous */ |
782 | break; |
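        /* LSHIFT_U64/RSHIFT_U64 describe how to extract the (bit)field from a
         * byte_sz-sized load at byte_off widened to u64: continuing the
         * example above (4-bit field at bits 30..33, 8-byte little-endian
         * load), lshift is 64 - 34 = 30 and rshift is 64 - 4 = 60
         */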
783 | case BPF_CORE_FIELD_LSHIFT_U64: |
784 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
785 | *val = 64 - (bit_off + bit_sz - byte_off * 8); |
786 | #else |
787 | *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); |
788 | #endif |
789 | break; |
790 | case BPF_CORE_FIELD_RSHIFT_U64: |
791 | *val = 64 - bit_sz; |
792 | if (validate) |
793 | *validate = true; /* right shift is never ambiguous */ |
794 | break; |
795 | case BPF_CORE_FIELD_EXISTS: |
796 | default: |
797 | return -EOPNOTSUPP; |
798 | } |
799 | |
800 | return 0; |
801 | } |
802 | |
803 | static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, |
804 | const struct bpf_core_spec *spec, |
805 | __u64 *val, bool *validate) |
806 | { |
807 | __s64 sz; |
808 | |
809 | /* by default, always check expected value in bpf_insn */ |
810 | if (validate) |
811 | *validate = true; |
812 | |
813 | /* type-based relos return zero when target type is not found */ |
814 | if (!spec) { |
815 | *val = 0; |
816 | return 0; |
817 | } |
818 | |
819 | switch (relo->kind) { |
820 | case BPF_CORE_TYPE_ID_TARGET: |
821 | *val = spec->root_type_id; |
822 | /* type ID, embedded in bpf_insn, might change during linking, |
823 | * so enforcing it is pointless |
824 | */ |
825 | if (validate) |
826 | *validate = false; |
827 | break; |
828 | case BPF_CORE_TYPE_EXISTS: |
829 | case BPF_CORE_TYPE_MATCHES: |
830 | *val = 1; |
831 | break; |
832 | case BPF_CORE_TYPE_SIZE: |
                sz = btf__resolve_size(spec->btf, spec->root_type_id);
834 | if (sz < 0) |
835 | return -EINVAL; |
836 | *val = sz; |
837 | break; |
838 | case BPF_CORE_TYPE_ID_LOCAL: |
839 | /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */ |
840 | default: |
841 | return -EOPNOTSUPP; |
842 | } |
843 | |
844 | return 0; |
845 | } |
846 | |
847 | static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, |
848 | const struct bpf_core_spec *spec, |
849 | __u64 *val) |
850 | { |
851 | const struct btf_type *t; |
852 | |
853 | switch (relo->kind) { |
854 | case BPF_CORE_ENUMVAL_EXISTS: |
855 | *val = spec ? 1 : 0; |
856 | break; |
857 | case BPF_CORE_ENUMVAL_VALUE: |
858 | if (!spec) |
859 | return -EUCLEAN; /* request instruction poisoning */ |
                t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
                if (btf_is_enum(t))
                        *val = btf_enum(t)[spec->spec[0].idx].val;
                else
                        *val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
865 | break; |
866 | default: |
867 | return -EOPNOTSUPP; |
868 | } |
869 | |
870 | return 0; |
871 | } |
872 | |
873 | /* Calculate original and target relocation values, given local and target |
874 | * specs and relocation kind. These values are calculated for each candidate. |
875 | * If there are multiple candidates, resulting values should all be consistent |
876 | * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity. |
877 | * If instruction has to be poisoned, *poison will be set to true. |
878 | */ |
879 | static int bpf_core_calc_relo(const char *prog_name, |
880 | const struct bpf_core_relo *relo, |
881 | int relo_idx, |
882 | const struct bpf_core_spec *local_spec, |
883 | const struct bpf_core_spec *targ_spec, |
884 | struct bpf_core_relo_res *res) |
885 | { |
886 | int err = -EOPNOTSUPP; |
887 | |
888 | res->orig_val = 0; |
889 | res->new_val = 0; |
890 | res->poison = false; |
891 | res->validate = true; |
892 | res->fail_memsz_adjust = false; |
893 | res->orig_sz = res->new_sz = 0; |
894 | res->orig_type_id = res->new_type_id = 0; |
895 | |
        if (core_relo_is_field_based(relo->kind)) {
                err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
                                               &res->orig_val, &res->orig_sz,
                                               &res->orig_type_id, &res->validate);
                err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
                                                      &res->new_val, &res->new_sz,
                                                      &res->new_type_id, NULL);
903 | if (err) |
904 | goto done; |
905 | /* Validate if it's safe to adjust load/store memory size. |
906 | * Adjustments are performed only if original and new memory |
907 | * sizes differ. |
908 | */ |
909 | res->fail_memsz_adjust = false; |
910 | if (res->orig_sz != res->new_sz) { |
911 | const struct btf_type *orig_t, *new_t; |
912 | |
                        orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
                        new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);
915 | |
916 | /* There are two use cases in which it's safe to |
917 | * adjust load/store's mem size: |
918 | * - reading a 32-bit kernel pointer, while on BPF |
* side pointers are always 64-bit; in this case
920 | * it's safe to "downsize" instruction size due to |
921 | * pointer being treated as unsigned integer with |
922 | * zero-extended upper 32-bits; |
923 | * - reading unsigned integers, again due to |
924 | * zero-extension is preserving the value correctly. |
925 | * |
926 | * In all other cases it's incorrect to attempt to |
927 | * load/store field because read value will be |
928 | * incorrect, so we poison relocated instruction. |
929 | */ |
                        if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
                                goto done;
                        if (btf_is_int(orig_t) && btf_is_int(new_t) &&
                            btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
                            btf_int_encoding(new_t) != BTF_INT_SIGNED)
935 | goto done; |
936 | |
937 | /* mark as invalid mem size adjustment, but this will |
938 | * only be checked for LDX/STX/ST insns |
939 | */ |
940 | res->fail_memsz_adjust = true; |
941 | } |
        } else if (core_relo_is_type_based(relo->kind)) {
                err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
                err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
        } else if (core_relo_is_enumval_based(relo->kind)) {
                err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
                err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
948 | } |
949 | |
950 | done: |
951 | if (err == -EUCLEAN) { |
952 | /* EUCLEAN is used to signal instruction poisoning request */ |
953 | res->poison = true; |
954 | err = 0; |
955 | } else if (err == -EOPNOTSUPP) { |
956 | /* EOPNOTSUPP means unknown/unsupported relocation */ |
                pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
958 | prog_name, relo_idx, core_relo_kind_str(relo->kind), |
959 | relo->kind, relo->insn_off / 8); |
960 | } |
961 | |
962 | return err; |
963 | } |
964 | |
965 | /* |
* Turn instruction for which CO-RE relocation failed into invalid one with
967 | * distinct signature. |
968 | */ |
969 | static void bpf_core_poison_insn(const char *prog_name, int relo_idx, |
970 | int insn_idx, struct bpf_insn *insn) |
971 | { |
972 | pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n" , |
973 | prog_name, relo_idx, insn_idx); |
974 | insn->code = BPF_JMP | BPF_CALL; |
975 | insn->dst_reg = 0; |
976 | insn->src_reg = 0; |
977 | insn->off = 0; |
978 | /* if this instruction is reachable (not a dead code), |
979 | * verifier will complain with the following message: |
980 | * invalid func unknown#195896080 |
981 | */ |
982 | insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ |
983 | } |
984 | |
985 | static int insn_bpf_size_to_bytes(struct bpf_insn *insn) |
986 | { |
987 | switch (BPF_SIZE(insn->code)) { |
988 | case BPF_DW: return 8; |
989 | case BPF_W: return 4; |
990 | case BPF_H: return 2; |
991 | case BPF_B: return 1; |
992 | default: return -1; |
993 | } |
994 | } |
995 | |
996 | static int insn_bytes_to_bpf_size(__u32 sz) |
997 | { |
998 | switch (sz) { |
999 | case 8: return BPF_DW; |
1000 | case 4: return BPF_W; |
1001 | case 2: return BPF_H; |
1002 | case 1: return BPF_B; |
1003 | default: return -1; |
1004 | } |
1005 | } |
1006 | |
1007 | /* |
1008 | * Patch relocatable BPF instruction. |
1009 | * |
1010 | * Patched value is determined by relocation kind and target specification. |
1011 | * For existence relocations target spec will be NULL if field/type is not found. |
1012 | * Expected insn->imm value is determined using relocation kind and local |
1013 | * spec, and is checked before patching instruction. If actual insn->imm value |
1014 | * is wrong, bail out with error. |
1015 | * |
1016 | * Currently supported classes of BPF instruction are: |
1017 | * 1. rX = <imm> (assignment with immediate operand); |
1018 | * 2. rX += <imm> (arithmetic operations with immediate operand); |
1019 | * 3. rX = <imm64> (load with 64-bit immediate value); |
1020 | * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64}; |
1021 | * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64}; |
1022 | * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}. |
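*
* E.g., for a FIELD_BYTE_OFFSET relocation against case 4, the <off> in
* 'rX = *(u32 *)(rY + <off>)' is rewritten to the field's offset in target
* BTF, and the access size may also be adjusted when that is provably safe.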
1023 | */ |
1024 | int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, |
1025 | int insn_idx, const struct bpf_core_relo *relo, |
1026 | int relo_idx, const struct bpf_core_relo_res *res) |
1027 | { |
1028 | __u64 orig_val, new_val; |
1029 | __u8 class; |
1030 | |
1031 | class = BPF_CLASS(insn->code); |
1032 | |
1033 | if (res->poison) { |
1034 | poison: |
1035 | /* poison second part of ldimm64 to avoid confusing error from |
1036 | * verifier about "unknown opcode 00" |
1037 | */ |
1038 | if (is_ldimm64_insn(insn)) |
                        bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
1040 | bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn); |
1041 | return 0; |
1042 | } |
1043 | |
1044 | orig_val = res->orig_val; |
1045 | new_val = res->new_val; |
1046 | |
1047 | switch (class) { |
1048 | case BPF_ALU: |
1049 | case BPF_ALU64: |
1050 | if (BPF_SRC(insn->code) != BPF_K) |
1051 | return -EINVAL; |
1052 | if (res->validate && insn->imm != orig_val) { |
1053 | pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n" , |
1054 | prog_name, relo_idx, |
1055 | insn_idx, insn->imm, (unsigned long long)orig_val, |
1056 | (unsigned long long)new_val); |
1057 | return -EINVAL; |
1058 | } |
1059 | orig_val = insn->imm; |
1060 | insn->imm = new_val; |
1061 | pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n" , |
1062 | prog_name, relo_idx, insn_idx, |
1063 | (unsigned long long)orig_val, (unsigned long long)new_val); |
1064 | break; |
1065 | case BPF_LDX: |
1066 | case BPF_ST: |
1067 | case BPF_STX: |
1068 | if (res->validate && insn->off != orig_val) { |
1069 | pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n" , |
1070 | prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val, |
1071 | (unsigned long long)new_val); |
1072 | return -EINVAL; |
1073 | } |
1074 | if (new_val > SHRT_MAX) { |
1075 | pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n" , |
1076 | prog_name, relo_idx, insn_idx, (unsigned long long)new_val); |
1077 | return -ERANGE; |
1078 | } |
1079 | if (res->fail_memsz_adjust) { |
1080 | pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. " |
1081 | "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n" , |
1082 | prog_name, relo_idx, insn_idx); |
1083 | goto poison; |
1084 | } |
1085 | |
1086 | orig_val = insn->off; |
1087 | insn->off = new_val; |
1088 | pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n" , |
1089 | prog_name, relo_idx, insn_idx, (unsigned long long)orig_val, |
1090 | (unsigned long long)new_val); |
1091 | |
1092 | if (res->new_sz != res->orig_sz) { |
1093 | int insn_bytes_sz, insn_bpf_sz; |
1094 | |
1095 | insn_bytes_sz = insn_bpf_size_to_bytes(insn); |
1096 | if (insn_bytes_sz != res->orig_sz) { |
1097 | pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n" , |
1098 | prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); |
1099 | return -EINVAL; |
1100 | } |
1101 | |
                        insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
1103 | if (insn_bpf_sz < 0) { |
1104 | pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n" , |
1105 | prog_name, relo_idx, insn_idx, res->new_sz); |
1106 | return -EINVAL; |
1107 | } |
1108 | |
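                        /* swap only the BPF_SIZE bits; mode and class are preserved */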
1109 | insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); |
1110 | pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n" , |
1111 | prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz); |
1112 | } |
1113 | break; |
1114 | case BPF_LD: { |
1115 | __u64 imm; |
1116 | |
1117 | if (!is_ldimm64_insn(insn) || |
1118 | insn[0].src_reg != 0 || insn[0].off != 0 || |
1119 | insn[1].code != 0 || insn[1].dst_reg != 0 || |
1120 | insn[1].src_reg != 0 || insn[1].off != 0) { |
1121 | pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n" , |
1122 | prog_name, relo_idx, insn_idx); |
1123 | return -EINVAL; |
1124 | } |
1125 | |
1126 | imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32); |
1127 | if (res->validate && imm != orig_val) { |
1128 | pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n" , |
1129 | prog_name, relo_idx, |
1130 | insn_idx, (unsigned long long)imm, |
1131 | (unsigned long long)orig_val, (unsigned long long)new_val); |
1132 | return -EINVAL; |
1133 | } |
1134 | |
1135 | insn[0].imm = new_val; |
1136 | insn[1].imm = new_val >> 32; |
1137 | pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n" , |
1138 | prog_name, relo_idx, insn_idx, |
1139 | (unsigned long long)imm, (unsigned long long)new_val); |
1140 | break; |
1141 | } |
1142 | default: |
1143 | pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n" , |
1144 | prog_name, relo_idx, insn_idx, insn->code, |
1145 | insn->src_reg, insn->dst_reg, insn->off, insn->imm); |
1146 | return -EINVAL; |
1147 | } |
1148 | |
1149 | return 0; |
1150 | } |
1151 | |
1152 | /* Output spec definition in the format: |
1153 | * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>, |
1154 | * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b |
1155 | */ |
1156 | int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec) |
1157 | { |
1158 | const struct btf_type *t; |
1159 | const char *s; |
1160 | __u32 type_id; |
1161 | int i, len = 0; |
1162 | |
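/* append to buf with snprintf() semantics: len keeps accumulating the full
 * would-be length even after truncation, while buf/buf_sz advance by at most
 * the remaining space, so later calls degrade to safe no-ops
 */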
1163 | #define append_buf(fmt, args...) \ |
1164 | ({ \ |
1165 | int r; \ |
1166 | r = snprintf(buf, buf_sz, fmt, ##args); \ |
1167 | len += r; \ |
1168 | if (r >= buf_sz) \ |
1169 | r = buf_sz; \ |
1170 | buf += r; \ |
1171 | buf_sz -= r; \ |
1172 | }) |
1173 | |
1174 | type_id = spec->root_type_id; |
        t = btf_type_by_id(spec->btf, type_id);
        s = btf__name_by_offset(spec->btf, t->name_off);
1177 | |
        append_buf("<%s> [%u] %s %s",
                   core_relo_kind_str(spec->relo_kind),
                   type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
1181 | |
        if (core_relo_is_type_based(spec->relo_kind))
1183 | return len; |
1184 | |
        if (core_relo_is_enumval_based(spec->relo_kind)) {
                t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
1187 | if (btf_is_enum(t)) { |
1188 | const struct btf_enum *e; |
1189 | const char *fmt_str; |
1190 | |
1191 | e = btf_enum(t) + spec->raw_spec[0]; |
                        s = btf__name_by_offset(spec->btf, e->name_off);
                        fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
1194 | append_buf(fmt_str, s, e->val); |
1195 | } else { |
1196 | const struct btf_enum64 *e; |
1197 | const char *fmt_str; |
1198 | |
1199 | e = btf_enum64(t) + spec->raw_spec[0]; |
                        s = btf__name_by_offset(spec->btf, e->name_off);
                        fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
1202 | append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e)); |
1203 | } |
1204 | return len; |
1205 | } |
1206 | |
        if (core_relo_is_field_based(spec->relo_kind)) {
                for (i = 0; i < spec->len; i++) {
                        if (spec->spec[i].name)
                                append_buf(".%s", spec->spec[i].name);
                        else if (i > 0 || spec->spec[i].idx > 0)
                                append_buf("[%u]", spec->spec[i].idx);
                }

                append_buf(" (");
                for (i = 0; i < spec->raw_len; i++)
                        append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

                if (spec->bit_offset % 8)
                        append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
                else
                        append_buf(" @ offset %u)", spec->bit_offset / 8);
1223 | return len; |
1224 | } |
1225 | |
1226 | return len; |
1227 | #undef append_buf |
1228 | } |
1229 | |
1230 | /* |
1231 | * Calculate CO-RE relocation target result. |
1232 | * |
1233 | * The outline and important points of the algorithm: |
1234 | * 1. For given local type, find corresponding candidate target types. |
1235 | * Candidate type is a type with the same "essential" name, ignoring |
1236 | * everything after last triple underscore (___). E.g., `sample`, |
1237 | * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates |
1238 | * for each other. Names with triple underscore are referred to as |
1239 | * "flavors" and are useful, among other things, to allow to |
1240 | * specify/support incompatible variations of the same kernel struct, which |
1241 | * might differ between different kernel versions and/or build |
1242 | * configurations. |
1243 | * |
1244 | * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C |
1245 | * converter, when deduplicated BTF of a kernel still contains more than |
1246 | * one different types with the same name. In that case, ___2, ___3, etc |
* are appended starting from the second name conflict. But such flavors are
1248 | * also useful to be defined "locally", in BPF program, to extract same |
1249 | * data from incompatible changes between different kernel |
1250 | * versions/configurations. For instance, to handle field renames between |
1251 | * kernel versions, one can use two flavors of the struct name with the |
1252 | * same common name and use conditional relocations to extract that field, |
1253 | * depending on target kernel version. |
1254 | * 2. For each candidate type, try to match local specification to this |
1255 | * candidate target type. Matching involves finding corresponding |
1256 | * high-level spec accessors, meaning that all named fields should match, |
1257 | * as well as all array accesses should be within the actual bounds. Also, |
1258 | * types should be compatible (see bpf_core_fields_are_compat for details). |
1259 | * 3. It is supported and expected that there might be multiple flavors |
1260 | * matching the spec. As long as all the specs resolve to the same set of |
1261 | * offsets across all candidates, there is no error. If there is any |
1262 | * ambiguity, CO-RE relocation will fail. This is necessary to accommodate |
1263 | * imperfection of BTF deduplication, which can cause slight duplication of |
1264 | * the same BTF type, if some directly or indirectly referenced (by |
1265 | * pointer) type gets resolved to different actual types in different |
1266 | * object files. If such a situation occurs, deduplicated BTF will end up |
1267 | * with two (or more) structurally identical types, which differ only in |
1268 | * types they refer to through pointer. This should be OK in most cases and |
1269 | * is not an error. |
1270 | * 4. Candidate types search is performed by linearly scanning through all |
1271 | * types in target BTF. It is anticipated that this is overall more |
1272 | * efficient memory-wise and not significantly worse (if not better) |
1273 | * CPU-wise compared to prebuilding a map from all local type names to |
1274 | * a list of candidate type names. It's also sped up by caching resolved |
1275 | * list of matching candidates per each local "root" type ID, that has at |
1276 | * least one bpf_core_relo associated with it. This list is shared |
1277 | * between multiple relocations for the same type ID and is updated as some |
1278 | * of the candidates are pruned due to structural incompatibility. |
1279 | */ |
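
/* Illustrative sketch (not part of this file's API): two local flavors let a
 * single program cope with a field rename across kernel versions; both are
 * candidates for the kernel's 'struct task_struct':
 *
 *        struct task_struct___pre_514 {
 *                long state;
 *        } __attribute__((preserve_access_index));
 *
 *        struct task_struct___post_514 {
 *                unsigned int __state;
 *        } __attribute__((preserve_access_index));
 *
 * A field-existence relocation then selects whichever flavor matches the
 * running kernel at load time.
 */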
1280 | int bpf_core_calc_relo_insn(const char *prog_name, |
1281 | const struct bpf_core_relo *relo, |
1282 | int relo_idx, |
1283 | const struct btf *local_btf, |
1284 | struct bpf_core_cand_list *cands, |
1285 | struct bpf_core_spec *specs_scratch, |
1286 | struct bpf_core_relo_res *targ_res) |
1287 | { |
1288 | struct bpf_core_spec *local_spec = &specs_scratch[0]; |
1289 | struct bpf_core_spec *cand_spec = &specs_scratch[1]; |
1290 | struct bpf_core_spec *targ_spec = &specs_scratch[2]; |
1291 | struct bpf_core_relo_res cand_res; |
1292 | const struct btf_type *local_type; |
1293 | const char *local_name; |
1294 | __u32 local_id; |
1295 | char spec_buf[256]; |
1296 | int i, j, err; |
1297 | |
1298 | local_id = relo->type_id; |
        local_type = btf_type_by_id(local_btf, local_id);
        local_name = btf__name_by_offset(local_btf, local_type->name_off);
1301 | if (!local_name) |
1302 | return -EINVAL; |
1303 | |
        err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
1305 | if (err) { |
1306 | const char *spec_str; |
1307 | |
                spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
                pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
                        prog_name, relo_idx, local_id, btf_kind_str(local_type),
                        str_is_empty(local_name) ? "<anon>" : local_name,
                        spec_str ?: "<?>", err);
1313 | return -EINVAL; |
1314 | } |
1315 | |
        bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
        pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
1318 | |
1319 | /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ |
1320 | if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) { |
1321 | /* bpf_insn's imm value could get out of sync during linking */ |
1322 | memset(targ_res, 0, sizeof(*targ_res)); |
1323 | targ_res->validate = false; |
1324 | targ_res->poison = false; |
1325 | targ_res->orig_val = local_spec->root_type_id; |
1326 | targ_res->new_val = local_spec->root_type_id; |
1327 | return 0; |
1328 | } |
1329 | |
1330 | /* libbpf doesn't support candidate search for anonymous types */ |
        if (str_is_empty(local_name)) {
                pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
                        prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
1334 | return -EOPNOTSUPP; |
1335 | } |
1336 | |
1337 | for (i = 0, j = 0; i < cands->len; i++) { |
                err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
                                          cands->cands[i].id, cand_spec);
                if (err < 0) {
                        bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
                        pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
                                prog_name, relo_idx, i, spec_buf, err);
1344 | return err; |
1345 | } |
1346 | |
                bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
                pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
                         relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
1350 | |
1351 | if (err == 0) |
1352 | continue; |
1353 | |
                err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
1355 | if (err) |
1356 | return err; |
1357 | |
1358 | if (j == 0) { |
1359 | *targ_res = cand_res; |
1360 | *targ_spec = *cand_spec; |
1361 | } else if (cand_spec->bit_offset != targ_spec->bit_offset) { |
1362 | /* if there are many field relo candidates, they |
1363 | * should all resolve to the same bit offset |
1364 | */ |
1365 | pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n" , |
1366 | prog_name, relo_idx, cand_spec->bit_offset, |
1367 | targ_spec->bit_offset); |
1368 | return -EINVAL; |
1369 | } else if (cand_res.poison != targ_res->poison || |
1370 | cand_res.new_val != targ_res->new_val) { |
1371 | /* all candidates should result in the same relocation |
1372 | * decision and value, otherwise it's dangerous to |
1373 | * proceed due to ambiguity |
1374 | */ |
1375 | pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n" , |
1376 | prog_name, relo_idx, |
1377 | cand_res.poison ? "failure" : "success" , |
1378 | (unsigned long long)cand_res.new_val, |
1379 | targ_res->poison ? "failure" : "success" , |
1380 | (unsigned long long)targ_res->new_val); |
1381 | return -EINVAL; |
1382 | } |
1383 | |
1384 | cands->cands[j++] = cands->cands[i]; |
1385 | } |
1386 | |
1387 | /* |
1388 | * For BPF_CORE_FIELD_EXISTS relo or when used BPF program has field |
1389 | * existence checks or kernel version/config checks, it's expected |
1390 | * that we might not find any candidates. In this case, if field |
1391 | * wasn't found in any candidate, the list of candidates shouldn't |
1392 | * change at all, we'll just handle relocating appropriately, |
1393 | * depending on relo's kind. |
1394 | */ |
1395 | if (j > 0) |
1396 | cands->len = j; |
1397 | |
1398 | /* |
1399 | * If no candidates were found, it might be both a programmer error, |
1400 | * as well as expected case, depending whether instruction w/ |
1401 | * relocation is guarded in some way that makes it unreachable (dead |
1402 | * code) if relocation can't be resolved. This is handled in |
1403 | * bpf_core_patch_insn() uniformly by replacing that instruction with |
1404 | * BPF helper call insn (using invalid helper ID). If that instruction |
1405 | * is indeed unreachable, then it will be ignored and eliminated by |
1406 | * verifier. If it was an error, then verifier will complain and point |
1407 | * to a specific instruction number in its log. |
1408 | */ |
1409 | if (j == 0) { |
1410 | pr_debug("prog '%s': relo #%d: no matching targets found\n" , |
1411 | prog_name, relo_idx); |
1412 | |
1413 | /* calculate single target relo result explicitly */ |
                err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
1415 | if (err) |
1416 | return err; |
1417 | } |
1418 | |
1419 | return 0; |
1420 | } |
1421 | |
1422 | static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off, |
1423 | const struct btf *targ_btf, size_t targ_name_off) |
1424 | { |
1425 | const char *local_n, *targ_n; |
1426 | size_t local_len, targ_len; |
1427 | |
        local_n = btf__name_by_offset(local_btf, local_name_off);
        targ_n = btf__name_by_offset(targ_btf, targ_name_off);

        if (str_is_empty(targ_n))
                return str_is_empty(local_n);

        targ_len = bpf_core_essential_name_len(targ_n);
        local_len = bpf_core_essential_name_len(local_n);
1436 | |
1437 | return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0; |
1438 | } |
1439 | |
1440 | static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t, |
1441 | const struct btf *targ_btf, const struct btf_type *targ_t) |
1442 | { |
        __u16 local_vlen = btf_vlen(local_t);
        __u16 targ_vlen = btf_vlen(targ_t);
1445 | int i, j; |
1446 | |
1447 | if (local_t->size != targ_t->size) |
1448 | return 0; |
1449 | |
1450 | if (local_vlen > targ_vlen) |
1451 | return 0; |
1452 | |
1453 | /* iterate over the local enum's variants and make sure each has |
1454 | * a symbolic name correspondent in the target |
1455 | */ |
1456 | for (i = 0; i < local_vlen; i++) { |
1457 | bool matched = false; |
1458 | __u32 local_n_off, targ_n_off; |
1459 | |
                local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
                                                     btf_enum64(local_t)[i].name_off;
1462 | |
1463 | for (j = 0; j < targ_vlen; j++) { |
                        targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
                                                           btf_enum64(targ_t)[j].name_off;

                        if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
1468 | matched = true; |
1469 | break; |
1470 | } |
1471 | } |
1472 | |
1473 | if (!matched) |
1474 | return 0; |
1475 | } |
1476 | return 1; |
1477 | } |
1478 | |
1479 | static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t, |
1480 | const struct btf *targ_btf, const struct btf_type *targ_t, |
1481 | bool behind_ptr, int level) |
1482 | { |
        const struct btf_member *local_m = btf_members(local_t);
        __u16 local_vlen = btf_vlen(local_t);
        __u16 targ_vlen = btf_vlen(targ_t);
1486 | int i, j, err; |
1487 | |
1488 | if (local_vlen > targ_vlen) |
1489 | return 0; |
1490 | |
1491 | /* check that all local members have a match in the target */ |
1492 | for (i = 0; i < local_vlen; i++, local_m++) { |
                const struct btf_member *targ_m = btf_members(targ_t);
1494 | bool matched = false; |
1495 | |
1496 | for (j = 0; j < targ_vlen; j++, targ_m++) { |
                        if (!bpf_core_names_match(local_btf, local_m->name_off,
                                                  targ_btf, targ_m->name_off))
1499 | continue; |
1500 | |
                        err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
                                                     targ_m->type, behind_ptr, level - 1);
1503 | if (err < 0) |
1504 | return err; |
1505 | if (err > 0) { |
1506 | matched = true; |
1507 | break; |
1508 | } |
1509 | } |
1510 | |
1511 | if (!matched) |
1512 | return 0; |
1513 | } |
1514 | return 1; |
1515 | } |
1516 | |
1517 | /* Check that two types "match". This function assumes that root types were |
1518 | * already checked for name match. |
1519 | * |
1520 | * The matching relation is defined as follows: |
1521 | * - modifiers and typedefs are stripped (and, hence, effectively ignored) |
1522 | * - generally speaking types need to be of same kind (struct vs. struct, union |
1523 | * vs. union, etc.) |
1524 | * - exceptions are struct/union behind a pointer which could also match a |
1525 | * forward declaration of a struct or union, respectively, and enum vs. |
1526 | * enum64 (see below) |
1527 | * Then, depending on type: |
1528 | * - integers: |
1529 | * - match if size and signedness match |
1530 | * - arrays & pointers: |
1531 | * - target types are recursively matched |
1532 | * - structs & unions: |
1533 | * - local members need to exist in target with the same name |
1534 | * - for each member we recursively check match unless it is already behind a |
1535 | * pointer, in which case we only check matching names and compatible kind |
1536 | * - enums: |
1537 | * - local variants have to have a match in target by symbolic name (but not |
1538 | * numeric value) |
1539 | * - size has to match (but enum may match enum64 and vice versa) |
1540 | * - function pointers: |
1541 | * - number and position of arguments in local type has to match target |
1542 | * - for each argument and the return value we recursively check match |
1543 | */ |
1544 | int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, |
1545 | __u32 targ_id, bool behind_ptr, int level) |
1546 | { |
1547 | const struct btf_type *local_t, *targ_t; |
1548 | int depth = 32; /* max recursion depth */ |
1549 | __u16 local_k, targ_k; |
1550 | |
1551 | if (level <= 0) |
1552 | return -EINVAL; |
1553 | |
1554 | recur: |
1555 | depth--; |
1556 | if (depth < 0) |
1557 | return -EINVAL; |
1558 | |
        local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
        targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
1561 | if (!local_t || !targ_t) |
1562 | return -EINVAL; |
1563 | |
1564 | /* While the name check happens after typedefs are skipped, root-level |
1565 | * typedefs would still be name-matched as that's the contract with |
1566 | * callers. |
1567 | */ |
        if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
1569 | return 0; |
1570 | |
        local_k = btf_kind(local_t);
        targ_k = btf_kind(targ_t);
1573 | |
1574 | switch (local_k) { |
1575 | case BTF_KIND_UNKN: |
1576 | return local_k == targ_k; |
1577 | case BTF_KIND_FWD: { |
1578 | bool local_f = BTF_INFO_KFLAG(local_t->info); |
1579 | |
1580 | if (behind_ptr) { |
1581 | if (local_k == targ_k) |
1582 | return local_f == BTF_INFO_KFLAG(targ_t->info); |
1583 | |
1584 | /* for forward declarations kflag dictates whether the |
1585 | * target is a struct (0) or union (1) |
1586 | */ |
1587 | return (targ_k == BTF_KIND_STRUCT && !local_f) || |
1588 | (targ_k == BTF_KIND_UNION && local_f); |
1589 | } else { |
1590 | if (local_k != targ_k) |
1591 | return 0; |
1592 | |
1593 | /* match if the forward declaration is for the same kind */ |
1594 | return local_f == BTF_INFO_KFLAG(targ_t->info); |
1595 | } |
1596 | } |
1597 | case BTF_KIND_ENUM: |
1598 | case BTF_KIND_ENUM64: |
                if (!btf_is_any_enum(targ_t))
1600 | return 0; |
1601 | |
                return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
1603 | case BTF_KIND_STRUCT: |
1604 | case BTF_KIND_UNION: |
1605 | if (behind_ptr) { |
1606 | bool targ_f = BTF_INFO_KFLAG(targ_t->info); |
1607 | |
1608 | if (local_k == targ_k) |
1609 | return 1; |
1610 | |
1611 | if (targ_k != BTF_KIND_FWD) |
1612 | return 0; |
1613 | |
1614 | return (local_k == BTF_KIND_UNION) == targ_f; |
1615 | } else { |
1616 | if (local_k != targ_k) |
1617 | return 0; |
1618 | |
                        return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
                                                         behind_ptr, level);
1621 | } |
1622 | case BTF_KIND_INT: { |
1623 | __u8 local_sgn; |
1624 | __u8 targ_sgn; |
1625 | |
1626 | if (local_k != targ_k) |
1627 | return 0; |
1628 | |
                local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
                targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;
1631 | |
1632 | return local_t->size == targ_t->size && local_sgn == targ_sgn; |
1633 | } |
1634 | case BTF_KIND_PTR: |
1635 | if (local_k != targ_k) |
1636 | return 0; |
1637 | |
1638 | behind_ptr = true; |
1639 | |
1640 | local_id = local_t->type; |
1641 | targ_id = targ_t->type; |
1642 | goto recur; |
1643 | case BTF_KIND_ARRAY: { |
                const struct btf_array *local_array = btf_array(local_t);
                const struct btf_array *targ_array = btf_array(targ_t);
1646 | |
1647 | if (local_k != targ_k) |
1648 | return 0; |
1649 | |
1650 | if (local_array->nelems != targ_array->nelems) |
1651 | return 0; |
1652 | |
1653 | local_id = local_array->type; |
1654 | targ_id = targ_array->type; |
1655 | goto recur; |
1656 | } |
1657 | case BTF_KIND_FUNC_PROTO: { |
                struct btf_param *local_p = btf_params(local_t);
                struct btf_param *targ_p = btf_params(targ_t);
                __u16 local_vlen = btf_vlen(local_t);
                __u16 targ_vlen = btf_vlen(targ_t);
1662 | int i, err; |
1663 | |
1664 | if (local_k != targ_k) |
1665 | return 0; |
1666 | |
1667 | if (local_vlen != targ_vlen) |
1668 | return 0; |
1669 | |
1670 | for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { |
                        err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
                                                     targ_p->type, behind_ptr, level - 1);
1673 | if (err <= 0) |
1674 | return err; |
1675 | } |
1676 | |
1677 | /* tail recurse for return type check */ |
1678 | local_id = local_t->type; |
1679 | targ_id = targ_t->type; |
1680 | goto recur; |
1681 | } |
1682 | default: |
                pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
                        btf_kind_str(local_t), local_id, targ_id);
1685 | return 0; |
1686 | } |
1687 | } |
1688 | |