1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */ |
3 | |
4 | #include <linux/bpf.h> |
5 | #include <bpf/bpf_helpers.h> |
6 | #include "../../../include/linux/filter.h" |
7 | #include "bpf_misc.h" |
8 | |
/*
 * Common prologue for the socket-lookup tests: zero the stack slots that
 * back a struct bpf_sock_tuple (stores at fp-48 .. fp-8), point r2 at the
 * tuple (fp-48), load r3 with sizeof(struct bpf_sock_tuple) via the
 * %[sizeof_bpf_sock_tuple] immediate each test supplies, and call the
 * given lookup helper with zeroed netns/flags args.  On return r0 holds
 * the looked-up socket pointer or NULL; the tests then exercise how the
 * verifier tracks that (possibly referenced) pointer.
 */
#define BPF_SK_LOOKUP(func) \
/* struct bpf_sock_tuple tuple = {} */ \
"r2 = 0;" \
"*(u32*)(r10 - 8) = r2;" \
"*(u64*)(r10 - 16) = r2;" \
"*(u64*)(r10 - 24) = r2;" \
"*(u64*)(r10 - 32) = r2;" \
"*(u64*)(r10 - 40) = r2;" \
"*(u64*)(r10 - 48) = r2;" \
/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
"r2 = r10;" \
"r2 += -48;" \
"r3 = %[sizeof_bpf_sock_tuple];"\
"r4 = 0;" \
"r5 = 0;" \
"call %[" #func "];"
25 | |
/* Opaque stand-in for the kernel's struct bpf_key; only ever handled as a
 * pointer, so no members are needed.
 */
struct bpf_key {} __attribute__((preserve_access_index));

/* Key-management kfuncs exercised by the lsm.s/bpf tests below. */
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
31 | |
/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void __kfunc_btf_root(void)
{
	/* These calls exist only to emit BTF records; this function is
	 * never attached or executed.
	 */
	bpf_key_put(0);
	bpf_lookup_system_key(0);
	bpf_lookup_user_key(0, 0);
}
43 | |
#define MAX_ENTRIES 11

/* 48-byte value layout for map_array_48b (4-byte index + 11 ints). */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
50 | |
/* Single-entry array map with a 48-byte value. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
} map_array_48b SEC(".maps" );
57 | |
/* One-page ring buffer map. */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps" );
62 | |
/* Forward declarations of the auxiliary tail-call target programs defined
 * just below.
 */
void dummy_prog_42_tc(void);
void dummy_prog_24_tc(void);
void dummy_prog_loop1_tc(void);

/* Program array used as the tail-call target by the reference-tracking
 * tail-call tests; slot 1 holds a program that tail-calls back into this
 * same map.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_tc SEC(".maps" ) = {
	.values = {
		[0] = (void *)&dummy_prog_42_tc,
		[1] = (void *)&dummy_prog_loop1_tc,
		[2] = (void *)&dummy_prog_24_tc,
	},
};
79 | |
/* Tail-call target: simply returns 42. */
SEC("tc" )
__auxiliary
__naked void dummy_prog_42_tc(void)
{
	asm volatile ("r0 = 42; exit;" );
}
86 | |
/* Tail-call target: simply returns 24. */
SEC("tc" )
__auxiliary
__naked void dummy_prog_24_tc(void)
{
	asm volatile ("r0 = 24; exit;" );
}
93 | |
/* Tail-call target that tail-calls slot 1 of map_prog1_tc (i.e. itself);
 * returns 41 if the tail call fails.
 */
SEC("tc" )
__auxiliary
__naked void dummy_prog_loop1_tc(void)
{
	asm volatile (" \
	r3 = 1; \
	r2 = %[map_prog1_tc] ll; \
	call %[bpf_tail_call]; \
	r0 = 41; \
	exit; \
" :
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc)
	: __clobber_all);
}
109 | |
/* Copy the possibly-referenced sock pointer into r6 and exit without
 * releasing it; the verifier must report the unreleased reference.
 */
SEC("tc" )
__description("reference tracking: leak potential reference" )
__failure __msg("Unreleased reference" )
__naked void reference_tracking_leak_potential_reference(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0; /* leak reference */ \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
124 | |
/* Same leak as above, but through bpf_skc_lookup_tcp, which yields a
 * referenced sock_common pointer instead of a sock pointer.
 */
SEC("tc" )
__description("reference tracking: leak potential reference to sock_common" )
__failure __msg("Unreleased reference" )
__naked void potential_reference_to_sock_common_1(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	r6 = r0; /* leak reference */ \
	exit; \
" :
	: __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
139 | |
/* Spilling the referenced pointer to the stack does not release it;
 * exiting afterwards must still be flagged as a leak.
 */
SEC("tc" )
__description("reference tracking: leak potential reference on stack" )
__failure __msg("Unreleased reference" )
__naked void leak_potential_reference_on_stack(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r4 = r10; \
	r4 += -8; \
	*(u64*)(r4 + 0) = r0; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
157 | |
/* Overwriting the spilled referenced pointer with zero destroys the only
 * copy without releasing it; must still be flagged as a leak.
 */
SEC("tc" )
__description("reference tracking: leak potential reference on stack 2" )
__failure __msg("Unreleased reference" )
__naked void potential_reference_on_stack_2(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r4 = r10; \
	r4 += -8; \
	*(u64*)(r4 + 0) = r0; \
	r0 = 0; \
	r1 = 0; \
	*(u64*)(r4 + 0) = r1; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
177 | |
/* Zeroing r0 discards the only copy of the referenced pointer without a
 * release call; must be flagged as a leak.
 */
SEC("tc" )
__description("reference tracking: zero potential reference" )
__failure __msg("Unreleased reference" )
__naked void reference_tracking_zero_potential_reference(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r0 = 0; /* leak reference */ \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
192 | |
/* Same zero-the-pointer leak, but for the sock_common reference produced
 * by bpf_skc_lookup_tcp.
 */
SEC("tc" )
__description("reference tracking: zero potential reference to sock_common" )
__failure __msg("Unreleased reference" )
__naked void potential_reference_to_sock_common_2(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	r0 = 0; /* leak reference */ \
	exit; \
" :
	: __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
207 | |
/* Copy the referenced pointer, then zero both copies; no register holds
 * it anymore and it was never released — must be flagged as a leak.
 */
SEC("tc" )
__description("reference tracking: copy and zero potential references" )
__failure __msg("Unreleased reference" )
__naked void copy_and_zero_potential_references(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r7 = r0; \
	r0 = 0; \
	r7 = 0; /* leak reference */ \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
224 | |
/* Correct pattern: bpf_lookup_user_key(KEY_SPEC_THREAD_KEYRING = -3, 0),
 * NULL-check the result, and release it with bpf_key_put — must verify.
 */
SEC("lsm.s/bpf" )
__description("reference tracking: acquire/release user key reference" )
__success
__naked void acquire_release_user_key_reference(void)
{
	asm volatile ("	\
	r1 = -3; \
	r2 = 0; \
	call %[bpf_lookup_user_key]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	call %[bpf_key_put]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_user_key)
	: __clobber_all);
}
244 | |
/* Correct pattern: bpf_lookup_system_key(1), NULL-check, then
 * bpf_key_put — must verify.
 */
SEC("lsm.s/bpf" )
__description("reference tracking: acquire/release system key reference" )
__success
__naked void acquire_release_system_key_reference(void)
{
	asm volatile ("	\
	r1 = 1; \
	call %[bpf_lookup_system_key]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	call %[bpf_key_put]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_system_key)
	: __clobber_all);
}
263 | |
/* Passing the lookup result straight to bpf_key_put without a NULL check
 * must be rejected: the kfunc's trusted arg may not be NULL.
 */
SEC("lsm.s/bpf" )
__description("reference tracking: release user key reference without check" )
__failure __msg("Possibly NULL pointer passed to trusted arg0" )
__naked void user_key_reference_without_check(void)
{
	asm volatile ("	\
	r1 = -3; \
	r2 = 0; \
	call %[bpf_lookup_user_key]; \
	r1 = r0; \
	call %[bpf_key_put]; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_user_key)
	: __clobber_all);
}
282 | |
/* Same missing NULL check as above, for the system-key lookup. */
SEC("lsm.s/bpf" )
__description("reference tracking: release system key reference without check" )
__failure __msg("Possibly NULL pointer passed to trusted arg0" )
__naked void system_key_reference_without_check(void)
{
	asm volatile ("	\
	r1 = 1; \
	call %[bpf_lookup_system_key]; \
	r1 = r0; \
	call %[bpf_key_put]; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_system_key)
	: __clobber_all);
}
300 | |
/* Calling bpf_key_put with a literal NULL must be rejected outright. */
SEC("lsm.s/bpf" )
__description("reference tracking: release with NULL key pointer" )
__failure __msg("Possibly NULL pointer passed to trusted arg0" )
__naked void release_with_null_key_pointer(void)
{
	asm volatile ("	\
	r1 = 0; \
	call %[bpf_key_put]; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_key_put)
	: __clobber_all);
}
315 | |
/* Acquire a user key reference and exit without ever releasing it —
 * must be flagged as an unreleased reference.
 */
SEC("lsm.s/bpf" )
__description("reference tracking: leak potential reference to user key" )
__failure __msg("Unreleased reference" )
__naked void potential_reference_to_user_key(void)
{
	asm volatile ("	\
	r1 = -3; \
	r2 = 0; \
	call %[bpf_lookup_user_key]; \
	exit; \
" :
	: __imm(bpf_lookup_user_key)
	: __clobber_all);
}
330 | |
/* Same leak as above, for a system key reference. */
SEC("lsm.s/bpf" )
__description("reference tracking: leak potential reference to system key" )
__failure __msg("Unreleased reference" )
__naked void potential_reference_to_system_key(void)
{
	asm volatile ("	\
	r1 = 1; \
	call %[bpf_lookup_system_key]; \
	exit; \
" :
	: __imm(bpf_lookup_system_key)
	: __clobber_all);
}
344 | |
/* bpf_sk_release requires a non-NULL sock; passing the unchecked
 * sock_or_null lookup result must be rejected with a type mismatch.
 */
SEC("tc" )
__description("reference tracking: release reference without check" )
__failure __msg("type=sock_or_null expected=sock" )
__naked void tracking_release_reference_without_check(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* reference in r0 may be NULL */ \
	r1 = r0; \
	r2 = 0; \
	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
363 | |
/* Same unchecked-release error, with the sock_common_or_null result of
 * bpf_skc_lookup_tcp.
 */
SEC("tc" )
__description("reference tracking: release reference to sock_common without check" )
__failure __msg("type=sock_common_or_null expected=sock" )
__naked void to_sock_common_without_check(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	/* reference in r0 may be NULL */ \
	r1 = r0; \
	r2 = 0; \
	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_release),
	  __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
382 | |
/* Correct pattern: NULL-check the lookup result, release on the non-NULL
 * path — must verify.
 */
SEC("tc" )
__description("reference tracking: release reference" )
__success __retval(0)
__naked void reference_tracking_release_reference(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
400 | |
/* Same correct check-then-release pattern for a sock_common reference. */
SEC("tc" )
__description("reference tracking: release reference to sock_common" )
__success __retval(0)
__naked void release_reference_to_sock_common(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_release),
	  __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
418 | |
/* Correct pattern with inverted branch: exit early on NULL, release on
 * the non-NULL path — must verify.
 */
SEC("tc" )
__description("reference tracking: release reference 2" )
__success __retval(0)
__naked void reference_tracking_release_reference_2(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	if r0 != 0 goto l0_%=; \
	exit; \
l0_%=:	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
437 | |
/* Double release: after the first bpf_sk_release the saved copy in r6 is
 * no longer a valid reference (scalar), so the second release must fail.
 */
SEC("tc" )
__description("reference tracking: release reference twice" )
__failure __msg("type=scalar expected=sock" )
__naked void reference_tracking_release_reference_twice(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	r6 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	r1 = r6; \
	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
458 | |
/* Double release confined to the non-NULL branch; still must fail with
 * the stale-reference type error.
 */
SEC("tc" )
__description("reference tracking: release reference twice inside branch" )
__failure __msg("type=scalar expected=sock" )
__naked void release_reference_twice_inside_branch(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	r6 = r0; \
	if r0 == 0 goto l0_%=; /* goto end */ \
	call %[bpf_sk_release]; \
	r1 = r6; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
479 | |
/* Only one of the two branches after the lookup releases the socket; the
 * mark != 0 branch exits while possibly holding a reference — must fail.
 */
SEC("tc" )
__description("reference tracking: alloc, check, free in one subbranch" )
__failure __msg("Unreleased reference" )
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_one_subbranch(void)
{
	asm volatile ("	\
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r0 = r2; \
	r0 += 16; \
	/* if (offsetof(skb, mark) > data_len) exit; */ \
	if r0 <= r3 goto l0_%=; \
	exit; \
l0_%=:	r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r6 == 0 goto l1_%=; /* mark == 0? */\
	/* Leak reference in R0 */ \
	exit; \
l1_%=:	if r0 == 0 goto l2_%=; /* sk NULL? */ \
	r1 = r0; \
	call %[bpf_sk_release]; \
l2_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
512 | |
/* Both branches after the lookup NULL-check and release the socket, so
 * every path is balanced — must verify.
 */
SEC("tc" )
__description("reference tracking: alloc, check, free in both subbranches" )
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_both_subbranches(void)
{
	asm volatile ("	\
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r0 = r2; \
	r0 += 16; \
	/* if (offsetof(skb, mark) > data_len) exit; */ \
	if r0 <= r3 goto l0_%=; \
	exit; \
l0_%=:	r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r6 == 0 goto l1_%=; /* mark == 0? */\
	if r0 == 0 goto l2_%=; /* sk NULL? */ \
	r1 = r0; \
	call %[bpf_sk_release]; \
l2_%=:	exit; \
l1_%=:	if r0 == 0 goto l3_%=; /* sk NULL? */ \
	r1 = r0; \
	call %[bpf_sk_release]; \
l3_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
546 | |
/* The reference acquired in the main program is passed to a subprog that
 * NULL-checks and releases it; cross-frame release must verify.
 */
SEC("tc" )
__description("reference tracking in call: free reference in subprog" )
__success __retval(0)
__naked void call_free_reference_in_subprog(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; /* unchecked reference */ \
	call call_free_reference_in_subprog__1; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

/* Subprog: release the socket passed in r1 if it is non-NULL. */
static __naked __noinline __attribute__((used))
void call_free_reference_in_subprog__1(void)
{
	asm volatile ("	\
	/* subprog 1 */	\
	r2 = r1; \
	if r2 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_release)
	: __clobber_all);
}
577 | |
/* The subprog may already have released the socket, so the caller's
 * second release uses a stale (scalar) value — must fail.
 */
SEC("tc" )
__description("reference tracking in call: free reference in subprog and outside" )
__failure __msg("type=scalar expected=sock" )
__naked void reference_in_subprog_and_outside(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; /* unchecked reference */ \
	r6 = r0; \
	call reference_in_subprog_and_outside__1; \
	r1 = r6; \
	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

/* Subprog: release the socket passed in r1 if it is non-NULL. */
static __naked __noinline __attribute__((used))
void reference_in_subprog_and_outside__1(void)
{
	asm volatile ("	\
	/* subprog 1 */	\
	r2 = r1; \
	if r2 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_release)
	: __clobber_all);
}
611 | |
/* The subprog acquires a reference, spills it into the caller's stack
 * slot, and returns without releasing it; the caller exits without a
 * release either — must be flagged as a leak.
 */
SEC("tc" )
__description("reference tracking in call: alloc & leak reference in subprog" )
__failure __msg("Unreleased reference" )
__naked void alloc_leak_reference_in_subprog(void)
{
	asm volatile ("	\
	r4 = r10; \
	r4 += -8; \
	call alloc_leak_reference_in_subprog__1; \
	r1 = r0; \
	r0 = 0; \
	exit; \
" ::: __clobber_all);
}

/* Subprog: look up a socket and spill the unchecked pointer into the
 * caller-stack slot passed in r4.
 */
static __naked __noinline __attribute__((used))
void alloc_leak_reference_in_subprog__1(void)
{
	asm volatile ("	\
	/* subprog 1 */	\
	r6 = r4; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* spill unchecked sk_ptr into stack of caller */\
	*(u64*)(r6 + 0) = r0; \
	r1 = r0; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
643 | |
/* The subprog returns the acquired (possibly NULL) socket in r0; the
 * caller NULL-checks and releases it — must verify.  Expected retval is
 * POINTER_VALUE since r0 still holds the pointer on the non-NULL path.
 */
SEC("tc" )
__description("reference tracking in call: alloc in subprog, release outside" )
__success __retval(POINTER_VALUE)
__naked void alloc_in_subprog_release_outside(void)
{
	asm volatile ("	\
	r4 = r10; \
	call alloc_in_subprog_release_outside__1; \
	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_release)
	: __clobber_all);
}

/* Subprog: perform the lookup and return the socket pointer in r0. */
static __naked __noinline __attribute__((used))
void alloc_in_subprog_release_outside__1(void)
{
	asm volatile ("	\
	/* subprog 1 */	\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	exit; /* return sk */ \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
673 | |
/* Two call levels deep: subprog 2 acquires the socket, subprog 1 spills
 * it into the top-level caller's stack, and nobody releases it — must be
 * flagged as a leak.
 */
SEC("tc" )
__description("reference tracking in call: sk_ptr leak into caller stack" )
__failure __msg("Unreleased reference" )
__naked void ptr_leak_into_caller_stack(void)
{
	asm volatile ("	\
	r4 = r10; \
	r4 += -8; \
	call ptr_leak_into_caller_stack__1; \
	r0 = 0; \
	exit; \
" ::: __clobber_all);
}

/* Subprog 1: save the caller-stack slot pointer, get a socket from
 * subprog 2, and spill the unchecked pointer into the caller's stack.
 */
static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__1(void)
{
	asm volatile ("	\
	/* subprog 1 */	\
	r5 = r10; \
	r5 += -8; \
	*(u64*)(r5 + 0) = r4; \
	call ptr_leak_into_caller_stack__2; \
	/* spill unchecked sk_ptr into stack of caller */\
	r5 = r10; \
	r5 += -8; \
	r4 = *(u64*)(r5 + 0); \
	*(u64*)(r4 + 0) = r0; \
	exit; \
" ::: __clobber_all);
}

/* Subprog 2: perform the lookup and return the socket pointer in r0. */
static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__2(void)
{
	asm volatile ("	\
	/* subprog 2 */	\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
718 | |
/* Same two-level spill as above, but subprog 1 NULL-checks the spilled
 * pointer, reloads it, and releases it — must verify.
 */
SEC("tc" )
__description("reference tracking in call: sk_ptr spill into caller stack" )
__success __retval(0)
__naked void ptr_spill_into_caller_stack(void)
{
	asm volatile ("	\
	r4 = r10; \
	r4 += -8; \
	call ptr_spill_into_caller_stack__1; \
	r0 = 0; \
	exit; \
" ::: __clobber_all);
}

/* Subprog 1: spill the socket obtained from subprog 2 into the caller's
 * stack, then NULL-check and release it through that spill slot.
 */
static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__1(void)
{
	asm volatile ("	\
	/* subprog 1 */	\
	r5 = r10; \
	r5 += -8; \
	*(u64*)(r5 + 0) = r4; \
	call ptr_spill_into_caller_stack__2; \
	/* spill unchecked sk_ptr into stack of caller */\
	r5 = r10; \
	r5 += -8; \
	r4 = *(u64*)(r5 + 0); \
	*(u64*)(r4 + 0) = r0; \
	if r0 == 0 goto l0_%=; \
	/* now the sk_ptr is verified, free the reference */\
	r1 = *(u64*)(r4 + 0); \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_release)
	: __clobber_all);
}

/* Subprog 2: perform the lookup and return the socket pointer in r0. */
static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__2(void)
{
	asm volatile ("	\
	/* subprog 2 */	\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
769 | |
/* LD_ABS is legal once the socket reference has been released; all three
 * widths are exercised after the release — must verify.
 */
SEC("tc" )
__description("reference tracking: allow LD_ABS" )
__success __retval(0)
__naked void reference_tracking_allow_ld_abs(void)
{
	asm volatile ("	\
	r6 = r1; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	r0 = *(u8*)skb[0]; \
	r0 = *(u16*)skb[0]; \
	r0 = *(u32*)skb[0]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
791 | |
/* LD_ABS executed while the socket reference is still held must be
 * rejected: LD_ABS/LD_IND can clobber registers/call out and cannot be
 * mixed with held socket references.
 */
SEC("tc" )
__description("reference tracking: forbid LD_ABS while holding reference" )
__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references" )
__naked void ld_abs_while_holding_reference(void)
{
	asm volatile ("	\
	r6 = r1; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r0 = *(u8*)skb[0]; \
	r0 = *(u16*)skb[0]; \
	r0 = *(u32*)skb[0]; \
	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
813 | |
/* LD_IND (encoded as a raw insn via %[ld_ind]) is legal after the
 * reference is released — must verify with retval 1 from r7.
 */
SEC("tc" )
__description("reference tracking: allow LD_IND" )
__success __retval(1)
__naked void reference_tracking_allow_ld_ind(void)
{
	asm volatile ("	\
	r6 = r1; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	r7 = 1; \
	.8byte %[ld_ind]; \
	r0 = r7; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
	  __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
	: __clobber_all);
}
836 | |
/* LD_IND executed while the socket reference (saved in r4) is still held
 * must be rejected, same as the LD_ABS case.
 */
SEC("tc" )
__description("reference tracking: forbid LD_IND while holding reference" )
__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references" )
__naked void ld_ind_while_holding_reference(void)
{
	asm volatile ("	\
	r6 = r1; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r4 = r0; \
	r7 = 1; \
	.8byte %[ld_ind]; \
	r0 = r7; \
	r1 = r4; \
	if r1 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
	  __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
	: __clobber_all);
}
860 | |
/* The tail call is only taken on the NULL path; the non-NULL path
 * releases the socket first, so no reference crosses the tail call —
 * must verify.
 */
SEC("tc" )
__description("reference tracking: check reference or tail call" )
__success __retval(0)
__naked void check_reference_or_tail_call(void)
{
	asm volatile ("	\
	r7 = r1; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* if (sk) bpf_sk_release() */ \
	r1 = r0; \
	if r1 != 0 goto l0_%=; \
	/* bpf_tail_call() */ \
	r3 = 3; \
	r2 = %[map_prog1_tc] ll; \
	r1 = r7; \
	call %[bpf_tail_call]; \
	r0 = 0; \
	exit; \
l0_%=:	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
889 | |
/* Release (or NULL-skip) the socket before the tail call, so both paths
 * reach the tail call with no references held — must verify.
 */
SEC("tc" )
__description("reference tracking: release reference then tail call" )
__success __retval(0)
__naked void release_reference_then_tail_call(void)
{
	asm volatile ("	\
	r7 = r1; \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* if (sk) bpf_sk_release() */ \
	r1 = r0; \
	if r1 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	/* bpf_tail_call() */ \
	r3 = 3; \
	r2 = %[map_prog1_tc] ll; \
	r1 = r7; \
	call %[bpf_tail_call]; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
917 | |
/* The possibly-referenced socket in r6 is still live when the tail call
 * happens; the release after the call never runs if the tail call
 * succeeds — must be rejected.
 */
SEC("tc" )
__description("reference tracking: leak possible reference over tail call" )
__failure __msg("tail_call would lead to reference leak" )
__naked void possible_reference_over_tail_call(void)
{
	asm volatile ("	\
	r7 = r1; \
	/* Look up socket and store in REG_6 */ \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* bpf_tail_call() */ \
	r6 = r0; \
	r3 = 3; \
	r2 = %[map_prog1_tc] ll; \
	r1 = r7; \
	call %[bpf_tail_call]; \
	r0 = 0; \
	/* if (sk) bpf_sk_release() */ \
	r1 = r6; \
	if r1 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
947 | |
/* Even a NULL-checked (definitely valid) reference held across a tail
 * call must be rejected, since a successful tail call never returns.
 */
SEC("tc" )
__description("reference tracking: leak checked reference over tail call" )
__failure __msg("tail_call would lead to reference leak" )
__naked void checked_reference_over_tail_call(void)
{
	asm volatile ("	\
	r7 = r1; \
	/* Look up socket and store in REG_6 */ \
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0; \
	/* if (!sk) goto end */ \
	if r0 == 0 goto l0_%=; \
	/* bpf_tail_call() */ \
	r3 = 0; \
	r2 = %[map_prog1_tc] ll; \
	r1 = r7; \
	call %[bpf_tail_call]; \
	r0 = 0; \
	r1 = r6; \
l0_%=:	call %[bpf_sk_release]; \
	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
977 | |
/* Pointer arithmetic on the not-yet-checked sock_or_null value must be
 * rejected before the release is even considered.
 */
SEC("tc" )
__description("reference tracking: mangle and release sock_or_null" )
__failure __msg("R1 pointer arithmetic on sock_or_null prohibited" )
__naked void and_release_sock_or_null(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	r1 += 5; \
	if r0 == 0 goto l0_%=; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
996 | |
/* Pointer arithmetic on a checked sock pointer is equally prohibited;
 * releasing the mangled pointer must fail.
 */
SEC("tc" )
__description("reference tracking: mangle and release sock" )
__failure __msg("R1 pointer arithmetic on sock prohibited" )
__naked void tracking_mangle_and_release_sock(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0; \
	if r0 == 0 goto l0_%=; \
	r1 += 5; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1015 | |
/* A 4-byte read at offset 4 of the checked sock is a valid member access;
 * check-read-release must verify.
 */
SEC("tc" )
__description("reference tracking: access member" )
__success __retval(0)
__naked void reference_tracking_access_member(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0; \
	if r0 == 0 goto l0_%=; \
	r2 = *(u32*)(r0 + 4); \
	r1 = r6; \
	call %[bpf_sk_release]; \
l0_%=:	exit; \
" :
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1035 | |
/* Attempt to store into sk->mark on a socket obtained via sk_lookup_tcp().
 * PTR_TO_SOCKET is read-only in this context, so the verifier must reject
 * the write ("cannot write into sock") before the release path is reached.
 */
SEC("tc" )
__description("reference tracking: write to member" )
__failure __msg("cannot write into sock" )
__naked void reference_tracking_write_to_member(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r1 = r6;					\
	r2 = 42 ll;					\
	*(u32*)(r1 + %[bpf_sock_mark]) = r2;		\
	r1 = r6;					\
l0_%=:	call %[bpf_sk_release];				\
	r0 = 0 ll;					\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1059 | |
/* An 8-byte load at offset 0 of a PTR_TO_SOCKET exceeds the allowed access
 * size for that field; the verifier must reject it with the exact size/off
 * message below.
 */
SEC("tc" )
__description("reference tracking: invalid 64-bit access of member" )
__failure __msg("invalid sock access off=0 size=8" )
__naked void _64_bit_access_of_member(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u64*)(r0 + 0);				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1079 | |
/* r1 aliases the referenced socket; after bpf_sk_release() the verifier
 * scrubs every register holding that pointer, so the subsequent load through
 * r1 must fail with "!read_ok" (use-after-release detection).
 */
SEC("tc" )
__description("reference tracking: access after release" )
__failure __msg("!read_ok" )
__naked void reference_tracking_access_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
	r2 = *(u32*)(r1 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1098 | |
/* Instead of a stack-built bpf_sock_tuple, pass a pointer into packet data
 * (skb->data, still in r2 from the first load) as the tuple argument after
 * verifying the packet holds at least 64 bytes. The verifier must accept
 * packet memory as a valid helper argument here.
 */
SEC("tc" )
__description("reference tracking: direct access for lookup" )
__success __retval(0)
__naked void tracking_direct_access_for_lookup(void)
{
	asm volatile ("					\
	/* Check that the packet is at least 64B long */\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 64;					\
	if r0 > r3 goto l0_%=;				\
	/* sk = sk_lookup_tcp(ctx, skb->data, ...) */	\
	r3 = %[sizeof_bpf_sock_tuple];			\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_sk_lookup_tcp];			\
	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u32*)(r0 + 4);				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1130 | |
/* tp = bpf_tcp_sock(sk) yields a pointer derived from the referenced sk.
 * After bpf_sk_release(sk), loading tp->snd_cwnd through r7 must be rejected:
 * releasing the parent reference invalidates all derived pointers.
 */
SEC("tc" )
__description("reference tracking: use ptr from bpf_tcp_sock() after release" )
__failure __msg("invalid mem access" )
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_tcp_sock_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]);	\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1161 | |
/* Same pattern as the bpf_tcp_sock() case above, but with bpf_sk_fullsock():
 * the fullsock pointer in r7 is derived from sk and must become unusable
 * once bpf_sk_release(sk) has run.
 */
SEC("tc" )
__description("reference tracking: use ptr from bpf_sk_fullsock() after release" )
__failure __msg("invalid mem access" )
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_sk_fullsock_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r7 + %[bpf_sock_type]);		\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1192 | |
/* Two levels of derivation: tp = bpf_tcp_sock(sk), then
 * fullsock = bpf_sk_fullsock(tp) (saved in r6 after the release argument is
 * staged in r1). Releasing the original sk must invalidate the
 * doubly-derived fullsock pointer, so the final load fails.
 */
SEC("tc" )
__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release" )
__failure __msg("invalid mem access" )
__flag(BPF_F_ANY_ALIGNMENT)
__naked void sk_fullsock_tp_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_fullsock];			\
	r1 = r6;					\
	r6 = r0;					\
	call %[bpf_sk_release];				\
	if r6 != 0 goto l2_%=;				\
	exit;						\
l2_%=:	r0 = *(u32*)(r6 + %[bpf_sock_type]);		\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1228 | |
/* The reference is released through the derived tp pointer
 * (bpf_sk_release(tp)); since tp carries the same reference as sk, the
 * original sk in r6 must also be invalidated, so the final load is rejected.
 */
SEC("tc" )
__description("reference tracking: use sk after bpf_sk_release(tp)" )
__failure __msg("invalid mem access" )
__flag(BPF_F_ANY_ALIGNMENT)
__naked void after_bpf_sk_release_tp(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r6 + %[bpf_sock_type]);		\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1258 | |
/* bpf_get_listener_sock() returns a listener pointer that does NOT share
 * sk's reference, so reading its src_port after bpf_sk_release(sk) is legal
 * and the program must be accepted (expected verdict: success, retval 0).
 */
SEC("tc" )
__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)" )
__success __retval(0)
__naked void after_bpf_sk_release_sk(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_get_listener_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r6;					\
	r6 = r0;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r6 + %[bpf_sock_src_port]);	\
	exit;						\
"	:
	: __imm(bpf_get_listener_sock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1288 | |
/* Passing the unreferenced listener pointer (from bpf_get_listener_sock())
 * to bpf_sk_release() must be rejected: only pointers that carry an acquired
 * reference may be released.
 */
SEC("tc" )
__description("reference tracking: bpf_sk_release(listen_sk)" )
__failure __msg("R1 must be referenced when passed to release function" )
__naked void bpf_sk_release_listen_sk(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_get_listener_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r6 + %[bpf_sock_type]);		\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_get_listener_sock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1319 | |
/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
/* r7 (fullsock) is NULL-checked but r8 (tp) is not: the load from r8 at
 * l1_%= can dereference a NULL tp, so the verifier must reject the program.
 */
SEC("tc" )
__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)" )
__failure __msg("invalid mem access" )
__naked void and_bpf_tcp_sock_sk(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_sk_fullsock];			\
	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_tcp_sock];				\
	r8 = r0;					\
	if r7 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]);	\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1354 | |
/* Two consecutive NULL comparisons on the same pointer: the verifier's
 * branch tracking must prove sk is non-NULL on the release path and NULL on
 * the fallthrough, so the reference is balanced and the program is accepted.
 */
SEC("tc" )
__description("reference tracking: branch tracking valid pointer null comparison" )
__success __retval(0)
__naked void tracking_valid_pointer_null_comparison(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	r3 = 1;						\
	if r6 != 0 goto l0_%=;				\
	r3 = 0;						\
l0_%=:	if r6 == 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l1_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1376 | |
/* Comparing the socket pointer against an arbitrary value (1234) does not
 * prove it NULL; if that branch is taken the reference escapes without a
 * release, so the verifier must flag "Unreleased reference".
 */
SEC("tc" )
__description("reference tracking: branch tracking valid pointer value comparison" )
__failure __msg("Unreleased reference" )
__naked void tracking_valid_pointer_value_comparison(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	r3 = 1;						\
	if r6 == 0 goto l0_%=;				\
	r3 = 0;						\
	if r6 == 1234 goto l0_%=;			\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1398 | |
/* bpf_skc_to_tcp_sock() casts the referenced sk to a BTF tcp_sock pointer
 * that carries the same reference; releasing through that pointer must be
 * accepted as balancing the original acquire.
 */
SEC("tc" )
__description("reference tracking: bpf_sk_release(btf_tcp_sock)" )
__success
__retval(0)
__naked void sk_release_btf_tcp_sock(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_skc_to_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1426 | |
/* The BTF tcp_sock pointer from bpf_skc_to_tcp_sock() shares sk's reference,
 * so after bpf_sk_release(sk) the byte load through r7 is a use-after-release
 * and must be rejected.
 */
SEC("tc" )
__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release" )
__failure __msg("invalid mem access" )
__naked void to_tcp_sock_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_skc_to_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	r0 = *(u8*)(r7 + 0);				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
1455 | |
/* Reserve ringbuf memory into r8, discard it, then try to store r8 into a
 * map value (r9) — an attempt to leak a released pointer. Per the
 * annotations: privileged load succeeds (retval 0), while unprivileged
 * verification fails with "R8 !read_ok" because r8 is scrubbed after the
 * discard releases its reference.
 */
SEC("socket" )
__description("reference tracking: try to leak released ptr reg" )
__success __failure_unpriv __msg_unpriv("R8 !read_ok" )
__retval(0)
__naked void to_leak_released_ptr_reg(void)
{
	asm volatile ("					\
	r0 = 0;						\
	*(u32*)(r10 - 4) = r0;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_array_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r9 = r0;					\
	r0 = 0;						\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r8 = r0;					\
	r1 = r8;					\
	r2 = 0;						\
	call %[bpf_ringbuf_discard];			\
	r0 = 0;						\
	*(u64*)(r9 + 0) = r8;				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_ringbuf_discard),
	  __imm(bpf_ringbuf_reserve),
	  __imm_addr(map_array_48b),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
1494 | |
1495 | char _license[] SEC("license" ) = "GPL" ; |
1496 | |