1// SPDX-License-Identifier: GPL-2.0
2/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
3
4#include <linux/bpf.h>
5#include <bpf/bpf_helpers.h>
6#include "../../../include/linux/filter.h"
7#include "bpf_misc.h"
8
/* Emit asm text that zero-fills a struct bpf_sock_tuple in the stack area
 * r10-48..r10-8 and then calls the given socket-lookup helper with
 * (ctx in r1, &tuple, sizeof(tuple), 0, 0).  Expansion sites must supply
 * the %[sizeof_bpf_sock_tuple] and %[func] asm operands and must have the
 * context pointer in r1 when the expansion runs.
 */
#define BPF_SK_LOOKUP(func) \
	/* struct bpf_sock_tuple tuple = {} */ \
	"r2 = 0;" \
	"*(u32*)(r10 - 8) = r2;" \
	"*(u64*)(r10 - 16) = r2;" \
	"*(u64*)(r10 - 24) = r2;" \
	"*(u64*)(r10 - 32) = r2;" \
	"*(u64*)(r10 - 40) = r2;" \
	"*(u64*)(r10 - 48) = r2;" \
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
	"r2 = r10;" \
	"r2 += -48;" \
	"r3 = %[sizeof_bpf_sock_tuple];"\
	"r4 = 0;" \
	"r5 = 0;" \
	"call %[" #func "];"
25
/* Single-entry hash map with 8-byte key and value; used by the tests
 * below as a generic helper-call target (lookup/update).
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");
32
/* Forward declarations of the auxiliary tail-call targets defined below,
 * so the map_prog1_socket initializer can reference them.
 */
void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);
36
/* Program array for bpf_tail_call tests; slots 0-2 hold the dummy
 * programs below (slot 3 is intentionally left empty).
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_socket,
		[1] = (void *)&dummy_prog_loop1_socket,
		[2] = (void *)&dummy_prog_24_socket,
	},
};
49
/* Auxiliary tail-call target: always returns 42. */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
	asm volatile ("r0 = 42; exit;");
}
56
/* Auxiliary tail-call target: always returns 24. */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
	asm volatile ("r0 = 24; exit;");
}
63
/* Auxiliary tail-call target that tail-calls slot 1 of map_prog1_socket
 * (i.e. itself); if the tail call fails it falls through and returns 41.
 */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
	asm volatile ("			\
	r3 = 1;				\
	r2 = %[map_prog1_socket] ll;	\
	call %[bpf_tail_call];		\
	r0 = 41;			\
	exit;				\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}
79
/* Returning the frame pointer in r0 is accepted for privileged loads
 * (retval is the pointer value) but must be rejected as an address leak
 * ("R0 leaks addr") when loaded unprivileged.
 */
SEC("socket")
__description("unpriv: return pointer")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void unpriv_return_pointer(void)
{
	asm volatile ("			\
	r0 = r10;			\
	exit;				\
" ::: __clobber_all);
}
91
/* Adding a constant to the ctx pointer (never dereferenced afterwards)
 * must be accepted for both privileged and unprivileged loads.
 */
SEC("socket")
__description("unpriv: add const to pointer")
__success __success_unpriv __retval(0)
__naked void unpriv_add_const_to_pointer(void)
{
	asm volatile ("			\
	r1 += 8;			\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
103
/* pointer + pointer is never a valid operation; rejected for both
 * privileged and unprivileged loads with "R1 pointer += pointer".
 */
SEC("socket")
__description("unpriv: add pointer to pointer")
__failure __msg("R1 pointer += pointer")
__failure_unpriv
__naked void unpriv_add_pointer_to_pointer(void)
{
	asm volatile ("			\
	r1 += r10;			\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
116
/* Negating a pointer is pointer arithmetic: allowed privileged (result
 * unused), rejected unprivileged with "R1 pointer arithmetic".
 */
SEC("socket")
__description("unpriv: neg pointer")
__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic")
__retval(0)
__naked void unpriv_neg_pointer(void)
{
	asm volatile ("			\
	r1 = -r1;			\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
129
/* Comparing the ctx pointer against a constant would reveal pointer
 * bits; rejected unprivileged with "R1 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp pointer with const")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_const(void)
{
	asm volatile ("			\
	if r1 == 0 goto l0_%=;		\
l0_%=:	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
142
/* Comparing two pointers (ctx vs frame pointer) is likewise rejected
 * unprivileged, reported against R10.
 */
SEC("socket")
__description("unpriv: cmp pointer with pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_pointer(void)
{
	asm volatile ("			\
	if r1 == r10 goto l0_%=;	\
l0_%=:	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
155
/* bpf_trace_printk from a tracepoint program verifies successfully.
 * NOTE(review): despite the name, no unpriv failure is annotated here —
 * presumably unprivileged users cannot load tracepoint programs at all,
 * so the restriction is enforced at load time rather than by a verifier
 * message; confirm against the original verifier/unpriv.c test.
 */
SEC("tracepoint")
__description("unpriv: check that printk is disallowed")
__success
__naked void check_that_printk_is_disallowed(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r1 = r10;			\
	r1 += -8;			\
	r2 = 8;				\
	r3 = r1;			\
	call %[bpf_trace_printk];	\
	r0 = 0;				\
	exit;				\
"	:
	: __imm(bpf_trace_printk)
	: __clobber_all);
}
175
/* bpf_map_update_elem(map, key, value, flags) is called with a stack
 * pointer in r4 (the flags argument, which expects a scalar).  Privileged
 * loads accept it; unprivileged loads reject the pointer reaching a
 * helper as "R4 leaks addr".
 */
SEC("socket")
__description("unpriv: pass pointer to helper function")
__success __failure_unpriv __msg_unpriv("R4 leaks addr")
__retval(0)
__naked void pass_pointer_to_helper_function(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_8b] ll;		\
	r3 = r2;			\
	r4 = r2;			\
	call %[bpf_map_update_elem];	\
	r0 = 0;				\
	exit;				\
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
198
/* The map-lookup key slot at r10-8 holds a spilled frame pointer.  The
 * helper would read that pointer indirectly, so unprivileged loads are
 * rejected with "invalid indirect read from stack"; privileged loads
 * are allowed.
 */
SEC("socket")
__description("unpriv: indirectly pass pointer on stack to helper function")
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8")
__retval(0)
__naked void on_stack_to_helper_function(void)
{
	asm volatile ("			\
	*(u64*)(r10 - 8) = r10;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_8b] ll;		\
	call %[bpf_map_lookup_elem];	\
	r0 = 0;				\
	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
219
/* Overwriting the low 4 bytes of a spilled pointer with a scalar is an
 * "attempt to corrupt spilled" pointer for unprivileged loads.
 */
SEC("socket")
__description("unpriv: mangle pointer on stack 1")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_1(void)
{
	asm volatile ("			\
	*(u64*)(r10 - 8) = r10;		\
	r0 = 0;				\
	*(u32*)(r10 - 8) = r0;		\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
234
/* Same as above but corrupting only the top byte (u8 store at r10-1)
 * of the spilled pointer slot.
 */
SEC("socket")
__description("unpriv: mangle pointer on stack 2")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_2(void)
{
	asm volatile ("			\
	*(u64*)(r10 - 8) = r10;		\
	r0 = 0;				\
	*(u8*)(r10 - 1) = r0;		\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
249
/* A partial (u32) fill of a 64-bit spilled pointer is rejected for both
 * privileged and unprivileged loads with "invalid size".
 */
SEC("socket")
__description("unpriv: read pointer from stack in small chunks")
__failure __msg("invalid size")
__failure_unpriv
__naked void from_stack_in_small_chunks(void)
{
	asm volatile ("			\
	*(u64*)(r10 - 8) = r10;		\
	r0 = *(u32*)(r10 - 8);		\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
263
/* Storing a pointer through the ctx pointer: privileged loads fail with
 * "invalid bpf_context access" (ctx is not writable here), unprivileged
 * loads additionally flag the pointer leak.
 */
SEC("socket")
__description("unpriv: write pointer into ctx")
__failure __msg("invalid bpf_context access")
__failure_unpriv __msg_unpriv("R1 leaks addr")
__naked void unpriv_write_pointer_into_ctx(void)
{
	asm volatile ("			\
	*(u64*)(r1 + 0) = r1;		\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
276
/* A clean spill and fill of the ctx pointer through the stack is legal
 * for both privileged and unprivileged loads.
 */
SEC("socket")
__description("unpriv: spill/fill of ctx")
__success __success_unpriv __retval(0)
__naked void unpriv_spill_fill_of_ctx(void)
{
	asm volatile ("			\
	r6 = r10;			\
	r6 += -8;			\
	*(u64*)(r6 + 0) = r1;		\
	r1 = *(u64*)(r6 + 0);		\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
291
/* The filled ctx pointer keeps its type and may be passed to a helper
 * that requires ctx (bpf_get_hash_recalc).
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 2")
__success __retval(0)
__naked void spill_fill_of_ctx_2(void)
{
	asm volatile ("			\
	r6 = r10;			\
	r6 += -8;			\
	*(u64*)(r6 + 0) = r1;		\
	r1 = *(u64*)(r6 + 0);		\
	call %[bpf_get_hash_recalc];	\
	r0 = 0;				\
	exit;				\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
309
/* The ctx spill is overwritten with the frame pointer before the fill,
 * so the helper sees R1 with type fp instead of ctx and verification
 * fails ("R1 type=fp expected=ctx").
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 3")
__failure __msg("R1 type=fp expected=ctx")
__naked void spill_fill_of_ctx_3(void)
{
	asm volatile ("			\
	r6 = r10;			\
	r6 += -8;			\
	*(u64*)(r6 + 0) = r1;		\
	*(u64*)(r6 + 0) = r10;		\
	r1 = *(u64*)(r6 + 0);		\
	call %[bpf_get_hash_recalc];	\
	exit;				\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
327
/* An atomic add on the spill slot destroys the pointer tracking: the
 * fill produces a scalar, and the helper call fails with
 * "R1 type=scalar expected=ctx".
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 4")
__failure __msg("R1 type=scalar expected=ctx")
__naked void spill_fill_of_ctx_4(void)
{
	asm volatile ("			\
	r6 = r10;			\
	r6 += -8;			\
	*(u64*)(r6 + 0) = r1;		\
	r0 = 1;				\
	lock *(u64 *)(r10 - 8) += r0;	\
	r1 = *(u64*)(r6 + 0);		\
	call %[bpf_get_hash_recalc];	\
	exit;				\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
346
/* Depending on the runtime branch, the same fill instruction would load
 * either a stack pointer or the ctx pointer; the subsequent store through
 * r1 would then be one insn operating on two pointer types, which the
 * verifier rejects ("same insn cannot be used with different pointers").
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_stx(void)
{
	asm volatile ("					\
	r3 = 42;					\
	r6 = r10;					\
	r6 += -8;					\
	if r1 == 0 goto l0_%=;				\
	r2 = r10;					\
	r2 += -16;					\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	r1 = *(u64*)(r6 + 0);				\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
370
/* Same as above, but use BPF_ST_MEM to save 42
 * instead of BPF_STX_MEM.  The raw insn is injected via .8byte because
 * the asm front end has no mnemonic for this exact store form.
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers st")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_st(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	if r1 == 0 goto l0_%=;				\
	r2 = r10;					\
	r2 += -16;					\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	r1 = *(u64*)(r6 + 0);				\
	.8byte %[st_mem];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_insn(st_mem,
		     BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42))
	: __clobber_all);
}
398
/* The fill can produce either the skb ctx pointer or a bpf_sock pointer
 * from bpf_sk_lookup_tcp; bpf_sk_release then sees a ctx-typed argument
 * where a sock is expected ("type=ctx expected=sock").
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - ctx and sock")
__failure __msg("type=ctx expected=sock")
__naked void pointers_stx_ctx_and_sock(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb == NULL) *target = sock; */		\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	/* else *target = skb; */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	/* struct __sk_buff *skb = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* skb->mark = 42; */				\
	r3 = 42;					\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	/* if (sk) bpf_sk_release(sk) */		\
	if r1 == 0 goto l2_%=;				\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
437
/* Same spill/fill mix, but the program exits without releasing the
 * looked-up socket; the verifier reports the unreleased reference
 * before it would reach the mixed-pointer problem (original errstr
 * kept below as a comment for reference).
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - leak sock")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("Unreleased reference")
__naked void different_pointers_stx_leak_sock(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb == NULL) *target = sock; */		\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	/* else *target = skb; */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	/* struct __sk_buff *skb = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* skb->mark = 42; */				\
	r3 = 42;					\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
473
/* Branches reversed relative to the ctx-and-sock test: the fill may
 * yield skb or sock, and the subsequent read of sk->mark is one insn
 * used with two different pointer types.
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)")
__failure __msg("same insn cannot be used with different pointers")
__naked void stx_sock_and_ctx_read(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb) *target = skb */			\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	/* else *target = sock */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l1_%=:	/* struct bpf_sock *sk = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;				\
	r3 = *(u32*)(r1 + %[bpf_sock_mark]);		\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
510
/* Write variant: storing to sk->mark fails first because bpf_sock is
 * not writable at that offset ("cannot write into sock"); the original
 * mixed-pointer errstr is kept below as a comment for reference.
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("cannot write into sock")
__naked void stx_sock_and_ctx_write(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb) *target = skb */			\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	/* else *target = sock */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l1_%=:	/* struct bpf_sock *sk = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;				\
	r3 = 42;					\
	*(u32*)(r1 + %[bpf_sock_mark]) = r3;		\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
550
/* Storing the map-value pointer into the map element itself is fine for
 * privileged loads, but leaks a kernel address to user-readable map
 * memory, so unprivileged loads fail with "R0 leaks addr".
 */
SEC("socket")
__description("unpriv: write pointer into map elem value")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void pointer_into_map_elem_value(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_8b] ll;		\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	*(u64*)(r0 + 0) = r0;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
572
/* Privileged verification tracks w7 = 0 & 1 = 0, proving the branch to
 * the r7 load is never taken.  NOTE(review): under unpriv the load is
 * still analyzed and r7 is only a scalar, hence the expected
 * "R7 invalid mem access 'scalar'" — presumably due to unpriv-mode
 * speculative/dead-branch handling; confirm against verifier behavior.
 */
SEC("socket")
__description("alu32: mov u32 const")
__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'")
__retval(0)
__naked void alu32_mov_u32_const(void)
{
	asm volatile ("			\
	w7 = 0;				\
	w7 &= 1;			\
	w0 = w7;			\
	if r0 == 0 goto l0_%=;		\
	r0 = *(u64*)(r7 + 0);		\
l0_%=:	exit;				\
" ::: __clobber_all);
}
588
/* A 32-bit move of the frame pointer truncates it; unprivileged loads
 * reject this as "R10 partial copy".
 */
SEC("socket")
__description("unpriv: partial copy of pointer")
__success __failure_unpriv __msg_unpriv("R10 partial copy")
__retval(0)
__naked void unpriv_partial_copy_of_pointer(void)
{
	asm volatile ("			\
	w1 = w10;			\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
601
/* The ctx pointer is used as the tail-call index (r3); unprivileged
 * loads reject this as "R3 leaks addr into helper".
 */
SEC("socket")
__description("unpriv: pass pointer to tail_call")
__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper")
__retval(0)
__naked void pass_pointer_to_tail_call(void)
{
	asm volatile ("			\
	r3 = r1;			\
	r2 = %[map_prog1_socket] ll;	\
	call %[bpf_tail_call];		\
	r0 = 0;				\
	exit;				\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}
619
/* Comparing a map pointer against zero is a pointer comparison,
 * rejected unprivileged with "R1 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp map pointer with zero")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void cmp_map_pointer_with_zero(void)
{
	asm volatile ("			\
	r1 = 0;				\
	r1 = %[map_hash_8b] ll;		\
	if r1 == 0 goto l0_%=;		\
l0_%=:	r0 = 0;				\
	exit;				\
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
}
636
/* r10 can never be the destination of a move; rejected for both
 * privilege levels with "frame pointer is read only".
 */
SEC("socket")
__description("unpriv: write into frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_write_into_frame_pointer(void)
{
	asm volatile ("			\
	r10 = r1;			\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
649
/* Filling a spilled value back into r10 is also a write to the frame
 * pointer; rejected for both privilege levels.
 */
SEC("socket")
__description("unpriv: spill/fill frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_spill_fill_frame_pointer(void)
{
	asm volatile ("			\
	r6 = r10;			\
	r6 += -8;			\
	*(u64*)(r6 + 0) = r10;		\
	r10 = *(u64*)(r6 + 0);		\
	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
665
/* Comparing r10 against a constant is a pointer comparison, rejected
 * unprivileged with "R10 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp of frame pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_frame_pointer(void)
{
	asm volatile ("			\
	if r10 == 0 goto l0_%=;		\
l0_%=:	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
678
/* Building a stack pointer via "scalar += r10" and storing through it:
 * fine privileged, but rejected unprivileged as stack pointer
 * arithmetic out of the permitted range.
 */
SEC("socket")
__description("unpriv: adding of fp, reg")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_reg(void)
{
	asm volatile ("			\
	r0 = 0;				\
	r1 = 0;				\
	r1 += r10;			\
	*(u64*)(r1 - 8) = r0;		\
	exit;				\
" ::: __clobber_all);
}
694
/* Immediate variant of the previous test: copy r10, add 0, then store
 * through the derived pointer; same unpriv rejection.
 */
SEC("socket")
__description("unpriv: adding of fp, imm")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_imm(void)
{
	asm volatile ("			\
	r0 = 0;				\
	r1 = r10;			\
	r1 += 0;			\
	*(u64*)(r1 - 8) = r0;		\
	exit;				\
" ::: __clobber_all);
}
710
/* Comparing a derived stack pointer against zero is a pointer
 * comparison, rejected unprivileged with "R2 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp of stack pointer")
__success __failure_unpriv __msg_unpriv("R2 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_stack_pointer(void)
{
	asm volatile ("			\
	r2 = r10;			\
	r2 += -8;			\
	if r2 == 0 goto l0_%=;		\
l0_%=:	r0 = 0;				\
	exit;				\
" ::: __clobber_all);
}
725
726char _license[] SEC("license") = "GPL";
727
/* Source file: linux/tools/testing/selftests/bpf/progs/verifier_unpriv.c */