1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */ |
3 | |
4 | #include <linux/bpf.h> |
5 | #include <bpf/bpf_helpers.h> |
6 | #include "bpf_misc.h" |
7 | |
8 | #define MAX_ENTRIES 11 |
9 | |
/*
 * 48-byte map value (4-byte index + 11 * 4-byte foo[]) shared by both maps
 * below; its size also parameterizes the stack-buffer arithmetic in the
 * first test (sizeof(struct test_val) / 2 == 24).
 */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
14 | |
/* Single-entry array map providing a 48-byte value used as the stack buffer
 * in the bpf_get_task_stack test below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
} map_array_48b SEC(".maps");
21 | |
/* Single-entry hash map providing a 48-byte value used as the stack buffer
 * in the bpf_get_stack test below. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");
28 | |
SEC("tracepoint")
__description("bpf_get_stack return R0 within range")
__success
/*
 * Verifier test: calls bpf_get_stack(ctx, buf, 24, 256) into a map value,
 * where 24 == sizeof(struct test_val) / 2 (the __imm_0 constant) and
 * 256 is the flags argument (BPF_F_USER_STACK, 1 << 8, per uapi bpf.h).
 * The return value in R0 is sign-extended (<<= 32 / s>>= 32), negative
 * results branch to exit, and the non-negative R0 is then used to advance
 * the buffer pointer (r2 += r8) and shrink the remaining size (r9 -= r8)
 * for a second bpf_get_stack() call.  The test passes only if the verifier
 * tracks R0 as bounded by [0, size], making the derived pointer/size pair
 * provably in-bounds.  The asm body must stay exactly as written — it is
 * the instruction sequence under test.
 */
__naked void stack_return_r0_within_range(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	r9 = %[__imm_0];				\
	r1 = r6;					\
	r2 = r7;					\
	r3 = %[__imm_0];				\
	r4 = 256;					\
	call %[bpf_get_stack];				\
	r1 = 0;						\
	r8 = r0;					\
	r8 <<= 32;					\
	r8 s>>= 32;					\
	if r1 s> r8 goto l0_%=;				\
	r9 -= r8;					\
	r2 = r7;					\
	r2 += r8;					\
	r1 = r9;					\
	r1 <<= 32;					\
	r1 s>>= 32;					\
	r3 = r2;					\
	r3 += r1;					\
	r1 = r7;					\
	r5 = %[__imm_0];				\
	r1 += r5;					\
	if r3 >= r1 goto l0_%=;				\
	r1 = r6;					\
	r3 = r9;					\
	r4 = 0;						\
	call %[bpf_get_stack];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_stack),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) / 2)
	: __clobber_all);
}
79 | |
SEC("iter/task")
__description("bpf_get_task_stack return R0 range is refined")
__success
/*
 * Verifier test: a task iterator program reads ctx->meta->seq and ctx->task
 * (u64 loads at offsets 0 and 8 of the iterator context), looks up a
 * 48-byte buffer in map_array_48b, and calls
 * bpf_get_task_stack(task, buf, 48, 0).  After checking R0 s> 0 it passes
 * R0 straight through as the length argument of bpf_seq_write(seq, buf, R0).
 * This is accepted only if the verifier refines R0's range after the
 * signed comparison to (0, 48], proving the seq_write length is within the
 * buffer.  The asm body must stay exactly as written — it is the
 * instruction sequence under test.
 */
__naked void return_r0_range_is_refined(void)
{
	asm volatile ("					\
	r6 = *(u64*)(r1 + 0);				\
	r6 = *(u64*)(r6 + 0);		/* ctx->meta->seq */\
	r7 = *(u64*)(r1 + 8);		/* ctx->task */\
	r1 = %[map_array_48b] ll;	/* fixup_map_array_48b */\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	r2 = r10;					\
	r2 += -8;					\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	if r7 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r1 = r7;					\
	r2 = r0;					\
	r9 = r0;			/* keep buf for seq_write */\
	r3 = 48;					\
	r4 = 0;						\
	call %[bpf_get_task_stack];			\
	if r0 s> 0 goto l2_%=;				\
	r0 = 0;						\
	exit;						\
l2_%=:	r1 = r6;					\
	r2 = r9;					\
	r3 = r0;					\
	call %[bpf_seq_write];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_task_stack),
	  __imm(bpf_map_lookup_elem),
	  __imm(bpf_seq_write),
	  __imm_addr(map_array_48b)
	: __clobber_all);
}
123 | |
/* A GPL-compatible license is required to use GPL-only helpers such as
 * bpf_get_stack()/bpf_get_task_stack(). */
char _license[] SEC("license") = "GPL";
125 | |