// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

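/*
 * Read the scratch slot without ever calling skb_load_bytes() to fill it.
 * Privileged loads are accepted, but unprivileged loads must be rejected
 * because the stack slot is never initialized.
 */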
SEC("socket")
__description("raw_stack: no skb_load_bytes")
__success
__failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	r3 = r6;				\
	r4 = 8;						\
	/* Call to skb_load_bytes() omitted. */	\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	::: __clobber_all);
}

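/* A negative length in R4 must be rejected ("R4 min value is negative"). */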
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len")
__failure __msg("R4 min value is negative")
__naked void skb_load_bytes_negative_len(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	r3 = r6;				\
	r4 = -8;				\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

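/* Same as above, with the negative length passed as the immediate ~0. */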
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len 2")
__failure __msg("R4 min value is negative")
__naked void load_bytes_negative_len_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	r3 = r6;				\
	r4 = %[__imm_0];			\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__imm_0, ~0)
	: __clobber_all);
}

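/* A zero-length read into the stack buffer must be rejected. */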
SEC("tc")
__description("raw_stack: skb_load_bytes, zero len")
__failure __msg("R4 invalid zero-sized read: u64=[0,0]")
__naked void skb_load_bytes_zero_len(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	r3 = r6;				\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

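/*
 * The stack buffer handed to skb_load_bytes() does not need to be
 * initialized beforehand; the helper writes into raw stack memory.
 */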
SEC("tc")
__description("raw_stack: skb_load_bytes, no init")
__success __retval(0)
__naked void skb_load_bytes_no_init(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

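/* As above, but with the destination slot initialized before the call. */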
SEC("tc")
__description("raw_stack: skb_load_bytes, init")
__success __retval(0)
__naked void stack_skb_load_bytes_init(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	r3 = 0xcafe;				\
	*(u64*)(r6 + 0) = r3;			\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

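/*
 * Spill the context pointer just below and just above the 8-byte window
 * written by skb_load_bytes(); both spills must stay valid pointers that
 * can still be dereferenced after the call.
 */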
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs around bounds")
__success __retval(0)
__naked void bytes_spilled_regs_around_bounds(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -16;				\
	*(u64*)(r6 - 8) = r1;			\
	*(u64*)(r6 + 8) = r1;			\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 - 8);			\
	r2 = *(u64*)(r6 + 8);			\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);	\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

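/*
 * Spill the context pointer into the very slot that skb_load_bytes()
 * overwrites; afterwards the slot holds a scalar and dereferencing it
 * must be rejected.
 */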
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption")
__failure __msg("R0 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void load_bytes_spilled_regs_corruption(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -8;				\
	*(u64*)(r6 + 0) = r1;			\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);	\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

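/*
 * Three spill slots: the two outside the helper's write window stay
 * valid pointers, while the middle one is clobbered, so dereferencing
 * it through R3 must be rejected.
 */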
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
__failure __msg("R3 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bytes_spilled_regs_corruption_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -16;				\
	*(u64*)(r6 - 8) = r1;			\
	*(u64*)(r6 + 0) = r1;			\
	*(u64*)(r6 + 8) = r1;			\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 - 8);			\
	r2 = *(u64*)(r6 + 8);			\
	r3 = *(u64*)(r6 + 0);			\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);	\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;				\
	r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]);	\
	r0 += r3;				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

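/*
 * Same layout as the corruption tests, but the clobbered slot is only
 * used as a scalar (added to R0), never dereferenced, so the program
 * is accepted.
 */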
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs + data")
__success __retval(0)
__naked void load_bytes_spilled_regs_data(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -16;				\
	*(u64*)(r6 - 8) = r1;			\
	*(u64*)(r6 + 0) = r1;			\
	*(u64*)(r6 + 8) = r1;			\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 - 8);			\
	r2 = *(u64*)(r6 + 8);			\
	r3 = *(u64*)(r6 + 0);			\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);	\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;				\
	r0 += r3;				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

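/* A buffer starting at fp-513 falls outside the 512-byte stack window. */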
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -513;				\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

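/* An 8-byte buffer starting at fp-1 would extend above the frame pointer. */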
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -1;				\
	r3 = r6;				\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

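/* Offset and length of 0xffffffff sign-extend to -1, so R4 is negative. */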
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 3")
__failure __msg("R4 min value is negative")
__naked void load_bytes_invalid_access_3(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += 0xffffffff;			\
	r3 = r6;				\
	r4 = 0xffffffff;			\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

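/* An oversized (0x7fffffff) length can never fit within the stack. */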
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 4")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_4(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -1;				\
	r3 = r6;				\
	r4 = 0x7fffffff;			\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

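/* Same oversized length, now starting at the lowest valid stack offset. */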
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 5")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_5(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -512;				\
	r3 = r6;				\
	r4 = 0x7fffffff;			\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

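/* A zero-length read is rejected even at the lowest valid stack offset. */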
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 6")
__failure __msg("invalid zero-sized read")
__naked void load_bytes_invalid_access_6(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -512;				\
	r3 = r6;				\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

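/* A 512-byte read from fp-512 covers the whole stack window and is accepted. */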
SEC("tc")
__description("raw_stack: skb_load_bytes, large access")
__success __retval(0)
__naked void skb_load_bytes_large_access(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;				\
	r6 += -512;				\
	r3 = r6;				\
	r4 = 512;				\
	call %[bpf_skb_load_bytes];		\
	r0 = *(u64*)(r6 + 0);			\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";