1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */ |
3 | |
4 | #include <linux/bpf.h> |
5 | #include <bpf/bpf_helpers.h> |
6 | #include "bpf_misc.h" |
7 | #include <../../../tools/include/linux/filter.h> |
8 | |
9 | struct { |
10 | __uint(type, BPF_MAP_TYPE_RINGBUF); |
11 | __uint(max_entries, 4096); |
} map_ringbuf SEC(".maps");
13 | |
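/* Basic sanity: a 64-bit spill/fill round trip preserves register type,
 * so R2 filled from fp-8 is still the ctx pointer spilled from R1.
 * Returning it is fine when privileged, but unprivileged mode rejects
 * the kernel address leak through R0.
 */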
14 | SEC("socket" ) |
15 | __description("check valid spill/fill" ) |
16 | __success __failure_unpriv __msg_unpriv("R0 leaks addr" ) |
17 | __retval(POINTER_VALUE) |
18 | __naked void check_valid_spill_fill(void) |
19 | { |
20 | asm volatile (" \ |
21 | /* spill R1(ctx) into stack */ \ |
22 | *(u64*)(r10 - 8) = r1; \ |
23 | /* fill it back into R2 */ \ |
24 | r2 = *(u64*)(r10 - 8); \ |
25 | /* should be able to access R0 = *(R2 + 8) */ \ |
26 | /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\ |
27 | r0 = r2; \ |
28 | exit; \ |
29 | " ::: __clobber_all); |
30 | } |
31 | |
32 | SEC("socket" ) |
33 | __description("check valid spill/fill, skb mark" ) |
34 | __success __success_unpriv __retval(0) |
35 | __naked void valid_spill_fill_skb_mark(void) |
36 | { |
37 | asm volatile (" \ |
38 | r6 = r1; \ |
39 | *(u64*)(r10 - 8) = r6; \ |
40 | r0 = *(u64*)(r10 - 8); \ |
41 | r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ |
42 | exit; \ |
43 | " : |
44 | : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) |
45 | : __clobber_all); |
46 | } |
47 | |
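/* The same round trip works for PTR_TO_MEM: after the NULL check, the
 * ringbuf pointer spilled from R6 and filled into R7 is still writable
 * memory that can be submitted.
 */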
48 | SEC("socket" ) |
49 | __description("check valid spill/fill, ptr to mem" ) |
50 | __success __success_unpriv __retval(0) |
51 | __naked void spill_fill_ptr_to_mem(void) |
52 | { |
53 | asm volatile (" \ |
54 | /* reserve 8 byte ringbuf memory */ \ |
55 | r1 = 0; \ |
56 | *(u64*)(r10 - 8) = r1; \ |
57 | r1 = %[map_ringbuf] ll; \ |
58 | r2 = 8; \ |
59 | r3 = 0; \ |
60 | call %[bpf_ringbuf_reserve]; \ |
61 | /* store a pointer to the reserved memory in R6 */\ |
62 | r6 = r0; \ |
63 | /* check whether the reservation was successful */\ |
64 | if r0 == 0 goto l0_%=; \ |
65 | /* spill R6(mem) into the stack */ \ |
66 | *(u64*)(r10 - 8) = r6; \ |
67 | /* fill it back in R7 */ \ |
68 | r7 = *(u64*)(r10 - 8); \ |
69 | /* should be able to access *(R7) = 0 */ \ |
70 | r1 = 0; \ |
71 | *(u64*)(r7 + 0) = r1; \ |
72 | /* submit the reserved ringbuf memory */ \ |
73 | r1 = r7; \ |
74 | r2 = 0; \ |
75 | call %[bpf_ringbuf_submit]; \ |
76 | l0_%=: r0 = 0; \ |
77 | exit; \ |
78 | " : |
79 | : __imm(bpf_ringbuf_reserve), |
80 | __imm(bpf_ringbuf_submit), |
81 | __imm_addr(map_ringbuf) |
82 | : __clobber_all); |
83 | } |
84 | |
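/* Here the arithmetic happens before the NULL check, while R0 is still
 * ringbuf_mem_or_null; offsetting a maybe-NULL pointer is prohibited.
 */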
85 | SEC("socket" ) |
86 | __description("check with invalid reg offset 0" ) |
87 | __failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited" ) |
88 | __failure_unpriv |
89 | __naked void with_invalid_reg_offset_0(void) |
90 | { |
91 | asm volatile (" \ |
92 | /* reserve 8 byte ringbuf memory */ \ |
93 | r1 = 0; \ |
94 | *(u64*)(r10 - 8) = r1; \ |
95 | r1 = %[map_ringbuf] ll; \ |
96 | r2 = 8; \ |
97 | r3 = 0; \ |
98 | call %[bpf_ringbuf_reserve]; \ |
99 | /* store a pointer to the reserved memory in R6 */\ |
100 | r6 = r0; \ |
101 | /* add invalid offset to memory or NULL */ \ |
102 | r0 += 1; \ |
103 | /* check whether the reservation was successful */\ |
104 | if r0 == 0 goto l0_%=; \ |
/* should not be able to access *(R6) = 0 */ \
106 | r1 = 0; \ |
107 | *(u32*)(r6 + 0) = r1; \ |
108 | /* submit the reserved ringbuf memory */ \ |
109 | r1 = r6; \ |
110 | r2 = 0; \ |
111 | call %[bpf_ringbuf_submit]; \ |
112 | l0_%=: r0 = 0; \ |
113 | exit; \ |
114 | " : |
115 | : __imm(bpf_ringbuf_reserve), |
116 | __imm(bpf_ringbuf_submit), |
117 | __imm_addr(map_ringbuf) |
118 | : __clobber_all); |
119 | } |
120 | |
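/* Overwriting a single byte of a spilled pointer destroys the spill.
 * A privileged fill still succeeds but yields a scalar that cannot be
 * dereferenced; unprivileged mode rejects the corrupting store itself.
 */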
121 | SEC("socket" ) |
122 | __description("check corrupted spill/fill" ) |
123 | __failure __msg("R0 invalid mem access 'scalar'" ) |
124 | __msg_unpriv("attempt to corrupt spilled" ) |
125 | __flag(BPF_F_ANY_ALIGNMENT) |
126 | __naked void check_corrupted_spill_fill(void) |
127 | { |
128 | asm volatile (" \ |
129 | /* spill R1(ctx) into stack */ \ |
130 | *(u64*)(r10 - 8) = r1; \ |
131 | /* mess up with R1 pointer on stack */ \ |
132 | r0 = 0x23; \ |
133 | *(u8*)(r10 - 7) = r0; \ |
134 | /* fill back into R0 is fine for priv. \ |
135 | * R0 now becomes SCALAR_VALUE. \ |
136 | */ \ |
137 | r0 = *(u64*)(r10 - 8); \ |
138 | /* Load from R0 should fail. */ \ |
139 | r0 = *(u64*)(r0 + 8); \ |
140 | exit; \ |
141 | " ::: __clobber_all); |
142 | } |
143 | |
144 | SEC("socket" ) |
145 | __description("check corrupted spill/fill, LSB" ) |
146 | __success __failure_unpriv __msg_unpriv("attempt to corrupt spilled" ) |
147 | __retval(POINTER_VALUE) |
148 | __naked void check_corrupted_spill_fill_lsb(void) |
149 | { |
150 | asm volatile (" \ |
151 | *(u64*)(r10 - 8) = r1; \ |
152 | r0 = 0xcafe; \ |
153 | *(u16*)(r10 - 8) = r0; \ |
154 | r0 = *(u64*)(r10 - 8); \ |
155 | exit; \ |
156 | " ::: __clobber_all); |
157 | } |
158 | |
159 | SEC("socket" ) |
160 | __description("check corrupted spill/fill, MSB" ) |
161 | __success __failure_unpriv __msg_unpriv("attempt to corrupt spilled" ) |
162 | __retval(POINTER_VALUE) |
163 | __naked void check_corrupted_spill_fill_msb(void) |
164 | { |
165 | asm volatile (" \ |
166 | *(u64*)(r10 - 8) = r1; \ |
167 | r0 = 0x12345678; \ |
168 | *(u32*)(r10 - 4) = r0; \ |
169 | r0 = *(u64*)(r10 - 8); \ |
170 | exit; \ |
171 | " ::: __clobber_all); |
172 | } |
173 | |
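/* A u32 spill/fill round trip of the constant 20 keeps its exact value,
 * which makes the packet bounds check against pkt_end provable.
 */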
174 | SEC("tc" ) |
175 | __description("Spill and refill a u32 const scalar. Offset to skb->data" ) |
176 | __success __retval(0) |
177 | __naked void scalar_offset_to_skb_data_1(void) |
178 | { |
179 | asm volatile (" \ |
180 | r2 = *(u32*)(r1 + %[__sk_buff_data]); \ |
181 | r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ |
182 | w4 = 20; \ |
183 | *(u32*)(r10 - 8) = r4; \ |
184 | r4 = *(u32*)(r10 - 8); \ |
185 | r0 = r2; \ |
186 | /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ \ |
187 | r0 += r4; \ |
188 | /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ |
189 | if r0 > r3 goto l0_%=; \ |
190 | /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\ |
191 | r0 = *(u32*)(r2 + 0); \ |
192 | l0_%=: r0 = 0; \ |
193 | exit; \ |
194 | " : |
195 | : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), |
196 | __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) |
197 | : __clobber_all); |
198 | } |
199 | |
200 | SEC("socket" ) |
201 | __description("Spill a u32 const, refill from another half of the uninit u32 from the stack" ) |
202 | /* in privileged mode reads from uninitialized stack locations are permitted */ |
203 | __success __failure_unpriv |
204 | __msg_unpriv("invalid read from stack off -4+0 size 4" ) |
205 | __retval(0) |
206 | __naked void uninit_u32_from_the_stack(void) |
207 | { |
208 | asm volatile (" \ |
209 | w4 = 20; \ |
210 | *(u32*)(r10 - 8) = r4; \ |
/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */ \
212 | r4 = *(u32*)(r10 - 4); \ |
213 | r0 = 0; \ |
214 | exit; \ |
215 | " ::: __clobber_all); |
216 | } |
217 | |
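/* Filling only the low u16 half of the spilled u32 constant still
 * yields 20; which half is in range depends on endianness, hence the
 * #if in the asm body.
 */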
218 | SEC("tc" ) |
219 | __description("Spill a u32 const scalar. Refill as u16. Offset to skb->data" ) |
220 | __success __retval(0) |
221 | __naked void u16_offset_to_skb_data(void) |
222 | { |
223 | asm volatile (" \ |
224 | r2 = *(u32*)(r1 + %[__sk_buff_data]); \ |
225 | r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ |
226 | w4 = 20; \ |
227 | *(u32*)(r10 - 8) = r4; \ |
228 | " |
229 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
230 | "r4 = *(u16*)(r10 - 8);" |
231 | #else |
232 | "r4 = *(u16*)(r10 - 6);" |
233 | #endif |
234 | " \ |
235 | r0 = r2; \ |
236 | /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\ |
237 | r0 += r4; \ |
238 | /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ |
239 | if r0 > r3 goto l0_%=; \ |
240 | /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ |
241 | r0 = *(u32*)(r2 + 0); \ |
242 | l0_%=: r0 = 0; \ |
243 | exit; \ |
244 | " : |
245 | : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), |
246 | __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) |
247 | : __clobber_all); |
248 | } |
249 | |
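/* Two adjacent u32 spills are not merged into one tracked slot: a u64
 * fill spanning both comes back as an unbounded scalar, so packet
 * pointer math with it is rejected.
 */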
250 | SEC("tc" ) |
251 | __description("Spill u32 const scalars. Refill as u64. Offset to skb->data" ) |
252 | __failure __msg("math between pkt pointer and register with unbounded min value is not allowed" ) |
253 | __naked void u64_offset_to_skb_data(void) |
254 | { |
255 | asm volatile (" \ |
256 | r2 = *(u32*)(r1 + %[__sk_buff_data]); \ |
257 | r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ |
258 | w6 = 0; \ |
259 | w7 = 20; \ |
260 | *(u32*)(r10 - 4) = r6; \ |
261 | *(u32*)(r10 - 8) = r7; \ |
262 | r4 = *(u64*)(r10 - 8); \ |
263 | r0 = r2; \ |
264 | /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */ \ |
265 | r0 += r4; \ |
266 | if r0 > r3 goto l0_%=; \ |
267 | r0 = *(u32*)(r2 + 0); \ |
268 | l0_%=: r0 = 0; \ |
269 | exit; \ |
270 | " : |
271 | : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), |
272 | __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) |
273 | : __clobber_all); |
274 | } |
275 | |
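/* Filling the high u16 half does not recover the constant; r4 is only
 * known as umax=65535, which is not enough to prove the access through
 * r2 in bounds.
 */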
276 | SEC("tc" ) |
277 | __description("Spill a u32 const scalar. Refill as u16 from MSB. Offset to skb->data" ) |
278 | __failure __msg("invalid access to packet" ) |
279 | __naked void _6_offset_to_skb_data(void) |
280 | { |
281 | asm volatile (" \ |
282 | r2 = *(u32*)(r1 + %[__sk_buff_data]); \ |
283 | r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ |
284 | w4 = 20; \ |
285 | *(u32*)(r10 - 8) = r4; \ |
286 | " |
287 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
288 | "r4 = *(u16*)(r10 - 6);" |
289 | #else |
290 | "r4 = *(u16*)(r10 - 8);" |
291 | #endif |
292 | " \ |
293 | r0 = r2; \ |
294 | /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ |
295 | r0 += r4; \ |
296 | /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ |
297 | if r0 > r3 goto l0_%=; \ |
298 | /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ |
299 | r0 = *(u32*)(r2 + 0); \ |
300 | l0_%=: r0 = 0; \ |
301 | exit; \ |
302 | " : |
303 | : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), |
304 | __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) |
305 | : __clobber_all); |
306 | } |
307 | |
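/* A fill from the non-8-byte-aligned fp-4 slot is only bounded by its
 * access size (umax=U32_MAX), so the packet access cannot be proven
 * safe.
 */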
308 | SEC("tc" ) |
309 | __description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data" ) |
310 | __failure __msg("invalid access to packet" ) |
311 | __naked void addr_offset_to_skb_data(void) |
312 | { |
313 | asm volatile (" \ |
314 | r2 = *(u32*)(r1 + %[__sk_buff_data]); \ |
315 | r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ |
316 | w4 = 20; \ |
317 | *(u32*)(r10 - 8) = r4; \ |
318 | *(u32*)(r10 - 4) = r4; \ |
319 | r4 = *(u32*)(r10 - 4); \ |
320 | r0 = r2; \ |
321 | /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\ |
322 | r0 += r4; \ |
323 | /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\ |
324 | if r0 > r3 goto l0_%=; \ |
325 | /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\ |
326 | r0 = *(u32*)(r2 + 0); \ |
327 | l0_%=: r0 = 0; \ |
328 | exit; \ |
329 | " : |
330 | : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), |
331 | __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) |
332 | : __clobber_all); |
333 | } |
334 | |
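/* Not just constants: a umax=40 bound established before a 32-bit spill
 * survives the fill, so both packet checks below go through.
 */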
335 | SEC("tc" ) |
336 | __description("Spill and refill a umax=40 bounded scalar. Offset to skb->data" ) |
337 | __success __retval(0) |
338 | __naked void scalar_offset_to_skb_data_2(void) |
339 | { |
340 | asm volatile (" \ |
341 | r2 = *(u32*)(r1 + %[__sk_buff_data]); \ |
342 | r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ |
343 | r4 = *(u64*)(r1 + %[__sk_buff_tstamp]); \ |
344 | if r4 <= 40 goto l0_%=; \ |
345 | r0 = 0; \ |
346 | exit; \ |
347 | l0_%=: /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ \ |
348 | *(u32*)(r10 - 8) = r4; \ |
/* r4 = *(u32 *)(r10 - 8) */ \
350 | r4 = *(u32*)(r10 - 8); \ |
351 | /* r2 += r4 R2=pkt R4=umax=40 */ \ |
352 | r2 += r4; \ |
353 | /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ \ |
354 | r0 = r2; \ |
355 | /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ \ |
356 | r2 += 20; \ |
357 | /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\ |
358 | if r2 > r3 goto l1_%=; \ |
359 | /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\ |
360 | r0 = *(u32*)(r0 + 0); \ |
361 | l1_%=: r0 = 0; \ |
362 | exit; \ |
363 | " : |
364 | : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), |
365 | __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), |
366 | __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) |
367 | : __clobber_all); |
368 | } |
369 | |
370 | SEC("tc" ) |
371 | __description("Spill a u32 scalar at fp-4 and then at fp-8" ) |
372 | __success __retval(0) |
373 | __naked void and_then_at_fp_8(void) |
374 | { |
375 | asm volatile (" \ |
376 | w4 = 4321; \ |
377 | *(u32*)(r10 - 4) = r4; \ |
378 | *(u32*)(r10 - 8) = r4; \ |
379 | r4 = *(u64*)(r10 - 8); \ |
380 | r0 = 0; \ |
381 | exit; \ |
382 | " ::: __clobber_all); |
383 | } |
384 | |
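/* Scalar IDs let find_equal_scalars() propagate range knowledge between
 * copies of a value. A spill narrower than the register truncates the
 * value, so the ID must be cleared on spill; otherwise bounds learned on
 * the 32-bit fill would wrongly flow back into the 64-bit r1.
 */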
385 | SEC("xdp" ) |
386 | __description("32-bit spill of 64-bit reg should clear ID" ) |
387 | __failure __msg("math between ctx pointer and 4294967295 is not allowed" ) |
388 | __naked void spill_32bit_of_64bit_fail(void) |
389 | { |
390 | asm volatile (" \ |
391 | r6 = r1; \ |
392 | /* Roll one bit to force the verifier to track both branches. */\ |
393 | call %[bpf_get_prandom_u32]; \ |
394 | r0 &= 0x8; \ |
395 | /* Put a large number into r1. */ \ |
396 | r1 = 0xffffffff; \ |
397 | r1 <<= 32; \ |
398 | r1 += r0; \ |
399 | /* Assign an ID to r1. */ \ |
400 | r2 = r1; \ |
401 | /* 32-bit spill r1 to stack - should clear the ID! */\ |
402 | *(u32*)(r10 - 8) = r1; \ |
403 | /* 32-bit fill r2 from stack. */ \ |
404 | r2 = *(u32*)(r10 - 8); \ |
405 | /* Compare r2 with another register to trigger find_equal_scalars.\ |
406 | * Having one random bit is important here, otherwise the verifier cuts\ |
407 | * the corners. If the ID was mistakenly preserved on spill, this would\ |
408 | * cause the verifier to think that r1 is also equal to zero in one of\ |
409 | * the branches, and equal to eight on the other branch.\ |
410 | */ \ |
411 | r3 = 0; \ |
412 | if r2 != r3 goto l0_%=; \ |
413 | l0_%=: r1 >>= 32; \ |
414 | /* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\ |
415 | * read will happen, because it actually contains 0xffffffff.\ |
416 | */ \ |
417 | r6 += r1; \ |
418 | r0 = *(u32*)(r6 + 0); \ |
419 | exit; \ |
420 | " : |
421 | : __imm(bpf_get_prandom_u32) |
422 | : __clobber_all); |
423 | } |
424 | |
425 | SEC("xdp" ) |
426 | __description("16-bit spill of 32-bit reg should clear ID" ) |
427 | __failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed" ) |
428 | __naked void spill_16bit_of_32bit_fail(void) |
429 | { |
430 | asm volatile (" \ |
431 | r6 = r1; \ |
432 | /* Roll one bit to force the verifier to track both branches. */\ |
433 | call %[bpf_get_prandom_u32]; \ |
434 | r0 &= 0x8; \ |
435 | /* Put a large number into r1. */ \ |
436 | w1 = 0xffff0000; \ |
437 | r1 += r0; \ |
438 | /* Assign an ID to r1. */ \ |
439 | r2 = r1; \ |
440 | /* 16-bit spill r1 to stack - should clear the ID! */\ |
441 | *(u16*)(r10 - 8) = r1; \ |
442 | /* 16-bit fill r2 from stack. */ \ |
443 | r2 = *(u16*)(r10 - 8); \ |
444 | /* Compare r2 with another register to trigger find_equal_scalars.\ |
445 | * Having one random bit is important here, otherwise the verifier cuts\ |
446 | * the corners. If the ID was mistakenly preserved on spill, this would\ |
447 | * cause the verifier to think that r1 is also equal to zero in one of\ |
448 | * the branches, and equal to eight on the other branch.\ |
449 | */ \ |
450 | r3 = 0; \ |
451 | if r2 != r3 goto l0_%=; \ |
452 | l0_%=: r1 >>= 16; \ |
453 | /* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\ |
454 | * read will happen, because it actually contains 0xffff.\ |
455 | */ \ |
456 | r6 += r1; \ |
457 | r0 = *(u32*)(r6 + 0); \ |
458 | exit; \ |
459 | " : |
460 | : __imm(bpf_get_prandom_u32) |
461 | : __clobber_all); |
462 | } |
463 | |
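/* In the expected verifier log below each stack slot byte is printed as
 * '0' for STACK_ZERO, 'm' for STACK_MISC and '?' for STACK_INVALID; the
 * mixed subreg stores must preserve exactly that per-byte state.
 */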
464 | SEC("raw_tp" ) |
465 | __log_level(2) |
466 | __success |
467 | __msg("fp-8=0m??scalar()" ) |
468 | __msg("fp-16=00mm??scalar()" ) |
469 | __msg("fp-24=00mm???scalar()" ) |
470 | __naked void spill_subregs_preserve_stack_zero(void) |
471 | { |
472 | asm volatile ( |
473 | "call %[bpf_get_prandom_u32];" |
474 | |
475 | /* 32-bit subreg spill with ZERO, MISC, and INVALID */ |
476 | ".8byte %[fp1_u8_st_zero];" /* ZERO, LLVM-18+: *(u8 *)(r10 -1) = 0; */ |
477 | "*(u8 *)(r10 -2) = r0;" /* MISC */ |
478 | /* fp-3 and fp-4 stay INVALID */ |
479 | "*(u32 *)(r10 -8) = r0;" |
480 | |
481 | /* 16-bit subreg spill with ZERO, MISC, and INVALID */ |
482 | ".8byte %[fp10_u16_st_zero];" /* ZERO, LLVM-18+: *(u16 *)(r10 -10) = 0; */ |
483 | "*(u16 *)(r10 -12) = r0;" /* MISC */ |
484 | /* fp-13 and fp-14 stay INVALID */ |
485 | "*(u16 *)(r10 -16) = r0;" |
486 | |
487 | /* 8-bit subreg spill with ZERO, MISC, and INVALID */ |
488 | ".8byte %[fp18_u16_st_zero];" /* ZERO, LLVM-18+: *(u16 *)(r18 -10) = 0; */ |
489 | "*(u16 *)(r10 -20) = r0;" /* MISC */ |
490 | /* fp-21, fp-22, and fp-23 stay INVALID */ |
491 | "*(u8 *)(r10 -24) = r0;" |
492 | |
493 | "r0 = 0;" |
494 | "exit;" |
495 | : |
496 | : __imm(bpf_get_prandom_u32), |
497 | __imm_insn(fp1_u8_st_zero, BPF_ST_MEM(BPF_B, BPF_REG_FP, -1, 0)), |
498 | __imm_insn(fp10_u16_st_zero, BPF_ST_MEM(BPF_H, BPF_REG_FP, -10, 0)), |
499 | __imm_insn(fp18_u16_st_zero, BPF_ST_MEM(BPF_H, BPF_REG_FP, -18, 0)) |
500 | : __clobber_all); |
501 | } |
502 | |
char single_byte_buf[1] SEC(".data.single_byte_buf");
504 | |
505 | SEC("raw_tp" ) |
506 | __log_level(2) |
507 | __success |
508 | /* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */ |
509 | __msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0" ) |
510 | /* but fp-16 is spilled IMPRECISE zero const reg */ |
511 | __msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0" ) |
512 | /* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register |
513 | * precise immediately; if necessary, it will be marked precise later |
514 | */ |
515 | __msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0" ) |
516 | /* similarly, when R2 is assigned from spilled register, it is initially |
517 | * imprecise, but will be marked precise later once it is used in precise context |
518 | */ |
519 | __msg("10: (71) r2 = *(u8 *)(r10 -9) ; R2_w=0 R10=fp0 fp-16_w=0" ) |
520 | __msg("11: (0f) r1 += r2" ) |
521 | __msg("mark_precise: frame0: last_idx 11 first_idx 0 subseq_idx -1" ) |
522 | __msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)" ) |
523 | __msg("mark_precise: frame0: regs= stack=-16 before 9: (bf) r1 = r6" ) |
524 | __msg("mark_precise: frame0: regs= stack=-16 before 8: (73) *(u8 *)(r1 +0) = r2" ) |
525 | __msg("mark_precise: frame0: regs= stack=-16 before 7: (0f) r1 += r2" ) |
526 | __msg("mark_precise: frame0: regs= stack=-16 before 6: (71) r2 = *(u8 *)(r10 -1)" ) |
527 | __msg("mark_precise: frame0: regs= stack=-16 before 5: (bf) r1 = r6" ) |
528 | __msg("mark_precise: frame0: regs= stack=-16 before 4: (7b) *(u64 *)(r10 -16) = r0" ) |
529 | __msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0" ) |
530 | __naked void partial_stack_load_preserves_zeros(void) |
531 | { |
532 | asm volatile ( |
533 | /* fp-8 is value zero (represented by a zero value fake reg) */ |
534 | ".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */ |
535 | |
536 | /* fp-16 is const zero register */ |
537 | "r0 = 0;" |
538 | "*(u64 *)(r10 -16) = r0;" |
539 | |
540 | /* load single U8 from non-aligned spilled value zero slot */ |
541 | "r1 = %[single_byte_buf];" |
542 | "r2 = *(u8 *)(r10 -1);" |
543 | "r1 += r2;" |
544 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
545 | |
546 | /* load single U8 from non-aligned ZERO REG slot */ |
547 | "r1 = %[single_byte_buf];" |
548 | "r2 = *(u8 *)(r10 -9);" |
549 | "r1 += r2;" |
550 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
551 | |
552 | /* load single U16 from non-aligned spilled value zero slot */ |
553 | "r1 = %[single_byte_buf];" |
554 | "r2 = *(u16 *)(r10 -2);" |
555 | "r1 += r2;" |
556 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
557 | |
558 | /* load single U16 from non-aligned ZERO REG slot */ |
559 | "r1 = %[single_byte_buf];" |
560 | "r2 = *(u16 *)(r10 -10);" |
561 | "r1 += r2;" |
562 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
563 | |
564 | /* load single U32 from non-aligned spilled value zero slot */ |
565 | "r1 = %[single_byte_buf];" |
566 | "r2 = *(u32 *)(r10 -4);" |
567 | "r1 += r2;" |
568 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
569 | |
570 | /* load single U32 from non-aligned ZERO REG slot */ |
571 | "r1 = %[single_byte_buf];" |
572 | "r2 = *(u32 *)(r10 -12);" |
573 | "r1 += r2;" |
574 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
575 | |
576 | /* for completeness, load U64 from STACK_ZERO slot */ |
577 | "r1 = %[single_byte_buf];" |
578 | "r2 = *(u64 *)(r10 -8);" |
579 | "r1 += r2;" |
580 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
581 | |
582 | /* for completeness, load U64 from ZERO REG slot */ |
583 | "r1 = %[single_byte_buf];" |
584 | "r2 = *(u64 *)(r10 -16);" |
585 | "r1 += r2;" |
586 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
587 | |
588 | "r0 = 0;" |
589 | "exit;" |
590 | : |
591 | : __imm_ptr(single_byte_buf), |
592 | __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0)) |
593 | : __clobber_common); |
594 | } |
595 | |
596 | SEC("raw_tp" ) |
597 | __log_level(2) |
598 | __success |
599 | /* fp-4 is STACK_ZERO */ |
600 | __msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????" ) |
601 | __msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????" ) |
602 | __msg("5: (0f) r1 += r2" ) |
603 | __msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1" ) |
604 | __msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)" ) |
605 | __naked void partial_stack_load_preserves_partial_zeros(void) |
606 | { |
607 | asm volatile ( |
608 | /* fp-4 is value zero */ |
609 | ".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */ |
610 | |
611 | /* load single U8 from non-aligned stack zero slot */ |
612 | "r1 = %[single_byte_buf];" |
613 | "r2 = *(u8 *)(r10 -1);" |
614 | "r1 += r2;" |
615 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
616 | |
617 | /* load single U16 from non-aligned stack zero slot */ |
618 | "r1 = %[single_byte_buf];" |
619 | "r2 = *(u16 *)(r10 -2);" |
620 | "r1 += r2;" |
621 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
622 | |
623 | /* load single U32 from non-aligned stack zero slot */ |
624 | "r1 = %[single_byte_buf];" |
625 | "r2 = *(u32 *)(r10 -4);" |
626 | "r1 += r2;" |
627 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
628 | |
629 | "r0 = 0;" |
630 | "exit;" |
631 | : |
632 | : __imm_ptr(single_byte_buf), |
633 | __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0)) |
634 | : __clobber_common); |
635 | } |
636 | |
char two_byte_buf[2] SEC(".data.two_byte_buf");
638 | |
639 | SEC("raw_tp" ) |
640 | __log_level(2) __flag(BPF_F_TEST_STATE_FREQ) |
641 | __success |
642 | /* make sure fp-8 is IMPRECISE fake register spill */ |
643 | __msg("3: (7a) *(u64 *)(r10 -8) = 1 ; R10=fp0 fp-8_w=1" ) |
644 | /* and fp-16 is spilled IMPRECISE const reg */ |
645 | __msg("5: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16_w=1" ) |
646 | /* validate load from fp-8, which was initialized using BPF_ST_MEM */ |
647 | __msg("8: (79) r2 = *(u64 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=1" ) |
648 | __msg("9: (0f) r1 += r2" ) |
649 | __msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1" ) |
650 | __msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)" ) |
651 | __msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6" ) |
652 | /* note, fp-8 is precise, fp-16 is not yet precise, we'll get there */ |
653 | __msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_w=1" ) |
654 | __msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7" ) |
655 | __msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0" ) |
656 | __msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -16) = r0" ) |
657 | __msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1" ) |
658 | __msg("mark_precise: frame0: regs= stack=-8 before 3: (7a) *(u64 *)(r10 -8) = 1" ) |
659 | __msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1" ) |
660 | /* validate load from fp-16, which was initialized using BPF_STX_MEM */ |
661 | __msg("12: (79) r2 = *(u64 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=1" ) |
662 | __msg("13: (0f) r1 += r2" ) |
663 | __msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1" ) |
664 | __msg("mark_precise: frame0: regs=r2 stack= before 12: (79) r2 = *(u64 *)(r10 -16)" ) |
665 | __msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6" ) |
666 | __msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2" ) |
667 | __msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2" ) |
668 | __msg("mark_precise: frame0: regs= stack=-16 before 8: (79) r2 = *(u64 *)(r10 -8)" ) |
669 | __msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6" ) |
670 | /* now both fp-8 and fp-16 are precise, very good */ |
671 | __msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_rw=P1" ) |
672 | __msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7" ) |
673 | __msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0" ) |
674 | __msg("mark_precise: frame0: regs= stack=-16 before 5: (7b) *(u64 *)(r10 -16) = r0" ) |
675 | __msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1" ) |
676 | __msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1" ) |
677 | __naked void stack_load_preserves_const_precision(void) |
678 | { |
679 | asm volatile ( |
680 | /* establish checkpoint with state that has no stack slots; |
681 | * if we bubble up to this state without finding desired stack |
682 | * slot, then it's a bug and should be caught |
683 | */ |
684 | "goto +0;" |
685 | |
686 | /* fp-8 is const 1 *fake* register */ |
687 | ".8byte %[fp8_st_one];" /* LLVM-18+: *(u64 *)(r10 -8) = 1; */ |
688 | |
689 | /* fp-16 is const 1 register */ |
690 | "r0 = 1;" |
691 | "*(u64 *)(r10 -16) = r0;" |
692 | |
693 | /* force checkpoint to check precision marks preserved in parent states */ |
694 | "goto +0;" |
695 | |
696 | /* load single U64 from aligned FAKE_REG=1 slot */ |
697 | "r1 = %[two_byte_buf];" |
698 | "r2 = *(u64 *)(r10 -8);" |
699 | "r1 += r2;" |
700 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
701 | |
702 | /* load single U64 from aligned REG=1 slot */ |
703 | "r1 = %[two_byte_buf];" |
704 | "r2 = *(u64 *)(r10 -16);" |
705 | "r1 += r2;" |
706 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
707 | |
708 | "r0 = 0;" |
709 | "exit;" |
710 | : |
711 | : __imm_ptr(two_byte_buf), |
712 | __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 1)) |
713 | : __clobber_common); |
714 | } |
715 | |
716 | SEC("raw_tp" ) |
717 | __log_level(2) __flag(BPF_F_TEST_STATE_FREQ) |
718 | __success |
719 | /* make sure fp-8 is 32-bit FAKE subregister spill */ |
720 | __msg("3: (62) *(u32 *)(r10 -8) = 1 ; R10=fp0 fp-8=????1" ) |
721 | /* but fp-16 is spilled IMPRECISE zero const reg */ |
722 | __msg("5: (63) *(u32 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16=????1" ) |
723 | /* validate load from fp-8, which was initialized using BPF_ST_MEM */ |
724 | __msg("8: (61) r2 = *(u32 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=????1" ) |
725 | __msg("9: (0f) r1 += r2" ) |
726 | __msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1" ) |
727 | __msg("mark_precise: frame0: regs=r2 stack= before 8: (61) r2 = *(u32 *)(r10 -8)" ) |
728 | __msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6" ) |
729 | __msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16=????1" ) |
730 | __msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7" ) |
731 | __msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0" ) |
732 | __msg("mark_precise: frame0: regs= stack=-8 before 5: (63) *(u32 *)(r10 -16) = r0" ) |
733 | __msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1" ) |
734 | __msg("mark_precise: frame0: regs= stack=-8 before 3: (62) *(u32 *)(r10 -8) = 1" ) |
735 | __msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1" ) |
736 | /* validate load from fp-16, which was initialized using BPF_STX_MEM */ |
737 | __msg("12: (61) r2 = *(u32 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=????1" ) |
738 | __msg("13: (0f) r1 += r2" ) |
739 | __msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1" ) |
740 | __msg("mark_precise: frame0: regs=r2 stack= before 12: (61) r2 = *(u32 *)(r10 -16)" ) |
741 | __msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6" ) |
742 | __msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2" ) |
743 | __msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2" ) |
744 | __msg("mark_precise: frame0: regs= stack=-16 before 8: (61) r2 = *(u32 *)(r10 -8)" ) |
745 | __msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6" ) |
746 | __msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16_r=????P1" ) |
747 | __msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7" ) |
748 | __msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0" ) |
749 | __msg("mark_precise: frame0: regs= stack=-16 before 5: (63) *(u32 *)(r10 -16) = r0" ) |
750 | __msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1" ) |
751 | __msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1" ) |
752 | __naked void stack_load_preserves_const_precision_subreg(void) |
753 | { |
754 | asm volatile ( |
755 | /* establish checkpoint with state that has no stack slots; |
756 | * if we bubble up to this state without finding desired stack |
757 | * slot, then it's a bug and should be caught |
758 | */ |
759 | "goto +0;" |
760 | |
761 | /* fp-8 is const 1 *fake* SUB-register */ |
762 | ".8byte %[fp8_st_one];" /* LLVM-18+: *(u32 *)(r10 -8) = 1; */ |
763 | |
764 | /* fp-16 is const 1 SUB-register */ |
765 | "r0 = 1;" |
766 | "*(u32 *)(r10 -16) = r0;" |
767 | |
768 | /* force checkpoint to check precision marks preserved in parent states */ |
769 | "goto +0;" |
770 | |
771 | /* load single U32 from aligned FAKE_REG=1 slot */ |
772 | "r1 = %[two_byte_buf];" |
773 | "r2 = *(u32 *)(r10 -8);" |
774 | "r1 += r2;" |
775 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
776 | |
777 | /* load single U32 from aligned REG=1 slot */ |
778 | "r1 = %[two_byte_buf];" |
779 | "r2 = *(u32 *)(r10 -16);" |
780 | "r1 += r2;" |
781 | "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ |
782 | |
783 | "r0 = 0;" |
784 | "exit;" |
785 | : |
786 | : __imm_ptr(two_byte_buf), |
787 | __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_W, BPF_REG_FP, -8, 1)) /* 32-bit spill */ |
788 | : __clobber_common); |
789 | } |
790 | |
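/* A 32-bit spill of a value known to fit in 32 bits keeps its scalar ID,
 * so the bounds check on r0 after the spill also refines the copy that
 * is later filled back into r1.
 */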
791 | SEC("xdp" ) |
792 | __description("32-bit spilled reg range should be tracked" ) |
793 | __success __retval(0) |
794 | __naked void spill_32bit_range_track(void) |
795 | { |
796 | asm volatile(" \ |
797 | call %[bpf_ktime_get_ns]; \ |
798 | /* Make r0 bounded. */ \ |
799 | r0 &= 65535; \ |
800 | /* Assign an ID to r0. */ \ |
801 | r1 = r0; \ |
802 | /* 32-bit spill r0 to stack. */ \ |
803 | *(u32*)(r10 - 8) = r0; \ |
804 | /* Boundary check on r0. */ \ |
805 | if r0 < 1 goto l0_%=; \ |
806 | /* 32-bit fill r1 from stack. */ \ |
807 | r1 = *(u32*)(r10 - 8); \ |
808 | /* r1 == r0 => r1 >= 1 always. */ \ |
809 | if r1 >= 1 goto l0_%=; \ |
810 | /* Dead branch: the verifier should prune it. \ |
811 | * Do an invalid memory access if the verifier \ |
812 | * follows it. \ |
813 | */ \ |
814 | r0 = *(u64*)(r9 + 0); \ |
815 | l0_%=: r0 = 0; \ |
816 | exit; \ |
817 | " : |
818 | : __imm(bpf_ktime_get_ns) |
819 | : __clobber_all); |
820 | } |
821 | |
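/* When spill width matches the register width, an ID is assigned and
 * preserved: learning r1 == 0 after the fill makes the subsequent
 * r0 == r2 comparison predefined.
 */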
822 | SEC("xdp" ) |
823 | __description("64-bit spill of 64-bit reg should assign ID" ) |
824 | __success __retval(0) |
825 | __naked void spill_64bit_of_64bit_ok(void) |
826 | { |
827 | asm volatile (" \ |
828 | /* Roll one bit to make the register inexact. */\ |
829 | call %[bpf_get_prandom_u32]; \ |
830 | r0 &= 0x80000000; \ |
831 | r0 <<= 32; \ |
832 | /* 64-bit spill r0 to stack - should assign an ID. */\ |
833 | *(u64*)(r10 - 8) = r0; \ |
834 | /* 64-bit fill r1 from stack - should preserve the ID. */\ |
835 | r1 = *(u64*)(r10 - 8); \ |
836 | /* Compare r1 with another register to trigger find_equal_scalars.\ |
837 | * Having one random bit is important here, otherwise the verifier cuts\ |
838 | * the corners. \ |
839 | */ \ |
840 | r2 = 0; \ |
841 | if r1 != r2 goto l0_%=; \ |
842 | /* The result of this comparison is predefined. */\ |
843 | if r0 == r2 goto l0_%=; \ |
844 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
845 | * access if the verifier follows it. \ |
846 | */ \ |
847 | r0 = *(u64*)(r9 + 0); \ |
848 | exit; \ |
849 | l0_%=: r0 = 0; \ |
850 | exit; \ |
851 | " : |
852 | : __imm(bpf_get_prandom_u32) |
853 | : __clobber_all); |
854 | } |
855 | |
856 | SEC("xdp" ) |
857 | __description("32-bit spill of 32-bit reg should assign ID" ) |
858 | __success __retval(0) |
859 | __naked void spill_32bit_of_32bit_ok(void) |
860 | { |
861 | asm volatile (" \ |
862 | /* Roll one bit to make the register inexact. */\ |
863 | call %[bpf_get_prandom_u32]; \ |
864 | w0 &= 0x80000000; \ |
865 | /* 32-bit spill r0 to stack - should assign an ID. */\ |
866 | *(u32*)(r10 - 8) = r0; \ |
867 | /* 32-bit fill r1 from stack - should preserve the ID. */\ |
868 | r1 = *(u32*)(r10 - 8); \ |
869 | /* Compare r1 with another register to trigger find_equal_scalars.\ |
870 | * Having one random bit is important here, otherwise the verifier cuts\ |
871 | * the corners. \ |
872 | */ \ |
873 | r2 = 0; \ |
874 | if r1 != r2 goto l0_%=; \ |
875 | /* The result of this comparison is predefined. */\ |
876 | if r0 == r2 goto l0_%=; \ |
877 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
878 | * access if the verifier follows it. \ |
879 | */ \ |
880 | r0 = *(u64*)(r9 + 0); \ |
881 | exit; \ |
882 | l0_%=: r0 = 0; \ |
883 | exit; \ |
884 | " : |
885 | : __imm(bpf_get_prandom_u32) |
886 | : __clobber_all); |
887 | } |
888 | |
889 | SEC("xdp" ) |
890 | __description("16-bit spill of 16-bit reg should assign ID" ) |
891 | __success __retval(0) |
892 | __naked void spill_16bit_of_16bit_ok(void) |
893 | { |
894 | asm volatile (" \ |
895 | /* Roll one bit to make the register inexact. */\ |
896 | call %[bpf_get_prandom_u32]; \ |
897 | r0 &= 0x8000; \ |
898 | /* 16-bit spill r0 to stack - should assign an ID. */\ |
899 | *(u16*)(r10 - 8) = r0; \ |
900 | /* 16-bit fill r1 from stack - should preserve the ID. */\ |
901 | r1 = *(u16*)(r10 - 8); \ |
902 | /* Compare r1 with another register to trigger find_equal_scalars.\ |
903 | * Having one random bit is important here, otherwise the verifier cuts\ |
904 | * the corners. \ |
905 | */ \ |
906 | r2 = 0; \ |
907 | if r1 != r2 goto l0_%=; \ |
908 | /* The result of this comparison is predefined. */\ |
909 | if r0 == r2 goto l0_%=; \ |
910 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
911 | * access if the verifier follows it. \ |
912 | */ \ |
913 | r0 = *(u64*)(r9 + 0); \ |
914 | exit; \ |
915 | l0_%=: r0 = 0; \ |
916 | exit; \ |
917 | " : |
918 | : __imm(bpf_get_prandom_u32) |
919 | : __clobber_all); |
920 | } |
921 | |
922 | SEC("xdp" ) |
923 | __description("8-bit spill of 8-bit reg should assign ID" ) |
924 | __success __retval(0) |
925 | __naked void spill_8bit_of_8bit_ok(void) |
926 | { |
927 | asm volatile (" \ |
928 | /* Roll one bit to make the register inexact. */\ |
929 | call %[bpf_get_prandom_u32]; \ |
930 | r0 &= 0x80; \ |
931 | /* 8-bit spill r0 to stack - should assign an ID. */\ |
932 | *(u8*)(r10 - 8) = r0; \ |
933 | /* 8-bit fill r1 from stack - should preserve the ID. */\ |
934 | r1 = *(u8*)(r10 - 8); \ |
935 | /* Compare r1 with another register to trigger find_equal_scalars.\ |
936 | * Having one random bit is important here, otherwise the verifier cuts\ |
937 | * the corners. \ |
938 | */ \ |
939 | r2 = 0; \ |
940 | if r1 != r2 goto l0_%=; \ |
941 | /* The result of this comparison is predefined. */\ |
942 | if r0 == r2 goto l0_%=; \ |
943 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
944 | * access if the verifier follows it. \ |
945 | */ \ |
946 | r0 = *(u64*)(r9 + 0); \ |
947 | exit; \ |
948 | l0_%=: r0 = 0; \ |
949 | exit; \ |
950 | " : |
951 | : __imm(bpf_get_prandom_u32) |
952 | : __clobber_all); |
953 | } |
954 | |
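/* The bounds check happens on the source register after it was spilled;
 * through the shared ID the new range also applies to the stack slot,
 * so the fill is already known to be <= 16.
 */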
955 | SEC("xdp" ) |
956 | __description("spill unbounded reg, then range check src" ) |
957 | __success __retval(0) |
958 | __naked void spill_unbounded(void) |
959 | { |
960 | asm volatile (" \ |
961 | /* Produce an unbounded scalar. */ \ |
962 | call %[bpf_get_prandom_u32]; \ |
963 | /* Spill r0 to stack. */ \ |
964 | *(u64*)(r10 - 8) = r0; \ |
965 | /* Boundary check on r0. */ \ |
966 | if r0 > 16 goto l0_%=; \ |
967 | /* Fill r0 from stack. */ \ |
968 | r0 = *(u64*)(r10 - 8); \ |
969 | /* Boundary check on r0 with predetermined result. */\ |
970 | if r0 <= 16 goto l0_%=; \ |
971 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
972 | * access if the verifier follows it. \ |
973 | */ \ |
974 | r0 = *(u64*)(r9 + 0); \ |
975 | l0_%=: r0 = 0; \ |
976 | exit; \ |
977 | " : |
978 | : __imm(bpf_get_prandom_u32) |
979 | : __clobber_all); |
980 | } |
981 | |
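/* After r0 <<= 32 the low 32 bits are known zero, so a 32-bit fill of
 * the low half (offset depends on endianness) must read back constant 0.
 */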
982 | SEC("xdp" ) |
983 | __description("32-bit fill after 64-bit spill" ) |
984 | __success __retval(0) |
985 | __naked void fill_32bit_after_spill_64bit(void) |
986 | { |
987 | asm volatile(" \ |
988 | /* Randomize the upper 32 bits. */ \ |
989 | call %[bpf_get_prandom_u32]; \ |
990 | r0 <<= 32; \ |
991 | /* 64-bit spill r0 to stack. */ \ |
992 | *(u64*)(r10 - 8) = r0; \ |
993 | /* 32-bit fill r0 from stack. */ \ |
994 | " |
995 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
996 | "r0 = *(u32*)(r10 - 8);" |
997 | #else |
998 | "r0 = *(u32*)(r10 - 4);" |
999 | #endif |
1000 | " \ |
1001 | /* Boundary check on r0 with predetermined result. */\ |
1002 | if r0 == 0 goto l0_%=; \ |
1003 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
1004 | * access if the verifier follows it. \ |
1005 | */ \ |
1006 | r0 = *(u64*)(r9 + 0); \ |
1007 | l0_%=: exit; \ |
1008 | " : |
1009 | : __imm(bpf_get_prandom_u32) |
1010 | : __clobber_all); |
1011 | } |
1012 | |
1013 | SEC("xdp" ) |
1014 | __description("32-bit fill after 64-bit spill of 32-bit value should preserve ID" ) |
1015 | __success __retval(0) |
1016 | __naked void fill_32bit_after_spill_64bit_preserve_id(void) |
1017 | { |
1018 | asm volatile (" \ |
1019 | /* Randomize the lower 32 bits. */ \ |
1020 | call %[bpf_get_prandom_u32]; \ |
1021 | w0 &= 0xffffffff; \ |
1022 | /* 64-bit spill r0 to stack - should assign an ID. */\ |
1023 | *(u64*)(r10 - 8) = r0; \ |
1024 | /* 32-bit fill r1 from stack - should preserve the ID. */\ |
1025 | " |
1026 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
1027 | "r1 = *(u32*)(r10 - 8);" |
1028 | #else |
1029 | "r1 = *(u32*)(r10 - 4);" |
1030 | #endif |
1031 | " \ |
1032 | /* Compare r1 with another register to trigger find_equal_scalars. */\ |
1033 | r2 = 0; \ |
1034 | if r1 != r2 goto l0_%=; \ |
1035 | /* The result of this comparison is predefined. */\ |
1036 | if r0 == r2 goto l0_%=; \ |
1037 | /* Dead branch: the verifier should prune it. Do an invalid memory\ |
1038 | * access if the verifier follows it. \ |
1039 | */ \ |
1040 | r0 = *(u64*)(r9 + 0); \ |
1041 | exit; \ |
1042 | l0_%=: r0 = 0; \ |
1043 | exit; \ |
1044 | " : |
1045 | : __imm(bpf_get_prandom_u32) |
1046 | : __clobber_all); |
1047 | } |
1048 | |
1049 | SEC("xdp" ) |
1050 | __description("32-bit fill after 64-bit spill should clear ID" ) |
1051 | __failure __msg("math between ctx pointer and 4294967295 is not allowed" ) |
1052 | __naked void fill_32bit_after_spill_64bit_clear_id(void) |
1053 | { |
1054 | asm volatile (" \ |
1055 | r6 = r1; \ |
1056 | /* Roll one bit to force the verifier to track both branches. */\ |
1057 | call %[bpf_get_prandom_u32]; \ |
1058 | r0 &= 0x8; \ |
1059 | /* Put a large number into r1. */ \ |
1060 | r1 = 0xffffffff; \ |
1061 | r1 <<= 32; \ |
1062 | r1 += r0; \ |
1063 | /* 64-bit spill r1 to stack - should assign an ID. */\ |
1064 | *(u64*)(r10 - 8) = r1; \ |
1065 | /* 32-bit fill r2 from stack - should clear the ID. */\ |
1066 | " |
1067 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
1068 | "r2 = *(u32*)(r10 - 8);" |
1069 | #else |
1070 | "r2 = *(u32*)(r10 - 4);" |
1071 | #endif |
1072 | " \ |
1073 | /* Compare r2 with another register to trigger find_equal_scalars.\ |
1074 | * Having one random bit is important here, otherwise the verifier cuts\ |
1075 | * the corners. If the ID was mistakenly preserved on fill, this would\ |
1076 | * cause the verifier to think that r1 is also equal to zero in one of\ |
1077 | * the branches, and equal to eight on the other branch.\ |
1078 | */ \ |
1079 | r3 = 0; \ |
1080 | if r2 != r3 goto l0_%=; \ |
1081 | l0_%=: r1 >>= 32; \ |
1082 | /* The verifier shouldn't propagate r2's range to r1, so it should\ |
1083 | * still remember r1 = 0xffffffff and reject the below.\ |
1084 | */ \ |
1085 | r6 += r1; \ |
1086 | r0 = *(u32*)(r6 + 0); \ |
1087 | exit; \ |
1088 | " : |
1089 | : __imm(bpf_get_prandom_u32) |
1090 | : __clobber_all); |
1091 | } |
1092 | |
1093 | /* stacksafe(): check if stack spill of an imprecise scalar in old state |
1094 | * is considered equivalent to STACK_{MISC,INVALID} in cur state. |
1095 | */ |
1096 | SEC("socket" ) |
1097 | __success __log_level(2) |
1098 | __msg("8: (79) r1 = *(u64 *)(r10 -8)" ) |
1099 | __msg("8: safe" ) |
1100 | __msg("processed 11 insns" ) |
1101 | /* STACK_INVALID should prevent verifier in unpriv mode from |
1102 | * considering states equivalent and force an error on second |
1103 | * verification path (entry - label 1 - label 2). |
1104 | */ |
1105 | __failure_unpriv |
1106 | __msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)" ) |
1107 | __msg_unpriv("9: (95) exit" ) |
1108 | __msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)" ) |
1109 | __msg_unpriv("invalid read from stack off -8+2 size 8" ) |
1110 | __flag(BPF_F_TEST_STATE_FREQ) |
1111 | __naked void old_imprecise_scalar_vs_cur_stack_misc(void) |
1112 | { |
1113 | asm volatile( |
1114 | /* get a random value for branching */ |
1115 | "call %[bpf_ktime_get_ns];" |
1116 | "if r0 == 0 goto 1f;" |
1117 | /* conjure scalar at fp-8 */ |
1118 | "r0 = 42;" |
1119 | "*(u64*)(r10 - 8) = r0;" |
1120 | "goto 2f;" |
1121 | "1:" |
1122 | /* conjure STACK_{MISC,INVALID} at fp-8 */ |
1123 | "call %[bpf_ktime_get_ns];" |
1124 | "*(u16*)(r10 - 8) = r0;" |
1125 | "*(u16*)(r10 - 4) = r0;" |
1126 | "2:" |
1127 | /* read fp-8, should be considered safe on second visit */ |
1128 | "r1 = *(u64*)(r10 - 8);" |
1129 | "exit;" |
1130 | : |
1131 | : __imm(bpf_ktime_get_ns) |
1132 | : __clobber_all); |
1133 | } |
1134 | |
1135 | /* stacksafe(): check that stack spill of a precise scalar in old state |
1136 | * is not considered equivalent to STACK_MISC in cur state. |
1137 | */ |
1138 | SEC("socket" ) |
1139 | __success __log_level(2) |
1140 | /* verifier should visit 'if r1 == 0x2a ...' two times: |
1141 | * - once for path entry - label 2; |
1142 | * - once for path entry - label 1 - label 2. |
1143 | */ |
1144 | __msg("if r1 == 0x2a goto pc+0" ) |
1145 | __msg("if r1 == 0x2a goto pc+0" ) |
1146 | __msg("processed 15 insns" ) |
1147 | __flag(BPF_F_TEST_STATE_FREQ) |
1148 | __naked void old_precise_scalar_vs_cur_stack_misc(void) |
1149 | { |
1150 | asm volatile( |
1151 | /* get a random value for branching */ |
1152 | "call %[bpf_ktime_get_ns];" |
1153 | "if r0 == 0 goto 1f;" |
1154 | /* conjure scalar at fp-8 */ |
1155 | "r0 = 42;" |
1156 | "*(u64*)(r10 - 8) = r0;" |
1157 | "goto 2f;" |
1158 | "1:" |
1159 | /* conjure STACK_MISC at fp-8 */ |
1160 | "call %[bpf_ktime_get_ns];" |
1161 | "*(u64*)(r10 - 8) = r0;" |
1162 | "*(u32*)(r10 - 4) = r0;" |
1163 | "2:" |
1164 | /* read fp-8, should not be considered safe on second visit */ |
1165 | "r1 = *(u64*)(r10 - 8);" |
1166 | /* use r1 in precise context */ |
1167 | "if r1 == 42 goto +0;" |
1168 | "exit;" |
1169 | : |
1170 | : __imm(bpf_ktime_get_ns) |
1171 | : __clobber_all); |
1172 | } |
1173 | |
1174 | /* stacksafe(): check if STACK_MISC in old state is considered |
1175 | * equivalent to stack spill of a scalar in cur state. |
1176 | */ |
1177 | SEC("socket" ) |
1178 | __success __log_level(2) |
1179 | __msg("8: (79) r0 = *(u64 *)(r10 -8)" ) |
1180 | __msg("8: safe" ) |
1181 | __msg("processed 11 insns" ) |
1182 | __flag(BPF_F_TEST_STATE_FREQ) |
1183 | __naked void old_stack_misc_vs_cur_scalar(void) |
1184 | { |
1185 | asm volatile( |
1186 | /* get a random value for branching */ |
1187 | "call %[bpf_ktime_get_ns];" |
1188 | "if r0 == 0 goto 1f;" |
1189 | /* conjure STACK_{MISC,INVALID} at fp-8 */ |
1190 | "call %[bpf_ktime_get_ns];" |
1191 | "*(u16*)(r10 - 8) = r0;" |
1192 | "*(u16*)(r10 - 4) = r0;" |
1193 | "goto 2f;" |
1194 | "1:" |
1195 | /* conjure scalar at fp-8 */ |
1196 | "r0 = 42;" |
1197 | "*(u64*)(r10 - 8) = r0;" |
1198 | "2:" |
1199 | /* read fp-8, should be considered safe on second visit */ |
1200 | "r0 = *(u64*)(r10 - 8);" |
1201 | "exit;" |
1202 | : |
1203 | : __imm(bpf_ktime_get_ns) |
1204 | : __clobber_all); |
1205 | } |
1206 | |
1207 | /* stacksafe(): check that STACK_MISC in old state is not considered |
1208 | * equivalent to stack spill of a non-scalar in cur state. |
1209 | */ |
1210 | SEC("socket" ) |
1211 | __success __log_level(2) |
1212 | /* verifier should process exit instructions twice: |
1213 | * - once for path entry - label 2; |
1214 | * - once for path entry - label 1 - label 2. |
1215 | */ |
1216 | __msg("r1 = *(u64 *)(r10 -8)" ) |
1217 | __msg("exit" ) |
1218 | __msg("r1 = *(u64 *)(r10 -8)" ) |
1219 | __msg("exit" ) |
1220 | __msg("processed 11 insns" ) |
1221 | __flag(BPF_F_TEST_STATE_FREQ) |
1222 | __naked void old_stack_misc_vs_cur_ctx_ptr(void) |
1223 | { |
1224 | asm volatile( |
1225 | /* remember context pointer in r9 */ |
1226 | "r9 = r1;" |
1227 | /* get a random value for branching */ |
1228 | "call %[bpf_ktime_get_ns];" |
1229 | "if r0 == 0 goto 1f;" |
1230 | /* conjure STACK_MISC at fp-8 */ |
1231 | "call %[bpf_ktime_get_ns];" |
1232 | "*(u64*)(r10 - 8) = r0;" |
1233 | "*(u32*)(r10 - 4) = r0;" |
1234 | "goto 2f;" |
1235 | "1:" |
1236 | /* conjure context pointer in fp-8 */ |
1237 | "*(u64*)(r10 - 8) = r9;" |
1238 | "2:" |
1239 | /* read fp-8, should not be considered safe on second visit */ |
1240 | "r1 = *(u64*)(r10 - 8);" |
1241 | "exit;" |
1242 | : |
1243 | : __imm(bpf_ktime_get_ns) |
1244 | : __clobber_all); |
1245 | } |
1246 | |
char _license[] SEC("license") = "GPL";
1248 | |