1// SPDX-License-Identifier: GPL-2.0
2/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */
3
4#include <linux/bpf.h>
5#include <bpf/bpf_helpers.h>
6#include "bpf_misc.h"
7
SEC("tc")
__description("pkt_end - pkt_start is allowed")
__success __retval(TEST_DATA_LEN)
__naked void end_pkt_start_is_allowed(void)
{
	/* Subtracting the packet start pointer (skb->data, r2) from the
	 * packet end pointer (skb->data_end, r0) must be accepted by the
	 * verifier; the resulting scalar (the linear data length) is
	 * returned and expected to equal TEST_DATA_LEN.
	 */
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r0 -= r2;					\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
23
SEC("tc")
__description("direct packet access: test1")
__success __retval(0)
__naked void direct_packet_access_test1(void)
{
	/* Canonical bounds-check pattern: compute data + 8 and compare it
	 * against data_end; the byte load from r2 is only reached on the
	 * branch where the access is proven in-bounds, so the program must
	 * be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
43
SEC("tc")
__description("direct packet access: test2")
__success __retval(0)
__naked void direct_packet_access_test2(void)
{
	/* Mixes packet loads with variable pointer arithmetic: a byte read
	 * from the packet is scaled (r4 *= 14) and added to a fresh data
	 * pointer, skb->len is bounded to 15 bits via the <<49 / >>49
	 * shift pair before being added as well, and a second data_end
	 * comparison guards the final load — all of which the verifier
	 * must accept.
	 */
	asm volatile ("					\
	r0 = 1;						\
	r4 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r5 = r3;					\
	r5 += 14;					\
	if r5 > r4 goto l0_%=;				\
	r0 = *(u8*)(r3 + 7);				\
	r4 = *(u8*)(r3 + 12);				\
	r4 *= 14;					\
	r3 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 += r4;					\
	r2 = *(u32*)(r1 + %[__sk_buff_len]);		\
	r2 <<= 49;					\
	r2 >>= 49;					\
	r3 += r2;					\
	r2 = r3;					\
	r2 += 8;					\
	r1 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	if r2 > r1 goto l1_%=;				\
	r1 = *(u8*)(r3 + 4);				\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
	: __clobber_all);
}
78
SEC("socket")
__description("direct packet access: test3")
__failure __msg("invalid bpf_context access off=76")
__failure_unpriv
__naked void direct_packet_access_test3(void)
{
	/* Socket-filter programs are not allowed to read skb->data from the
	 * context; the verifier must reject the ctx load (offset 76 is
	 * where the data field sits in struct __sk_buff) for both
	 * privileged and unprivileged loads.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}
93
SEC("tc")
__description("direct packet access: test4 (write)")
__success __retval(0)
__naked void direct_packet_access_test4_write(void)
{
	/* Same bounds-check shape as test1, but with a packet *store*
	 * instead of a load on the proven-in-bounds branch; tc programs
	 * may write to the packet, so this must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
113
SEC("tc")
__description("direct packet access: test5 (pkt_end >= reg, good access)")
__success __retval(0)
__naked void pkt_end_reg_good_access(void)
{
	/* Comparison written with pkt_end on the left: on the taken branch
	 * of "if r3 >= r0" (data_end >= data + 8) the 8 bytes are known to
	 * be present, so the load at l0 is safe and the program is
	 * accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
135
SEC("tc")
__description("direct packet access: test6 (pkt_end >= reg, bad access)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_bad_access(void)
{
	/* Mirror image of test5: here the load sits on the fall-through
	 * branch of "if r3 >= r0", i.e. where data_end < data + 8, so the
	 * access is NOT proven in-bounds and the verifier must reject it.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
157
SEC("tc")
__description("direct packet access: test7 (pkt_end >= reg, both accesses)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_both_accesses(void)
{
	/* Loads on both sides of the "if r3 >= r0" branch: the taken-branch
	 * load (l0) is fine, but the fall-through load runs where the
	 * bounds check failed, so the whole program must be rejected.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
180
SEC("tc")
__description("direct packet access: test8 (double test, variant 1)")
__success __retval(0)
__naked void test8_double_test_variant_1(void)
{
	/* Two complementary checks on the same range: after "r3 >= r0"
	 * falls through, the second "r0 > r3" test re-establishes bounds
	 * for the inline load, and the l0 load is covered by the first
	 * check — both access sites are provable, so this is accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	if r0 > r3 goto l1_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
204
SEC("tc")
__description("direct packet access: test9 (double test, variant 2)")
__success __retval(0)
__naked void test9_double_test_variant_2(void)
{
	/* Variant of test8: once "r3 >= r0" is taken (l0), data + 8 is
	 * known in-bounds on every subsequent path, so both loads after
	 * l0 — including the one behind the redundant "r0 > r3" test —
	 * are provable and the program is accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	if r0 > r3 goto l1_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
228
SEC("tc")
__description("direct packet access: test10 (write invalid)")
__failure __msg("invalid access to packet")
__naked void packet_access_test10_write_invalid(void)
{
	/* The store at l0 executes on the branch where the check FAILED
	 * (r0 > r3, i.e. fewer than 8 bytes available), so the packet
	 * write must be rejected.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	*(u8*)(r2 + 0) = r2;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
250
SEC("tc")
__description("direct packet access: test11 (shift, good access)")
__success __retval(1)
__naked void access_test11_shift_good_access(void)
{
	/* The offset added to the packet pointer is a known constant
	 * computed via a right shift ((144 + 23) >> 3), which stays within
	 * the 22 bytes proven available — pointer arithmetic with a
	 * shift-derived scalar must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = 144;					\
	r5 = r3;					\
	r5 += 23;					\
	r5 >>= 3;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
277
SEC("tc")
__description("direct packet access: test12 (and, good access)")
__success __retval(1)
__naked void access_test12_and_good_access(void)
{
	/* Same shape as test11 but the offset is bounded with a bitwise
	 * AND ("r5 &= 15"), keeping it within the 22 checked bytes —
	 * AND-masked pointer arithmetic must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = 144;					\
	r5 = r3;					\
	r5 += 23;					\
	r5 &= 15;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
304
SEC("tc")
__description("direct packet access: test13 (branches, good access)")
__success __retval(1)
__naked void access_test13_branches_good_access(void)
{
	/* The offset source value (14 or 24) depends on a branch on
	 * skb->mark; after the paths join, "+= 23; &= 15" bounds the
	 * result within the 22 checked bytes — the verifier must track
	 * this across the join and accept.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r4 = 1;						\
	if r3 > r4 goto l1_%=;				\
	r3 = 14;					\
	goto l2_%=;					\
l1_%=:	r3 = 24;					\
l2_%=:	r5 = r3;					\
	r5 += 23;					\
	r5 &= 15;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
337
SEC("tc")
__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)")
__success __retval(1)
__naked void _0_const_imm_good_access(void)
{
	/* 12 >> 4 evaluates to 0, so "r6 += r5" adds a constant zero to
	 * the packet pointer; the subsequent load at offset 0 is covered
	 * by the 22-byte check and must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r5 = 12;					\
	r5 >>= 4;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = *(u8*)(r6 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
363
SEC("tc")
__description("direct packet access: test15 (spill with xadd)")
__failure __msg("R2 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void access_test15_spill_with_xadd(void)
{
	/* The packet pointer is spilled to the stack and then modified by
	 * an atomic add (xadd); the value filled back into r2 is no longer
	 * a tracked pointer but a scalar, so dereferencing it must be
	 * rejected.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r5 = 4096;					\
	r4 = r10;					\
	r4 += -8;					\
	*(u64*)(r4 + 0) = r2;				\
	lock *(u64 *)(r4 + 0) += r5;			\
	r2 = *(u64*)(r4 + 0);				\
	*(u32*)(r2 + 0) = r5;				\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
390
SEC("tc")
__description("direct packet access: test16 (arith on data_end)")
__failure __msg("R3 pointer arithmetic on pkt_end")
__naked void test16_arith_on_data_end(void)
{
	/* Arithmetic on the pkt_end pointer itself ("r3 += 16") is
	 * forbidden — it would let a program fabricate a looser bound —
	 * so the verifier must reject at that instruction.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	r3 += 16;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
411
SEC("tc")
__description("direct packet access: test17 (pruning, alignment)")
__failure __msg("misaligned packet access off 2+0+15+-4 size 4")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void packet_access_test17_pruning_alignment(void)
{
	/* Two paths reach l2 with different fixed offsets (14 vs 15 after
	 * the "r0 += 1" on the mark branch); with BPF_F_STRICT_ALIGNMENT
	 * the verifier must NOT prune away the odd-offset path and must
	 * flag the 4-byte store at (r0 - 4) as misaligned.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r0 = r2;					\
	r0 += 14;					\
	if r7 > 1 goto l0_%=;				\
l2_%=:	if r0 > r3 goto l1_%=;				\
	*(u32*)(r0 - 4) = r0;				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	r0 += 1;					\
	goto l2_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
437
SEC("tc")
__description("direct packet access: test18 (imm += pkt_ptr, 1)")
__success __retval(0)
__naked void test18_imm_pkt_ptr_1(void)
{
	/* The addition is written the "reversed" way — scalar register
	 * plus packet pointer ("r0 = 8; r0 += r2") — which the verifier
	 * must still recognize as pkt_ptr + 8 so the checked store is
	 * accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 8;						\
	r0 += r2;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
457
SEC("tc")
__description("direct packet access: test19 (imm += pkt_ptr, 2)")
__success __retval(0)
__naked void test19_imm_pkt_ptr_2(void)
{
	/* After proving 8 bytes available, "r4 = 4; r4 += r2" forms a
	 * packet pointer at offset 4, still inside the checked range, so
	 * the byte store through r4 must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r4 = 4;						\
	r4 += r2;					\
	*(u8*)(r4 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
479
SEC("tc")
__description("direct packet access: test20 (x += pkt_ptr, 1)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test20_x_pkt_ptr_1(void)
{
	/* An unknown scalar (round-tripped through the stack) is masked to
	 * [0, 0x7fff] and added to the packet pointer; checking the saved
	 * copy plus 0x7fff - 1 against data_end then proves the 8-byte
	 * store at r5 safe for any masked value — must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0x7fff;					\
	r4 = r0;					\
	r4 += r2;					\
	r5 = r4;					\
	r4 += %[__imm_0];				\
	if r4 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
506
SEC("tc")
__description("direct packet access: test21 (x += pkt_ptr, 2)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test21_x_pkt_ptr_2(void)
{
	/* Like test20 but the masked unknown scalar is added to the packet
	 * pointer only after an initial 8-byte availability check; the
	 * second check (r5 + 0x7fff - 1 vs data_end) still bounds the
	 * variable-offset 8-byte store — must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r4 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r4 &= 0x7fff;					\
	r4 += r2;					\
	r5 = r4;					\
	r4 += %[__imm_0];				\
	if r4 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
535
SEC("tc")
__description("direct packet access: test22 (x += pkt_ptr, 3)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test22_x_pkt_ptr_3(void)
{
	/* data and data_end are spilled to and filled from the stack; an
	 * atomic add on the spilled slot produces an unknown scalar which
	 * ">>= 49" bounds to a small range, and a second data_end check
	 * then guards the 2-byte store — must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	*(u64*)(r10 - 8) = r2;				\
	*(u64*)(r10 - 16) = r3;				\
	r3 = *(u64*)(r10 - 16);				\
	if r0 > r3 goto l0_%=;				\
	r2 = *(u64*)(r10 - 8);				\
	r4 = 0xffffffff;				\
	lock *(u64 *)(r10 - 8) += r4;			\
	r4 = *(u64*)(r10 - 8);				\
	r4 >>= 49;					\
	r4 += r2;					\
	r0 = r4;					\
	r0 += 2;					\
	if r0 > r3 goto l0_%=;				\
	r2 = 1;						\
	*(u16*)(r4 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
568
SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
	/* The variable offset comes from skb->mark masked to 0xffff plus
	 * 31; per the expected message, the resulting packet pointer r5
	 * keeps a known-safe range of 0 bytes (r=0), so the 8-byte store
	 * must be rejected despite the data_end comparison.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0xffff;					\
	r4 = r0;					\
	r0 = 31;					\
	r0 += r4;					\
	r0 += r2;					\
	r5 = r0;					\
	r0 += %[__imm_0];				\
	if r0 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r0;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0xffff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
599
SEC("tc")
__description("direct packet access: test24 (x += pkt_ptr, 5)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test24_x_pkt_ptr_5(void)
{
	/* Unknown scalar masked to 0xff, plus constant 64, added to the
	 * packet pointer; the check against data_end at r5 + 0x7fff - 1
	 * covers the whole possible range, so the 8-byte store is safe
	 * and the program must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0xff;					\
	r4 = r0;					\
	r0 = 64;					\
	r0 += r4;					\
	r0 += r2;					\
	r5 = r0;					\
	r0 += %[__imm_0];				\
	if r0 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r0;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
628
SEC("tc")
__description("direct packet access: test25 (marking on <, good access)")
__success __retval(0)
__naked void test25_marking_on_good_access(void)
{
	/* Range marking driven by a '<' comparison: the taken branch of
	 * "if r0 < r3" (data + 8 < data_end) proves the load at l0 is
	 * in-bounds — must be accepted.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 < r3 goto l0_%=;				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
649
SEC("tc")
__description("direct packet access: test26 (marking on <, bad access)")
__failure __msg("invalid access to packet")
__naked void test26_marking_on_bad_access(void)
{
	/* Counterpart of test25: the load sits on the fall-through of
	 * "if r0 < r3", i.e. where data + 8 >= data_end, so the access is
	 * out of bounds and must be rejected.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 < r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
670
SEC("tc")
__description("direct packet access: test27 (marking on <=, good access)")
__success __retval(1)
__naked void test27_marking_on_good_access(void)
{
	/* Range marking driven by '<=' with pkt_end on the left: on the
	 * fall-through of "if r3 <= r0" we know data_end > data + 8, so
	 * the load is in-bounds — must be accepted, returning 1.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 <= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
690
SEC("tc")
__description("direct packet access: test28 (marking on <=, bad access)")
__failure __msg("invalid access to packet")
__naked void test28_marking_on_bad_access(void)
{
	/* Counterpart of test27: the load at l0 runs on the taken branch
	 * of "if r3 <= r0", i.e. where data_end <= data + 8 and the byte
	 * is not proven present — must be rejected.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 <= r0 goto l0_%=;				\
l1_%=:	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
711
SEC("tc")
__description("direct packet access: test29 (reg > pkt_end in subprog)")
__success __retval(0)
__naked void reg_pkt_end_in_subprog(void)
{
	/* The bounds comparison is performed in a static subprogram
	 * (r3 = data + 8 vs r2 = data_end passed as arguments); the
	 * caller only dereferences the packet when the subprog returned
	 * non-zero, which the verifier must accept across the call.
	 */
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r2 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = r6;					\
	r3 += 8;					\
	call reg_pkt_end_in_subprog__1;			\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u8*)(r6 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
732
static __naked __noinline __attribute__((used))
void reg_pkt_end_in_subprog__1(void)
{
	/* Helper for test29: returns 1 in r0 when r3 <= r2 (i.e. the
	 * caller's data + 8 does not exceed data_end), 0 otherwise.
	 */
	asm volatile ("					\
	r0 = 0;						\
	if r3 > r2 goto l0_%=;				\
	r0 = 1;						\
l0_%=:	exit;						\
" ::: __clobber_all);
}
743
SEC("tc")
__description("direct packet access: test30 (check_id() in regsafe(), bad access)")
__failure __msg("invalid access to packet, off=0 size=1, R2")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void id_in_regsafe_bad_access(void)
{
	/* Regression test for ID tracking in state comparison (regsafe):
	 * r2 and r3 are packet pointers with distinct IDs that may or may
	 * not be merged via "r2 = r3"; only r3 is bounds-checked, so the
	 * load through r2 is not always safe and must be rejected.
	 * BPF_F_TEST_STATE_FREQ forces frequent state checkpoints so the
	 * nearly-identical states are actually compared.
	 */
	asm volatile ("					\
	/* r9 = ctx */					\
	r9 = r1;					\
	/* r7 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r7 = r0;					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* r2 = ctx->data				\
	 * r3 = ctx->data				\
	 * r4 = ctx->data_end				\
	 */						\
	r2 = *(u32*)(r9 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r9 + %[__sk_buff_data]);		\
	r4 = *(u32*)(r9 + %[__sk_buff_data_end]);	\
	/* if r6 > 100 goto exit			\
	 * if r7 > 100 goto exit			\
	 */						\
	if r6 > 100 goto l0_%=;				\
	if r7 > 100 goto l0_%=;				\
	/* r2 += r6 ; this forces assignment of ID to r2\
	 * r2 += 1  ; get some fixed off for r2		\
	 * r3 += r7 ; this forces assignment of ID to r3\
	 * r3 += 1  ; get some fixed off for r3		\
	 */						\
	r2 += r6;					\
	r2 += 1;					\
	r3 += r7;					\
	r3 += 1;					\
	/* if r6 > r7 goto +1 ; no new information about the state is derived from\
	 *                    ; this check, thus produced verifier states differ\
	 *                    ; only in 'insn_idx'	\
	 * r2 = r3 ; optionally share ID between r2 and r3\
	 */						\
	if r6 != r7 goto l1_%=;				\
	r2 = r3;					\
l1_%=:	/* if r3 > ctx->data_end goto exit */		\
	if r3 > r4 goto l0_%=;				\
	/* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\
	 *                       ; this is not always safe\
	 */						\
	r5 = *(u8*)(r2 - 1);				\
l0_%=:	/* exit(0) */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
802
/* Programs calling GPL-only helpers (e.g. bpf_ktime_get_ns) require a
 * GPL-compatible license string.
 */
char _license[] SEC("license") = "GPL";
804

/* Source: linux/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c */