// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
11
12int vals[] SEC(".data.vals") = {1, 2, 3, 4};
13
14__naked __noinline __used
15static unsigned long identity_subprog()
16{
17 /* the simplest *static* 64-bit identity function */
18 asm volatile (
19 "r0 = r1;"
20 "exit;"
21 );
22}
23
24__noinline __used
25unsigned long global_identity_subprog(__u64 x)
26{
27 /* the simplest *global* 64-bit identity function */
28 return x;
29}
30
31__naked __noinline __used
32static unsigned long callback_subprog()
33{
34 /* the simplest callback function */
35 asm volatile (
36 "r0 = 0;"
37 "exit;"
38 );
39}
40
41SEC("?raw_tp")
42__success __log_level(2)
43__msg("7: (0f) r1 += r0")
44__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
45__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
46__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
47__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
48__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
49__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
50__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
51__naked int subprog_result_precise(void)
52{
53 asm volatile (
54 "r6 = 3;"
55 /* pass r6 through r1 into subprog to get it back as r0;
56 * this whole chain will have to be marked as precise later
57 */
58 "r1 = r6;"
59 "call identity_subprog;"
60 /* now use subprog's returned value (which is a
61 * r6 -> r1 -> r0 chain), as index into vals array, forcing
62 * all of that to be known precisely
63 */
64 "r0 *= 4;"
65 "r1 = %[vals];"
66 /* here r0->r1->r6 chain is forced to be precise and has to be
67 * propagated back to the beginning, including through the
68 * subprog call
69 */
70 "r1 += r0;"
71 "r0 = *(u32 *)(r1 + 0);"
72 "exit;"
73 :
74 : __imm_ptr(vals)
75 : __clobber_common, "r6"
76 );
77}
78
79SEC("?raw_tp")
80__success __log_level(2)
81__msg("9: (0f) r1 += r0")
82__msg("mark_precise: frame0: last_idx 9 first_idx 0")
83__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
84__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
85__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
86__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
87__naked int global_subprog_result_precise(void)
88{
89 asm volatile (
90 "r6 = 3;"
91 /* pass r6 through r1 into subprog to get it back as r0;
92 * given global_identity_subprog is global, precision won't
93 * propagate all the way back to r6
94 */
95 "r1 = r6;"
96 "call global_identity_subprog;"
97 /* now use subprog's returned value (which is unknown now, so
98 * we need to clamp it), as index into vals array, forcing r0
99 * to be marked precise (with no effect on r6, though)
100 */
101 "if r0 < %[vals_arr_sz] goto 1f;"
102 "r0 = %[vals_arr_sz] - 1;"
103 "1:"
104 "r0 *= 4;"
105 "r1 = %[vals];"
106 /* here r0 is forced to be precise and has to be
107 * propagated back to the global subprog call, but it
108 * shouldn't go all the way to mark r6 as precise
109 */
110 "r1 += r0;"
111 "r0 = *(u32 *)(r1 + 0);"
112 "exit;"
113 :
114 : __imm_ptr(vals),
115 __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
116 : __clobber_common, "r6"
117 );
118}
119
120__naked __noinline __used
121static unsigned long loop_callback_bad()
122{
123 /* bpf_loop() callback that can return values outside of [0, 1] range */
124 asm volatile (
125 "call %[bpf_get_prandom_u32];"
126 "if r0 s> 1000 goto 1f;"
127 "r0 = 0;"
128 "1:"
129 "goto +0;" /* checkpoint */
130 /* bpf_loop() expects [0, 1] values, so branch above skipping
131 * r0 = 0; should lead to a failure, but if exit instruction
132 * doesn't enforce r0's precision, this callback will be
133 * successfully verified
134 */
135 "exit;"
136 :
137 : __imm(bpf_get_prandom_u32)
138 : __clobber_common
139 );
140}
141
142SEC("?raw_tp")
143__failure __log_level(2)
144__flag(BPF_F_TEST_STATE_FREQ)
145/* check that fallthrough code path marks r0 as precise */
146__msg("mark_precise: frame1: regs=r0 stack= before 11: (b7) r0 = 0")
147/* check that we have branch code path doing its own validation */
148__msg("from 10 to 12: frame1: R0=scalar(smin=umin=1001")
149/* check that branch code path marks r0 as precise, before failing */
150__msg("mark_precise: frame1: regs=r0 stack= before 9: (85) call bpf_get_prandom_u32#7")
151__msg("At callback return the register R0 has smin=1001 should have been in [0, 1]")
152__naked int callback_precise_return_fail(void)
153{
154 asm volatile (
155 "r1 = 1;" /* nr_loops */
156 "r2 = %[loop_callback_bad];" /* callback_fn */
157 "r3 = 0;" /* callback_ctx */
158 "r4 = 0;" /* flags */
159 "call %[bpf_loop];"
160
161 "r0 = 0;"
162 "exit;"
163 :
164 : __imm_ptr(loop_callback_bad),
165 __imm(bpf_loop)
166 : __clobber_common
167 );
168}
169
170SEC("?raw_tp")
171__success __log_level(2)
172/* First simulated path does not include callback body,
173 * r1 and r4 are always precise for bpf_loop() calls.
174 */
175__msg("9: (85) call bpf_loop#181")
176__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
177__msg("mark_precise: frame0: parent state regs=r4 stack=:")
178__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
179__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
180__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
181__msg("mark_precise: frame0: parent state regs=r1 stack=:")
182__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
183__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
184__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
185__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
186__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
187__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
188/* r6 precision propagation */
189__msg("14: (0f) r1 += r6")
190__msg("mark_precise: frame0: last_idx 14 first_idx 9")
191__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
192__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
193__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
194__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
195__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
196/* State entering callback body popped from states stack */
197__msg("from 9 to 17: frame1:")
198__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
199__msg("17: (b7) r0 = 0")
200__msg("18: (95) exit")
201__msg("returning from callee:")
202__msg("to caller at 9:")
203__msg("frame 0: propagating r1,r4")
204__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
205__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
206__msg("from 18 to 9: safe")
207__naked int callback_result_precise(void)
208{
209 asm volatile (
210 "r6 = 3;"
211
212 /* call subprog and use result; r0 shouldn't propagate back to
213 * callback_subprog
214 */
215 "r1 = r6;" /* nr_loops */
216 "r2 = %[callback_subprog];" /* callback_fn */
217 "r3 = 0;" /* callback_ctx */
218 "r4 = 0;" /* flags */
219 "call %[bpf_loop];"
220
221 "r6 = r0;"
222 "if r6 > 3 goto 1f;"
223 "r6 *= 4;"
224 "r1 = %[vals];"
225 /* here r6 is forced to be precise and has to be propagated
226 * back to the bpf_loop() call, but not beyond
227 */
228 "r1 += r6;"
229 "r0 = *(u32 *)(r1 + 0);"
230 "1:"
231 "exit;"
232 :
233 : __imm_ptr(vals),
234 __imm_ptr(callback_subprog),
235 __imm(bpf_loop)
236 : __clobber_common, "r6"
237 );
238}
239
240SEC("?raw_tp")
241__success __log_level(2)
242__msg("7: (0f) r1 += r6")
243__msg("mark_precise: frame0: last_idx 7 first_idx 0")
244__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
245__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
246__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
247__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
248__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
249__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
250__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
251__naked int parent_callee_saved_reg_precise(void)
252{
253 asm volatile (
254 "r6 = 3;"
255
256 /* call subprog and ignore result; we need this call only to
257 * complicate jump history
258 */
259 "r1 = 0;"
260 "call identity_subprog;"
261
262 "r6 *= 4;"
263 "r1 = %[vals];"
264 /* here r6 is forced to be precise and has to be propagated
265 * back to the beginning, handling (and ignoring) subprog call
266 */
267 "r1 += r6;"
268 "r0 = *(u32 *)(r1 + 0);"
269 "exit;"
270 :
271 : __imm_ptr(vals)
272 : __clobber_common, "r6"
273 );
274}
275
276SEC("?raw_tp")
277__success __log_level(2)
278__msg("7: (0f) r1 += r6")
279__msg("mark_precise: frame0: last_idx 7 first_idx 0")
280__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
281__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
282__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
283__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
284__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
285__naked int parent_callee_saved_reg_precise_global(void)
286{
287 asm volatile (
288 "r6 = 3;"
289
290 /* call subprog and ignore result; we need this call only to
291 * complicate jump history
292 */
293 "r1 = 0;"
294 "call global_identity_subprog;"
295
296 "r6 *= 4;"
297 "r1 = %[vals];"
298 /* here r6 is forced to be precise and has to be propagated
299 * back to the beginning, handling (and ignoring) subprog call
300 */
301 "r1 += r6;"
302 "r0 = *(u32 *)(r1 + 0);"
303 "exit;"
304 :
305 : __imm_ptr(vals)
306 : __clobber_common, "r6"
307 );
308}
309
310SEC("?raw_tp")
311__success __log_level(2)
312/* First simulated path does not include callback body */
313__msg("12: (0f) r1 += r6")
314__msg("mark_precise: frame0: last_idx 12 first_idx 9")
315__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
316__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
317__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
318__msg("mark_precise: frame0: parent state regs=r6 stack=:")
319__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
320__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
321__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
322__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
323__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
324__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
325/* State entering callback body popped from states stack */
326__msg("from 9 to 15: frame1:")
327__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
328__msg("15: (b7) r0 = 0")
329__msg("16: (95) exit")
330__msg("returning from callee:")
331__msg("to caller at 9:")
332/* r1, r4 are always precise for bpf_loop(),
333 * r6 was marked before backtracking to callback body.
334 */
335__msg("frame 0: propagating r1,r4,r6")
336__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
337__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
338__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
339__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
340__msg("mark_precise: frame0: parent state regs= stack=:")
341__msg("from 16 to 9: safe")
342__naked int parent_callee_saved_reg_precise_with_callback(void)
343{
344 asm volatile (
345 "r6 = 3;"
346
347 /* call subprog and ignore result; we need this call only to
348 * complicate jump history
349 */
350 "r1 = 1;" /* nr_loops */
351 "r2 = %[callback_subprog];" /* callback_fn */
352 "r3 = 0;" /* callback_ctx */
353 "r4 = 0;" /* flags */
354 "call %[bpf_loop];"
355
356 "r6 *= 4;"
357 "r1 = %[vals];"
358 /* here r6 is forced to be precise and has to be propagated
359 * back to the beginning, handling (and ignoring) callback call
360 */
361 "r1 += r6;"
362 "r0 = *(u32 *)(r1 + 0);"
363 "exit;"
364 :
365 : __imm_ptr(vals),
366 __imm_ptr(callback_subprog),
367 __imm(bpf_loop)
368 : __clobber_common, "r6"
369 );
370}
371
372SEC("?raw_tp")
373__success __log_level(2)
374__msg("9: (0f) r1 += r6")
375__msg("mark_precise: frame0: last_idx 9 first_idx 6")
376__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
377__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
378__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
379__msg("mark_precise: frame0: parent state regs= stack=-8:")
380__msg("mark_precise: frame0: last_idx 13 first_idx 0")
381__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
382__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
383__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
384__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
385__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
386__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
387__naked int parent_stack_slot_precise(void)
388{
389 asm volatile (
390 /* spill reg */
391 "r6 = 3;"
392 "*(u64 *)(r10 - 8) = r6;"
393
394 /* call subprog and ignore result; we need this call only to
395 * complicate jump history
396 */
397 "r1 = 0;"
398 "call identity_subprog;"
399
400 /* restore reg from stack; in this case we'll be carrying
401 * stack mask when going back into subprog through jump
402 * history
403 */
404 "r6 = *(u64 *)(r10 - 8);"
405
406 "r6 *= 4;"
407 "r1 = %[vals];"
408 /* here r6 is forced to be precise and has to be propagated
409 * back to the beginning, handling (and ignoring) subprog call
410 */
411 "r1 += r6;"
412 "r0 = *(u32 *)(r1 + 0);"
413 "exit;"
414 :
415 : __imm_ptr(vals)
416 : __clobber_common, "r6"
417 );
418}
419
420SEC("?raw_tp")
421__success __log_level(2)
422__msg("9: (0f) r1 += r6")
423__msg("mark_precise: frame0: last_idx 9 first_idx 0")
424__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
425__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
426__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
427__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
428__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
429__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
430__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
431__naked int parent_stack_slot_precise_global(void)
432{
433 asm volatile (
434 /* spill reg */
435 "r6 = 3;"
436 "*(u64 *)(r10 - 8) = r6;"
437
438 /* call subprog and ignore result; we need this call only to
439 * complicate jump history
440 */
441 "r1 = 0;"
442 "call global_identity_subprog;"
443
444 /* restore reg from stack; in this case we'll be carrying
445 * stack mask when going back into subprog through jump
446 * history
447 */
448 "r6 = *(u64 *)(r10 - 8);"
449
450 "r6 *= 4;"
451 "r1 = %[vals];"
452 /* here r6 is forced to be precise and has to be propagated
453 * back to the beginning, handling (and ignoring) subprog call
454 */
455 "r1 += r6;"
456 "r0 = *(u32 *)(r1 + 0);"
457 "exit;"
458 :
459 : __imm_ptr(vals)
460 : __clobber_common, "r6"
461 );
462}
463
464SEC("?raw_tp")
465__success __log_level(2)
466/* First simulated path does not include callback body */
467__msg("14: (0f) r1 += r6")
468__msg("mark_precise: frame0: last_idx 14 first_idx 10")
469__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
470__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
471__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
472__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
473__msg("mark_precise: frame0: parent state regs= stack=-8:")
474__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
475__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
476__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
477__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
478__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
479__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
480__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
481/* State entering callback body popped from states stack */
482__msg("from 10 to 17: frame1:")
483__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
484__msg("17: (b7) r0 = 0")
485__msg("18: (95) exit")
486__msg("returning from callee:")
487__msg("to caller at 10:")
488/* r1, r4 are always precise for bpf_loop(),
489 * fp-8 was marked before backtracking to callback body.
490 */
491__msg("frame 0: propagating r1,r4,fp-8")
492__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
493__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
494__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
495__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
496__msg("mark_precise: frame0: parent state regs= stack=:")
497__msg("from 18 to 10: safe")
498__naked int parent_stack_slot_precise_with_callback(void)
499{
500 asm volatile (
501 /* spill reg */
502 "r6 = 3;"
503 "*(u64 *)(r10 - 8) = r6;"
504
505 /* ensure we have callback frame in jump history */
506 "r1 = r6;" /* nr_loops */
507 "r2 = %[callback_subprog];" /* callback_fn */
508 "r3 = 0;" /* callback_ctx */
509 "r4 = 0;" /* flags */
510 "call %[bpf_loop];"
511
512 /* restore reg from stack; in this case we'll be carrying
513 * stack mask when going back into subprog through jump
514 * history
515 */
516 "r6 = *(u64 *)(r10 - 8);"
517
518 "r6 *= 4;"
519 "r1 = %[vals];"
520 /* here r6 is forced to be precise and has to be propagated
521 * back to the beginning, handling (and ignoring) subprog call
522 */
523 "r1 += r6;"
524 "r0 = *(u32 *)(r1 + 0);"
525 "exit;"
526 :
527 : __imm_ptr(vals),
528 __imm_ptr(callback_subprog),
529 __imm(bpf_loop)
530 : __clobber_common, "r6"
531 );
532}
533
534__noinline __used
535static __u64 subprog_with_precise_arg(__u64 x)
536{
537 return vals[x]; /* x is forced to be precise */
538}
539
540SEC("?raw_tp")
541__success __log_level(2)
542__msg("8: (0f) r2 += r1")
543__msg("mark_precise: frame1: last_idx 8 first_idx 0")
544__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
545__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
546__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
547__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
548__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
549__naked int subprog_arg_precise(void)
550{
551 asm volatile (
552 "r6 = 3;"
553 "r1 = r6;"
554 /* subprog_with_precise_arg expects its argument to be
555 * precise, so r1->r6 will be marked precise from inside the
556 * subprog
557 */
558 "call subprog_with_precise_arg;"
559 "r0 += r6;"
560 "exit;"
561 :
562 :
563 : __clobber_common, "r6"
564 );
565}
566
567/* r1 is pointer to stack slot;
568 * r2 is a register to spill into that slot
569 * subprog also spills r2 into its own stack slot
570 */
571__naked __noinline __used
572static __u64 subprog_spill_reg_precise(void)
573{
574 asm volatile (
575 /* spill to parent stack */
576 "*(u64 *)(r1 + 0) = r2;"
577 /* spill to subprog stack (we use -16 offset to avoid
578 * accidental confusion with parent's -8 stack slot in
579 * verifier log output)
580 */
581 "*(u64 *)(r10 - 16) = r2;"
582 /* use both spills as return result to propagete precision everywhere */
583 "r0 = *(u64 *)(r10 - 16);"
584 "r2 = *(u64 *)(r1 + 0);"
585 "r0 += r2;"
586 "exit;"
587 );
588}
589
590SEC("?raw_tp")
591__success __log_level(2)
592__msg("10: (0f) r1 += r7")
593__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
594__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
595__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
596__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
597__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
598__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
599__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
600__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
601__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
602__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
603__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
604__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
605__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
606__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
607__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
608__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
609__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
610__naked int subprog_spill_into_parent_stack_slot_precise(void)
611{
612 asm volatile (
613 "r6 = 1;"
614
615 /* pass pointer to stack slot and r6 to subprog;
616 * r6 will be marked precise and spilled into fp-8 slot, which
617 * also should be marked precise
618 */
619 "r1 = r10;"
620 "r1 += -8;"
621 "r2 = r6;"
622 "call subprog_spill_reg_precise;"
623
624 /* restore reg from stack; in this case we'll be carrying
625 * stack mask when going back into subprog through jump
626 * history
627 */
628 "r7 = *(u64 *)(r10 - 8);"
629
630 "r7 *= 4;"
631 "r1 = %[vals];"
632 /* here r7 is forced to be precise and has to be propagated
633 * back to the beginning, handling subprog call and logic
634 */
635 "r1 += r7;"
636 "r0 = *(u32 *)(r1 + 0);"
637 "exit;"
638 :
639 : __imm_ptr(vals)
640 : __clobber_common, "r6", "r7"
641 );
642}
643
644SEC("?raw_tp")
645__success __log_level(2)
646__msg("17: (0f) r1 += r0")
647__msg("mark_precise: frame0: last_idx 17 first_idx 0 subseq_idx -1")
648__msg("mark_precise: frame0: regs=r0 stack= before 16: (bf) r1 = r7")
649__msg("mark_precise: frame0: regs=r0 stack= before 15: (27) r0 *= 4")
650__msg("mark_precise: frame0: regs=r0 stack= before 14: (79) r0 = *(u64 *)(r10 -16)")
651__msg("mark_precise: frame0: regs= stack=-16 before 13: (7b) *(u64 *)(r7 -8) = r0")
652__msg("mark_precise: frame0: regs=r0 stack= before 12: (79) r0 = *(u64 *)(r8 +16)")
653__msg("mark_precise: frame0: regs= stack=-16 before 11: (7b) *(u64 *)(r8 +16) = r0")
654__msg("mark_precise: frame0: regs=r0 stack= before 10: (79) r0 = *(u64 *)(r7 -8)")
655__msg("mark_precise: frame0: regs= stack=-16 before 9: (7b) *(u64 *)(r10 -16) = r0")
656__msg("mark_precise: frame0: regs=r0 stack= before 8: (07) r8 += -32")
657__msg("mark_precise: frame0: regs=r0 stack= before 7: (bf) r8 = r10")
658__msg("mark_precise: frame0: regs=r0 stack= before 6: (07) r7 += -8")
659__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r7 = r10")
660__msg("mark_precise: frame0: regs=r0 stack= before 21: (95) exit")
661__msg("mark_precise: frame1: regs=r0 stack= before 20: (bf) r0 = r1")
662__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+15")
663__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
664__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
665__naked int stack_slot_aliases_precision(void)
666{
667 asm volatile (
668 "r6 = 1;"
669 /* pass r6 through r1 into subprog to get it back as r0;
670 * this whole chain will have to be marked as precise later
671 */
672 "r1 = r6;"
673 "call identity_subprog;"
674 /* let's setup two registers that are aliased to r10 */
675 "r7 = r10;"
676 "r7 += -8;" /* r7 = r10 - 8 */
677 "r8 = r10;"
678 "r8 += -32;" /* r8 = r10 - 32 */
679 /* now spill subprog's return value (a r6 -> r1 -> r0 chain)
680 * a few times through different stack pointer regs, making
681 * sure to use r10, r7, and r8 both in LDX and STX insns, and
682 * *importantly* also using a combination of const var_off and
683 * insn->off to validate that we record final stack slot
684 * correctly, instead of relying on just insn->off derivation,
685 * which is only valid for r10-based stack offset
686 */
687 "*(u64 *)(r10 - 16) = r0;"
688 "r0 = *(u64 *)(r7 - 8);" /* r7 - 8 == r10 - 16 */
689 "*(u64 *)(r8 + 16) = r0;" /* r8 + 16 = r10 - 16 */
690 "r0 = *(u64 *)(r8 + 16);"
691 "*(u64 *)(r7 - 8) = r0;"
692 "r0 = *(u64 *)(r10 - 16);"
693 /* get ready to use r0 as an index into array to force precision */
694 "r0 *= 4;"
695 "r1 = %[vals];"
696 /* here r0->r1->r6 chain is forced to be precise and has to be
697 * propagated back to the beginning, including through the
698 * subprog call and all the stack spills and loads
699 */
700 "r1 += r0;"
701 "r0 = *(u32 *)(r1 + 0);"
702 "exit;"
703 :
704 : __imm_ptr(vals)
705 : __clobber_common, "r6"
706 );
707}
708
709char _license[] SEC("license") = "GPL";
/* source: linux/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c */