1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ |
3 | |
4 | #include <errno.h> |
5 | #include <string.h> |
6 | #include <linux/bpf.h> |
7 | #include <bpf/bpf_helpers.h> |
8 | #include "bpf_misc.h" |
9 | |
/* element count of a statically-sized array; only valid on true arrays, not pointers */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
11 | |
/* lookup table in its own data section; indexing into it forces the
 * verifier to mark the index register (and its sources) precise
 */
int vals[] SEC(".data.vals") = {1, 2, 3, 4};
13 | |
/* Static subprog returning its argument (r1) unchanged in r0.
 * Written as naked BPF asm so the emitted instruction sequence is exactly
 * two insns, which the __msg() instruction-index expectations below rely on.
 */
__naked __noinline __used
static unsigned long identity_subprog()
{
	/* the simplest *static* 64-bit identity function */
	asm volatile (
		"r0 = r1;"
		"exit;"
	);
}
23 | |
/* Global (non-static) identity subprog; the tests below rely on it being
 * global so that precision marks do not propagate through it back into
 * the caller (see global_subprog_result_precise).
 */
__noinline __used
unsigned long global_identity_subprog(__u64 x)
{
	/* the simplest *global* 64-bit identity function */
	return x;
}
30 | |
/* Minimal bpf_loop() callback: always returns 0 ("continue looping").
 * Naked asm keeps the body to exactly two insns for stable __msg() offsets.
 */
__naked __noinline __used
static unsigned long callback_subprog()
{
	/* the simplest callback function */
	asm volatile (
		"r0 = 0;"
		"exit;"
	);
}
40 | |
/* Verify that marking a *static* subprog's return value precise
 * backtracks through the callee (frame1) and back into the caller's
 * r6 -> r1 -> r0 chain; the __msg() patterns pin the expected
 * mark_precise log lines.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r0")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* now use subprog's returned value (which is a
		 * r6 -> r1 -> r0 chain), as index into vals array, forcing
		 * all of that to be known precisely
		 */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
78 | |
/* Verify that precision backtracking *stops* at a global subprog call:
 * r0 is marked precise back to the call insn, but r6 (passed in as the
 * argument) is not touched, since global subprogs are verified
 * independently.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
__naked int global_subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * given global_identity_subprog is global, precision won't
		 * propagate all the way back to r6
		 */
		"r1 = r6;"
		"call global_identity_subprog;"
		/* now use subprog's returned value (which is unknown now, so
		 * we need to clamp it), as index into vals array, forcing r0
		 * to be marked precise (with no effect on r6, though)
		 */
		"if r0 < %[vals_arr_sz] goto 1f;"
		"r0 = %[vals_arr_sz] - 1;"
		"1:"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0 is forced to be precise and has to be
		 * propagated back to the global subprog call, but it
		 * shouldn't go all the way to mark r6 as precise
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
		: __clobber_common, "r6"
	);
}
119 | |
/* Deliberately broken bpf_loop() callback: one branch can leave r0 with a
 * value > 1000, outside the [0, 1] range bpf_loop() requires, so the
 * verifier must reject programs using it (see callback_precise_return_fail).
 */
__naked __noinline __used
static unsigned long loop_callback_bad()
{
	/* bpf_loop() callback that can return values outside of [0, 1] range */
	asm volatile (
		"call %[bpf_get_prandom_u32];"
		"if r0 s> 1000 goto 1f;"
		"r0 = 0;"
		"1:"
		"goto +0;" /* checkpoint */
		/* bpf_loop() expects [0, 1] values, so branch above skipping
		 * r0 = 0; should lead to a failure, but if exit instruction
		 * doesn't enforce r0's precision, this callback will be
		 * successfully verified
		 */
		"exit;"
		:
		: __imm(bpf_get_prandom_u32)
		: __clobber_common
	);
}
141 | |
/* Verify that the callback's return value (r0) is forced precise at the
 * callback's exit, so the out-of-range branch of loop_callback_bad is
 * detected and verification fails with the expected range error.
 */
SEC("?raw_tp")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
/* check that fallthrough code path marks r0 as precise */
__msg("mark_precise: frame1: regs=r0 stack= before 11: (b7) r0 = 0")
/* check that we have branch code path doing its own validation */
__msg("from 10 to 12: frame1: R0=scalar(smin=umin=1001")
/* check that branch code path marks r0 as precise, before failing */
__msg("mark_precise: frame1: regs=r0 stack= before 9: (85) call bpf_get_prandom_u32#7")
__msg("At callback return the register R0 has smin=1001 should have been in [0, 1]")
__naked int callback_precise_return_fail(void)
{
	asm volatile (
		"r1 = 1;"			/* nr_loops */
		"r2 = %[loop_callback_bad];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r0 = 0;"
		"exit;"
		:
		: __imm_ptr(loop_callback_bad),
		  __imm(bpf_loop)
		: __clobber_common
	);
}
169 | |
/* Verify precision propagation around a bpf_loop() call: r1/r4 are always
 * marked precise for bpf_loop(), and later precision of r6 (copied from
 * bpf_loop()'s r0 result) stops at the helper call instead of propagating
 * into the callback body.
 */
SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body,
 * r1 and r4 are always precise for bpf_loop() calls.
 */
__msg("9: (85) call bpf_loop#181")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: parent state regs=r4 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: parent state regs=r1 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* r6 precision propagation */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
/* State entering callback body popped from states stack */
__msg("from 9 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
__msg("frame 0: propagating r1,r4")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
__msg("from 18 to 9: safe")
__naked int callback_result_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and use result; r0 shouldn't propagate back to
		 * callback_subprog
		 */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 = r0;"
		"if r6 > 3 goto 1f;"
		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the bpf_loop() call, but not beyond
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"1:"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}
239 | |
/* Verify that a caller's callee-saved register (r6) can be backtracked
 * *through* a static subprog call in jump history: frame1 insns are
 * skipped (regs= is empty there) and r6 is traced back to its constant.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
275 | |
/* Same as parent_callee_saved_reg_precise, but across a *global* subprog
 * call: the callee's insns never appear in the backtracking log, and r6
 * is traced straight over the call insn to its constant.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_global(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
309 | |
/* Same callee-saved-register scenario, but with a bpf_loop() callback in
 * jump history: r6 precision skips over the helper call into the parent
 * state, and the callback-return propagation carries r1,r4,r6.
 */
SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("12: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 12 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 9 to 15: frame1:")
__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("15: (b7) r0 = 0")
__msg("16: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
/* r1, r4 are always precise for bpf_loop(),
 * r6 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 16 to 9: safe")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 1;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) callback call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}
371 | |
/* Verify that a precise *stack slot* (fp-8) in the parent frame is
 * backtracked through a static subprog call: the stack=-8 mask is
 * carried across the callee's insns back to the spill and the constant.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
419 | |
/* Same parent-stack-slot scenario, but across a *global* subprog call:
 * the stack=-8 mask jumps straight over the call insn (no frame1 lines)
 * back to the spill and the constant.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise_global(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
463 | |
/* Same parent-stack-slot scenario, but with a bpf_loop() callback in
 * jump history: the stack=-8 mask is carried over the helper call, and
 * callback-return propagation carries r1,r4,fp-8.
 */
SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 10 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 10:")
/* r1, r4 are always precise for bpf_loop(),
 * fp-8 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,fp-8")
__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 18 to 10: safe")
__naked int parent_stack_slot_precise_with_callback(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* ensure we have callback frame in jump history */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}
533 | |
/* Static subprog that indexes vals[] with its argument; the array access
 * inside the callee forces x (and its sources in the caller) to be
 * marked precise from within frame1.
 */
__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
	return vals[x]; /* x is forced to be precise */
}
539 | |
/* Verify that precision demanded on an *argument* inside a static subprog
 * propagates out of the callee (frame1) across the call boundary and back
 * to the caller's r1 -> r6 chain.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("8: (0f) r2 += r1")
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
__naked int subprog_arg_precise(void)
{
	asm volatile (
		"r6 = 3;"
		"r1 = r6;"
		/* subprog_with_precise_arg expects its argument to be
		 * precise, so r1->r6 will be marked precise from inside the
		 * subprog
		 */
		"call subprog_with_precise_arg;"
		"r0 += r6;"
		"exit;"
		:
		:
		: __clobber_common, "r6"
	);
}
566 | |
/* r1 is pointer to stack slot;
 * r2 is a register to spill into that slot
 * subprog also spills r2 into its own stack slot
 *
 * Naked asm subprog used to exercise precision tracking of spills both
 * into the caller's stack (via r1) and the callee's own stack (fp-16).
 */
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
	asm volatile (
		/* spill to parent stack */
		"*(u64 *)(r1 + 0) = r2;"
		/* spill to subprog stack (we use -16 offset to avoid
		 * accidental confusion with parent's -8 stack slot in
		 * verifier log output)
		 */
		"*(u64 *)(r10 - 16) = r2;"
		/* use both spills as return result to propagate precision everywhere */
		"r0 = *(u64 *)(r10 - 16);"
		"r2 = *(u64 *)(r1 + 0);"
		"r0 += r2;"
		"exit;"
	);
}
589 | |
/* Verify that a callee's spill *into the caller's stack slot* (through a
 * pointer argument) is tracked: precision of fp-8 in frame0 is traced
 * through frame1's spill/fill insns back to r2 -> r6 in the caller.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("10: (0f) r1 += r7")
__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int subprog_spill_into_parent_stack_slot_precise(void)
{
	asm volatile (
		"r6 = 1;"

		/* pass pointer to stack slot and r6 to subprog;
		 * r6 will be marked precise and spilled into fp-8 slot, which
		 * also should be marked precise
		 */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = r6;"
		"call subprog_spill_reg_precise;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r7 = *(u64 *)(r10 - 8);"

		"r7 *= 4;"
		"r1 = %[vals];"
		/* here r7 is forced to be precise and has to be propagated
		 * back to the beginning, handling subprog call and logic
		 */
		"r1 += r7;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6", "r7"
	);
}
643 | |
/* Verify that precision tracking resolves the *final* stack slot for
 * spills/fills done through r10 aliases (r7 = r10 - 8, r8 = r10 - 32)
 * combined with insn->off, instead of naively using insn->off alone,
 * which is only correct for r10-based accesses.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("17: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 17 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 16: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 15: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 14: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame0: regs= stack=-16 before 13: (7b) *(u64 *)(r7 -8) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 12: (79) r0 = *(u64 *)(r8 +16)")
__msg("mark_precise: frame0: regs= stack=-16 before 11: (7b) *(u64 *)(r8 +16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 10: (79) r0 = *(u64 *)(r7 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (07) r8 += -32")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (bf) r8 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (07) r7 += -8")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r7 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 21: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 20: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+15")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int stack_slot_aliases_precision(void)
{
	asm volatile (
		"r6 = 1;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* let's setup two registers that are aliased to r10 */
		"r7 = r10;"
		"r7 += -8;"		/* r7 = r10 - 8 */
		"r8 = r10;"
		"r8 += -32;"		/* r8 = r10 - 32 */
		/* now spill subprog's return value (a r6 -> r1 -> r0 chain)
		 * a few times through different stack pointer regs, making
		 * sure to use r10, r7, and r8 both in LDX and STX insns, and
		 * *importantly* also using a combination of const var_off and
		 * insn->off to validate that we record final stack slot
		 * correctly, instead of relying on just insn->off derivation,
		 * which is only valid for r10-based stack offset
		 */
		"*(u64 *)(r10 - 16) = r0;"
		"r0 = *(u64 *)(r7 - 8);"	/* r7 - 8 == r10 - 16 */
		"*(u64 *)(r8 + 16) = r0;"	/* r8 + 16 = r10 - 16 */
		"r0 = *(u64 *)(r8 + 16);"
		"*(u64 *)(r7 - 8) = r0;"
		"r0 = *(u64 *)(r10 - 16);"
		/* get ready to use r0 as an index into array to force precision */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call and all the stack spills and loads
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
708 | |
/* license declaration placed in the "license" section, as required to load BPF programs */
char _license[] SEC("license") = "GPL";
710 | |