// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

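/* Map value layout shared by all tests below: 'cnt' sits at offset 0 and
 * the spin lock at offset 4, which is why the asm bodies compute the lock
 * address as value pointer + 4.
 *
 * For reference, the C pattern the naked tests encode by hand looks
 * roughly like this (a sketch, not part of the tests):
 *
 *	int key = 0;
 *	struct val *v = bpf_map_lookup_elem(&map_spin_lock, &key);
 *	if (!v)
 *		return 0;
 *	bpf_spin_lock(&v->l);
 *	v->cnt++;
 *	bpf_spin_unlock(&v->l);
 */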
struct val {
	int cnt;
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct val);
} map_spin_lock SEC(".maps");

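/* Accept the canonical pattern: lookup, lock, read the counter, unlock.
 * Unprivileged loading is expected to fail since bpf_spin_lock is not
 * available to unprivileged programs (the exact message is not checked).
 */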
SEC("cgroup/skb")
__description("spin_lock: test1 success")
__success __failure_unpriv __msg_unpriv("")
__retval(0)
__naked void spin_lock_test1_success(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

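/* The bpf_spin_lock field itself (value + 4) is read with a plain load;
 * the verifier must reject any direct access to the lock.
 */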
SEC("cgroup/skb")
__description("spin_lock: test2 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__naked void lock_test2_direct_ld_st(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r1 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

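/* A 4-byte load at value + 1 overlaps the first byte of the lock.
 * BPF_F_ANY_ALIGNMENT is set so the rejection comes from the lock
 * overlap check, not from the alignment checker.
 */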
SEC("cgroup/skb")
__description("spin_lock: test3 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void lock_test3_direct_ld_st(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 1);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

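/* As test3, but a 2-byte load at value + 3 straddles into the lock. */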
SEC("cgroup/skb")
__description("spin_lock: test4 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void lock_test4_direct_ld_st(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u16*)(r6 + 3);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

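/* A helper call while the lock is held must be rejected; inside the
 * critical section only the matching bpf_spin_unlock() call is allowed.
 */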
SEC("cgroup/skb")
__description("spin_lock: test5 call within a locked region")
__failure __msg("calls are not allowed")
__failure_unpriv __msg_unpriv("")
__naked void call_within_a_locked_region(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	call %[bpf_get_prandom_u32];			\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

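/* One branch (cnt != 0) jumps over bpf_spin_unlock() and exits with the
 * lock still held, so verification must fail.
 */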
SEC("cgroup/skb")
__description("spin_lock: test6 missing unlock")
__failure __msg("unlock is missing")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test6_missing_unlock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	if r0 != 0 goto l1_%=;				\
	call %[bpf_spin_unlock];			\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

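/* r1 is a map value pointer plus 4 and thus never zero, so the branch
 * always skips bpf_spin_lock(): the verifier sees bpf_spin_unlock()
 * without a preceding lock on the verified path.
 */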
SEC("cgroup/skb")
__description("spin_lock: test7 unlock without lock")
__failure __msg("without taking a lock")
__failure_unpriv __msg_unpriv("")
__naked void lock_test7_unlock_without_lock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	if r1 != 0 goto l1_%=;				\
	call %[bpf_spin_lock];				\
l1_%=:	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

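/* Taking the lock twice: the second bpf_spin_lock() is itself a call
 * made while the lock is held, hence the generic "calls are not
 * allowed" error rather than a dedicated double-lock message.
 */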
SEC("cgroup/skb")
__description("spin_lock: test8 double lock")
__failure __msg("calls are not allowed")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test8_double_lock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

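/* Both lookups use key 0 of the same map and alias at runtime, but the
 * verifier gives each lookup a fresh id, so unlocking through the
 * second pointer while the first one was locked is rejected.
 */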
SEC("cgroup/skb")
__description("spin_lock: test9 different lock")
__failure __msg("unlock of different lock")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test9_different_lock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

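/* The static subprog below takes the lock and returns while still
 * holding it; the verifier is expected to reject this even though the
 * caller unlocks afterwards, as the lock may not be held across the
 * subprog's exit.
 */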
SEC("cgroup/skb")
__description("spin_lock: test10 lock in subprog without unlock")
__failure __msg("unlock is missing")
__failure_unpriv __msg_unpriv("")
__naked void lock_in_subprog_without_unlock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call lock_in_subprog_without_unlock__1;		\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void lock_in_subprog_without_unlock__1(void)
{
	asm volatile ("					\
	call %[bpf_spin_lock];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_spin_lock)
	: __clobber_all);
}

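/* Legacy packet access (BPF_LD_ABS, the skb[0] load below) is not
 * allowed inside a locked region; SEC("tc") because ld_abs needs an
 * skb-based program type.
 */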
SEC("tc")
__description("spin_lock: test11 ld_abs under lock")
__failure __msg("inside bpf_spin_lock")
__naked void test11_ld_abs_under_lock(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r7 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r0 = *(u8*)skb[0];				\
	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

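/* With BPF_F_TEST_STATE_FREQ forcing frequent state checkpoints, make
 * sure regsafe() distinguishes the two map value pointers by id:
 * depending on skb->mark, r7 may be redirected to the second lookup's
 * value before the unlock, which must be reported as an unlock of a
 * different lock.
 */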
SEC("tc")
__description("spin_lock: regsafe compare reg->id for map value")
__failure __msg("bpf_spin_unlock of different lock")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void reg_id_for_map_value(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r6 = *(u32*)(r6 + %[__sk_buff_mark]);		\
	r1 = %[map_spin_lock] ll;			\
	r9 = r1;					\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r2 = r10;					\
	r2 += -4;					\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r7 = r0;					\
	r1 = r9;					\
	r2 = r10;					\
	r2 += -4;					\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r8 = r0;					\
	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	if r6 == 0 goto l2_%=;				\
	goto l3_%=;					\
l2_%=:	r7 = r8;					\
l3_%=:	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

/* Make sure that regsafe() compares ids for spin lock records using
 * check_ids():
 *  1: r9 = map_lookup_elem(...)  ; r9.id == 1
 *  2: r8 = map_lookup_elem(...)  ; r8.id == 2
 *  3: r7 = ktime_get_ns()
 *  4: r6 = ktime_get_ns()
 *  5: if r6 > r7 goto <9>
 *  6: spin_lock(r8)
 *  7: r9 = r8
 *  8: goto <10>
 *  9: spin_lock(r9)
 * 10: spin_unlock(r9)             ; r9.id == 1 || r9.id == 2 and lock is active,
 *                                 ; second visit to (10) should be considered safe
 *                                 ;   if check_ids() is used.
 * 11: exit(0)
 */

SEC("cgroup/skb")
__description("spin_lock: regsafe() check_ids() similar id mappings")
__success __msg("29: safe")
__failure_unpriv __msg_unpriv("")
__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_similar_id_mappings(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	/* r9 = map_lookup_elem(...) */			\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r9 = r0;					\
	/* r8 = map_lookup_elem(...) */			\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l1_%=;				\
	r8 = r0;					\
	/* r7 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r7 = r0;					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* if r6 > r7 goto +5      ; no new information about the state is derived from\
	 *                         ; this check, thus produced verifier states differ\
	 *                         ; only in 'insn_idx'	\
	 * spin_lock(r8)				\
	 * r9 = r8					\
	 * goto unlock					\
	 */						\
	if r6 > r7 goto l2_%=;				\
	r1 = r8;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r9 = r8;					\
	goto l3_%=;					\
l2_%=:	/* spin_lock(r9) */				\
	r1 = r9;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
l3_%=:	/* spin_unlock(r9) */				\
	r1 = r9;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
l0_%=:	/* exit(0) */					\
	r0 = 0;						\
l1_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";