1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2022 Facebook */ |
3 | |
4 | #include <string.h> |
5 | #include <stdbool.h> |
6 | #include <linux/bpf.h> |
7 | #include <bpf/bpf_helpers.h> |
8 | #include "bpf_misc.h" |
9 | #include "bpf_kfuncs.h" |
10 | #include "errno.h" |
11 | |
char _license[] SEC("license" ) = "GPL" ;

/* Shared with userspace: pid selects which task triggers the probes,
 * err reports per-test failure codes back, val is scratch test state.
 */
int pid, err, val;
15 | |
/* Record layout reserved in the ringbuf by the tests below */
struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};
22 | |
/* Ring buffer the dynptr reservation tests write into */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps" );
27 | |
/* Single-slot array whose value backs a memory dynptr in test_dynptr_data */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} array_map SEC(".maps" );
34 | |
35 | SEC("?tp/syscalls/sys_enter_nanosleep" ) |
36 | int test_read_write(void *ctx) |
37 | { |
38 | char write_data[64] = "hello there, world!!" ; |
39 | char read_data[64] = {}; |
40 | struct bpf_dynptr ptr; |
41 | int i; |
42 | |
43 | if (bpf_get_current_pid_tgid() >> 32 != pid) |
44 | return 0; |
45 | |
46 | bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr); |
47 | |
48 | /* Write data into the dynptr */ |
49 | err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); |
50 | |
51 | /* Read the data that was written into the dynptr */ |
52 | err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0); |
53 | |
54 | /* Ensure the data we read matches the data we wrote */ |
55 | for (i = 0; i < sizeof(read_data); i++) { |
56 | if (read_data[i] != write_data[i]) { |
57 | err = 1; |
58 | break; |
59 | } |
60 | } |
61 | |
62 | bpf_ringbuf_discard_dynptr(&ptr, 0); |
63 | return 0; |
64 | } |
65 | |
SEC("?tp/syscalls/sys_enter_nanosleep" )
/* Exercise bpf_dynptr_data() slices over a map-value-backed dynptr:
 * out-of-range requests must return NULL, and a valid slice must be
 * directly readable and writable.
 */
int test_dynptr_data(void *ctx)
{
	/* NOTE: this local val intentionally shadows the global val */
	__u32 key = 0, val = 235, *map_val;
	struct bpf_dynptr ptr;
	__u32 map_val_size;
	void *data;

	map_val_size = sizeof(*map_val);

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	bpf_map_update_elem(&array_map, &key, &val, 0);

	map_val = bpf_map_lookup_elem(&array_map, &key);
	if (!map_val) {
		err = 1;
		return 0;
	}

	bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);

	/* Try getting a data slice that is out of range */
	data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
	if (data) {
		err = 2;
		return 0;
	}

	/* Try getting more bytes than available */
	data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
	if (data) {
		err = 3;
		return 0;
	}

	data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
	if (!data) {
		err = 4;
		return 0;
	}

	/* The slice is a plain pointer into the map value: write through it */
	*(__u32 *)data = 999;

	/* Read the same bytes back through probe-read and compare */
	err = bpf_probe_read_kernel(&val, sizeof(val), data);
	if (err)
		return 0;

	if (val != *(int *)data)
		err = 5;

	return 0;
}
120 | |
121 | static int ringbuf_callback(__u32 index, void *data) |
122 | { |
123 | struct sample *sample; |
124 | |
125 | struct bpf_dynptr *ptr = (struct bpf_dynptr *)data; |
126 | |
127 | sample = bpf_dynptr_data(ptr, 0, sizeof(*sample)); |
128 | if (!sample) |
129 | err = 2; |
130 | else |
131 | sample->pid += index; |
132 | |
133 | return 0; |
134 | } |
135 | |
SEC("?tp/syscalls/sys_enter_nanosleep" )
/* Reserve a dynamically-sized ringbuf record and mutate it through a
 * dynptr handed to a bpf_loop() callback.
 */
int test_ringbuf(void *ctx)
{
	struct bpf_dynptr ptr;
	struct sample *sample;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	val = 100;

	/* check that you can reserve a dynamic size reservation */
	err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample) {
		err = 1;
		goto done;
	}

	sample->pid = 10;

	/* Can pass dynptr to callback functions */
	bpf_loop(10, ringbuf_callback, &ptr, 0);

	/* callback adds indices 0..9 to pid: 10 + 45 == 55 */
	if (sample->pid != 55)
		err = 2;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
168 | |
169 | SEC("?cgroup_skb/egress" ) |
170 | int test_skb_readonly(struct __sk_buff *skb) |
171 | { |
172 | __u8 write_data[2] = {1, 2}; |
173 | struct bpf_dynptr ptr; |
174 | int ret; |
175 | |
176 | if (bpf_dynptr_from_skb(skb, 0, &ptr)) { |
177 | err = 1; |
178 | return 1; |
179 | } |
180 | |
181 | /* since cgroup skbs are read only, writes should fail */ |
182 | ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); |
183 | if (ret != -EINVAL) { |
184 | err = 2; |
185 | return 1; |
186 | } |
187 | |
188 | return 1; |
189 | } |
190 | |
191 | SEC("?cgroup_skb/egress" ) |
192 | int test_dynptr_skb_data(struct __sk_buff *skb) |
193 | { |
194 | struct bpf_dynptr ptr; |
195 | __u64 *data; |
196 | |
197 | if (bpf_dynptr_from_skb(skb, 0, &ptr)) { |
198 | err = 1; |
199 | return 1; |
200 | } |
201 | |
202 | /* This should return NULL. Must use bpf_dynptr_slice API */ |
203 | data = bpf_dynptr_data(&ptr, 0, 1); |
204 | if (data) { |
205 | err = 2; |
206 | return 1; |
207 | } |
208 | |
209 | return 1; |
210 | } |
211 | |
212 | SEC("tp/syscalls/sys_enter_nanosleep" ) |
213 | int test_adjust(void *ctx) |
214 | { |
215 | struct bpf_dynptr ptr; |
216 | __u32 bytes = 64; |
217 | __u32 off = 10; |
218 | __u32 trim = 15; |
219 | |
220 | if (bpf_get_current_pid_tgid() >> 32 != pid) |
221 | return 0; |
222 | |
223 | err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr); |
224 | if (err) { |
225 | err = 1; |
226 | goto done; |
227 | } |
228 | |
229 | if (bpf_dynptr_size(&ptr) != bytes) { |
230 | err = 2; |
231 | goto done; |
232 | } |
233 | |
234 | /* Advance the dynptr by off */ |
235 | err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr)); |
236 | if (err) { |
237 | err = 3; |
238 | goto done; |
239 | } |
240 | |
241 | if (bpf_dynptr_size(&ptr) != bytes - off) { |
242 | err = 4; |
243 | goto done; |
244 | } |
245 | |
246 | /* Trim the dynptr */ |
247 | err = bpf_dynptr_adjust(&ptr, off, 15); |
248 | if (err) { |
249 | err = 5; |
250 | goto done; |
251 | } |
252 | |
253 | /* Check that the size was adjusted correctly */ |
254 | if (bpf_dynptr_size(&ptr) != trim - off) { |
255 | err = 6; |
256 | goto done; |
257 | } |
258 | |
259 | done: |
260 | bpf_ringbuf_discard_dynptr(&ptr, 0); |
261 | return 0; |
262 | } |
263 | |
SEC("tp/syscalls/sys_enter_nanosleep" )
/* Exercise the error paths of bpf_dynptr_adjust() (expected -EINVAL and
 * -ERANGE codes), then confirm an adjusted ringbuf dynptr still rejects
 * oversized writes and can be submitted normally.
 */
int test_adjust_err(void *ctx)
{
	char write_data[45] = "hello there, world!!" ;
	struct bpf_dynptr ptr;
	__u32 size = 64;
	__u32 off = 20;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* Check that start can't be greater than end */
	if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
		err = 2;
		goto done;
	}

	/* Check that start can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
		err = 3;
		goto done;
	}

	/* Check that end can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
		err = 4;
		goto done;
	}

	/* A valid adjustment: shrinks the dynptr to size - off bytes */
	if (bpf_dynptr_adjust(&ptr, off, size)) {
		err = 5;
		goto done;
	}

	/* Check that you can't write more bytes than available into the dynptr
	 * after you've adjusted it
	 */
	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 6;
		goto done;
	}

	/* Check that even after adjusting, submitting/discarding
	 * a ringbuf dynptr works
	 */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
321 | |
SEC("tp/syscalls/sys_enter_nanosleep" )
/* Verify semantics of a zero-size dynptr: non-zero-length reads/writes
 * must fail with -E2BIG, while zero-length reads/writes succeed.
 */
int test_zero_size_dynptr(void *ctx)
{
	char write_data = 'x', read_data;
	struct bpf_dynptr ptr;
	__u32 size = 64;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* After this, the dynptr has a size of 0 */
	if (bpf_dynptr_adjust(&ptr, size, size)) {
		err = 2;
		goto done;
	}

	/* Test that reading + writing non-zero bytes is not ok */
	if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
		err = 3;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 4;
		goto done;
	}

	/* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
	if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
		err = 5;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
		err = 6;
		goto done;
	}

	err = 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
371 | |
SEC("tp/syscalls/sys_enter_nanosleep" )
/* Check bpf_dynptr_is_null(): true for a dynptr left invalid by a failed
 * reservation, false for a successfully reserved one. The two exit
 * labels keep the discard order matching the reservation order.
 */
int test_dynptr_is_null(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u64 size = 4;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
		err = 1;
		goto exit_early;
	}

	/* Test that the invalid dynptr is null */
	if (!bpf_dynptr_is_null(&ptr1)) {
		err = 2;
		goto exit_early;
	}

	/* Get a valid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
		err = 3;
		goto exit;
	}

	/* Test that the valid dynptr is not null */
	if (bpf_dynptr_is_null(&ptr2)) {
		err = 4;
		goto exit;
	}

exit:
	bpf_ringbuf_discard_dynptr(&ptr2, 0);
exit_early:
	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	return 0;
}
412 | |
SEC("cgroup_skb/egress" )
/* Check bpf_dynptr_is_rdonly(): false for an invalid dynptr, true for a
 * cgroup-skb dynptr, false for a writable ringbuf dynptr.
 */
int test_dynptr_is_rdonly(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	struct bpf_dynptr ptr3;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
		err = 1;
		return 0;
	}

	/* Test that an invalid dynptr is_rdonly returns false */
	if (bpf_dynptr_is_rdonly(&ptr1)) {
		err = 2;
		return 0;
	}

	/* Get a read-only dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
		err = 3;
		return 0;
	}

	/* Test that the dynptr is read-only */
	if (!bpf_dynptr_is_rdonly(&ptr2)) {
		err = 4;
		return 0;
	}

	/* Get a read-writeable dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
		err = 5;
		goto done;
	}

	/* Test that the dynptr is read-write (NOT read-only) */
	if (bpf_dynptr_is_rdonly(&ptr3)) {
		err = 6;
		goto done;
	}

done:
	bpf_ringbuf_discard_dynptr(&ptr3, 0);
	return 0;
}
460 | |
SEC("cgroup_skb/egress" )
/* Check bpf_dynptr_clone(): the clone inherits the (adjusted) size and
 * rd-only flag, and later adjustments to the original do not affect it.
 */
int test_dynptr_clone(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u32 off = 2, size;

	/* Get a dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
		err = 1;
		return 0;
	}

	if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
		err = 2;
		return 0;
	}

	/* Clone the dynptr */
	if (bpf_dynptr_clone(&ptr1, &ptr2)) {
		err = 3;
		return 0;
	}

	size = bpf_dynptr_size(&ptr1);

	/* Check that the clone has the same size and rd-only */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 4;
		return 0;
	}

	if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
		err = 5;
		return 0;
	}

	/* Advance and trim the original dynptr (return value deliberately
	 * ignored; only the clone's size matters below)
	 */
	bpf_dynptr_adjust(&ptr1, 5, 5);

	/* Check that only original dynptr was affected, and the clone wasn't */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 6;
		return 0;
	}

	return 0;
}
509 | |
510 | SEC("?cgroup_skb/egress" ) |
511 | int test_dynptr_skb_no_buff(struct __sk_buff *skb) |
512 | { |
513 | struct bpf_dynptr ptr; |
514 | __u64 *data; |
515 | |
516 | if (bpf_dynptr_from_skb(skb, 0, &ptr)) { |
517 | err = 1; |
518 | return 1; |
519 | } |
520 | |
521 | /* This may return NULL. SKB may require a buffer */ |
522 | data = bpf_dynptr_slice(&ptr, 0, NULL, 1); |
523 | |
524 | return !!data; |
525 | } |
526 | |
527 | SEC("?cgroup_skb/egress" ) |
528 | int test_dynptr_skb_strcmp(struct __sk_buff *skb) |
529 | { |
530 | struct bpf_dynptr ptr; |
531 | char *data; |
532 | |
533 | if (bpf_dynptr_from_skb(skb, 0, &ptr)) { |
534 | err = 1; |
535 | return 1; |
536 | } |
537 | |
538 | /* This may return NULL. SKB may require a buffer */ |
539 | data = bpf_dynptr_slice(&ptr, 0, NULL, 10); |
540 | if (data) { |
541 | bpf_strncmp(data, 10, "foo" ); |
542 | return 1; |
543 | } |
544 | |
545 | return 1; |
546 | } |
547 | |