1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ |
3 | |
4 | #include <vmlinux.h> |
5 | #include <bpf/bpf_tracing.h> |
6 | #include <bpf/bpf_helpers.h> |
7 | |
8 | #include "bpf_misc.h" |
9 | #include "cpumask_common.h" |
10 | |
/* GPL license declaration: required to use GPL-only kfuncs and helpers. */
char _license[] SEC("license") = "GPL";

/*
 * Written by the userspace test runner before attaching:
 * pid     - PID of the test task; programs no-op for all other tasks.
 * nr_cpus - CPU count, used as the "no valid CPU" sentinel bound in tests.
 */
int pid, nr_cpus;
14 | |
15 | static bool is_test_task(void) |
16 | { |
17 | int cur_pid = bpf_get_current_pid_tgid() >> 32; |
18 | |
19 | return pid == cur_pid; |
20 | } |
21 | |
22 | static bool create_cpumask_set(struct bpf_cpumask **out1, |
23 | struct bpf_cpumask **out2, |
24 | struct bpf_cpumask **out3, |
25 | struct bpf_cpumask **out4) |
26 | { |
27 | struct bpf_cpumask *mask1, *mask2, *mask3, *mask4; |
28 | |
29 | mask1 = create_cpumask(); |
30 | if (!mask1) |
31 | return false; |
32 | |
33 | mask2 = create_cpumask(); |
34 | if (!mask2) { |
35 | bpf_cpumask_release(mask1); |
36 | err = 3; |
37 | return false; |
38 | } |
39 | |
40 | mask3 = create_cpumask(); |
41 | if (!mask3) { |
42 | bpf_cpumask_release(mask1); |
43 | bpf_cpumask_release(mask2); |
44 | err = 4; |
45 | return false; |
46 | } |
47 | |
48 | mask4 = create_cpumask(); |
49 | if (!mask4) { |
50 | bpf_cpumask_release(mask1); |
51 | bpf_cpumask_release(mask2); |
52 | bpf_cpumask_release(mask3); |
53 | err = 5; |
54 | return false; |
55 | } |
56 | |
57 | *out1 = mask1; |
58 | *out2 = mask2; |
59 | *out3 = mask3; |
60 | *out4 = mask4; |
61 | |
62 | return true; |
63 | } |
64 | |
65 | SEC("tp_btf/task_newtask" ) |
66 | int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags) |
67 | { |
68 | struct bpf_cpumask *cpumask; |
69 | |
70 | if (!is_test_task()) |
71 | return 0; |
72 | |
73 | cpumask = create_cpumask(); |
74 | if (!cpumask) |
75 | return 0; |
76 | |
77 | bpf_cpumask_release(cpumask); |
78 | return 0; |
79 | } |
80 | |
81 | SEC("tp_btf/task_newtask" ) |
82 | int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags) |
83 | { |
84 | struct bpf_cpumask *cpumask; |
85 | |
86 | if (!is_test_task()) |
87 | return 0; |
88 | |
89 | cpumask = create_cpumask(); |
90 | if (!cpumask) |
91 | return 0; |
92 | |
93 | bpf_cpumask_set_cpu(0, cpumask); |
94 | if (!bpf_cpumask_test_cpu(0, cast(cpumask))) { |
95 | err = 3; |
96 | goto release_exit; |
97 | } |
98 | |
99 | bpf_cpumask_clear_cpu(0, cpumask); |
100 | if (bpf_cpumask_test_cpu(0, cast(cpumask))) { |
101 | err = 4; |
102 | goto release_exit; |
103 | } |
104 | |
105 | release_exit: |
106 | bpf_cpumask_release(cpumask); |
107 | return 0; |
108 | } |
109 | |
110 | SEC("tp_btf/task_newtask" ) |
111 | int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags) |
112 | { |
113 | struct bpf_cpumask *cpumask; |
114 | |
115 | if (!is_test_task()) |
116 | return 0; |
117 | |
118 | cpumask = create_cpumask(); |
119 | if (!cpumask) |
120 | return 0; |
121 | |
122 | bpf_cpumask_setall(cpumask); |
123 | if (!bpf_cpumask_full(cast(cpumask))) { |
124 | err = 3; |
125 | goto release_exit; |
126 | } |
127 | |
128 | bpf_cpumask_clear(cpumask); |
129 | if (!bpf_cpumask_empty(cast(cpumask))) { |
130 | err = 4; |
131 | goto release_exit; |
132 | } |
133 | |
134 | release_exit: |
135 | bpf_cpumask_release(cpumask); |
136 | return 0; |
137 | } |
138 | |
139 | SEC("tp_btf/task_newtask" ) |
140 | int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags) |
141 | { |
142 | struct bpf_cpumask *cpumask; |
143 | |
144 | if (!is_test_task()) |
145 | return 0; |
146 | |
147 | cpumask = create_cpumask(); |
148 | if (!cpumask) |
149 | return 0; |
150 | |
151 | if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) { |
152 | err = 3; |
153 | goto release_exit; |
154 | } |
155 | |
156 | if (bpf_cpumask_first_zero(cast(cpumask)) != 0) { |
157 | bpf_printk("first zero: %d" , bpf_cpumask_first_zero(cast(cpumask))); |
158 | err = 4; |
159 | goto release_exit; |
160 | } |
161 | |
162 | bpf_cpumask_set_cpu(0, cpumask); |
163 | if (bpf_cpumask_first(cast(cpumask)) != 0) { |
164 | err = 5; |
165 | goto release_exit; |
166 | } |
167 | |
168 | if (bpf_cpumask_first_zero(cast(cpumask)) != 1) { |
169 | err = 6; |
170 | goto release_exit; |
171 | } |
172 | |
173 | release_exit: |
174 | bpf_cpumask_release(cpumask); |
175 | return 0; |
176 | } |
177 | |
178 | SEC("tp_btf/task_newtask" ) |
179 | int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags) |
180 | { |
181 | struct bpf_cpumask *mask1, *mask2; |
182 | u32 first; |
183 | |
184 | if (!is_test_task()) |
185 | return 0; |
186 | |
187 | mask1 = create_cpumask(); |
188 | if (!mask1) |
189 | return 0; |
190 | |
191 | mask2 = create_cpumask(); |
192 | if (!mask2) |
193 | goto release_exit; |
194 | |
195 | bpf_cpumask_set_cpu(0, mask1); |
196 | bpf_cpumask_set_cpu(1, mask2); |
197 | |
198 | first = bpf_cpumask_first_and(cast(mask1), cast(mask2)); |
199 | if (first <= 1) |
200 | err = 3; |
201 | |
202 | release_exit: |
203 | if (mask1) |
204 | bpf_cpumask_release(mask1); |
205 | if (mask2) |
206 | bpf_cpumask_release(mask2); |
207 | return 0; |
208 | } |
209 | |
210 | SEC("tp_btf/task_newtask" ) |
211 | int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags) |
212 | { |
213 | struct bpf_cpumask *cpumask; |
214 | |
215 | if (!is_test_task()) |
216 | return 0; |
217 | |
218 | cpumask = create_cpumask(); |
219 | if (!cpumask) |
220 | return 0; |
221 | |
222 | if (bpf_cpumask_test_and_set_cpu(0, cpumask)) { |
223 | err = 3; |
224 | goto release_exit; |
225 | } |
226 | |
227 | if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) { |
228 | err = 4; |
229 | goto release_exit; |
230 | } |
231 | |
232 | if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) { |
233 | err = 5; |
234 | goto release_exit; |
235 | } |
236 | |
237 | release_exit: |
238 | bpf_cpumask_release(cpumask); |
239 | return 0; |
240 | } |
241 | |
242 | SEC("tp_btf/task_newtask" ) |
243 | int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags) |
244 | { |
245 | struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; |
246 | |
247 | if (!is_test_task()) |
248 | return 0; |
249 | |
250 | if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2)) |
251 | return 0; |
252 | |
253 | bpf_cpumask_set_cpu(0, mask1); |
254 | bpf_cpumask_set_cpu(1, mask2); |
255 | |
256 | if (bpf_cpumask_and(dst1, cast(cpumask: mask1), cast(cpumask: mask2))) { |
257 | err = 6; |
258 | goto release_exit; |
259 | } |
260 | if (!bpf_cpumask_empty(cast(cpumask: dst1))) { |
261 | err = 7; |
262 | goto release_exit; |
263 | } |
264 | |
265 | bpf_cpumask_or(dst1, cast(cpumask: mask1), cast(cpumask: mask2)); |
266 | if (!bpf_cpumask_test_cpu(0, cast(cpumask: dst1))) { |
267 | err = 8; |
268 | goto release_exit; |
269 | } |
270 | if (!bpf_cpumask_test_cpu(1, cast(cpumask: dst1))) { |
271 | err = 9; |
272 | goto release_exit; |
273 | } |
274 | |
275 | bpf_cpumask_xor(dst2, cast(cpumask: mask1), cast(cpumask: mask2)); |
276 | if (!bpf_cpumask_equal(cast(cpumask: dst1), cast(cpumask: dst2))) { |
277 | err = 10; |
278 | goto release_exit; |
279 | } |
280 | |
281 | release_exit: |
282 | bpf_cpumask_release(mask1); |
283 | bpf_cpumask_release(mask2); |
284 | bpf_cpumask_release(dst1); |
285 | bpf_cpumask_release(dst2); |
286 | return 0; |
287 | } |
288 | |
289 | SEC("tp_btf/task_newtask" ) |
290 | int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags) |
291 | { |
292 | struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; |
293 | |
294 | if (!is_test_task()) |
295 | return 0; |
296 | |
297 | if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2)) |
298 | return 0; |
299 | |
300 | bpf_cpumask_set_cpu(0, mask1); |
301 | bpf_cpumask_set_cpu(1, mask2); |
302 | if (bpf_cpumask_intersects(cast(cpumask: mask1), cast(cpumask: mask2))) { |
303 | err = 6; |
304 | goto release_exit; |
305 | } |
306 | |
307 | bpf_cpumask_or(dst1, cast(cpumask: mask1), cast(cpumask: mask2)); |
308 | if (!bpf_cpumask_subset(cast(cpumask: mask1), cast(cpumask: dst1))) { |
309 | err = 7; |
310 | goto release_exit; |
311 | } |
312 | |
313 | if (!bpf_cpumask_subset(cast(cpumask: mask2), cast(cpumask: dst1))) { |
314 | err = 8; |
315 | goto release_exit; |
316 | } |
317 | |
318 | if (bpf_cpumask_subset(cast(cpumask: dst1), cast(cpumask: mask1))) { |
319 | err = 9; |
320 | goto release_exit; |
321 | } |
322 | |
323 | release_exit: |
324 | bpf_cpumask_release(mask1); |
325 | bpf_cpumask_release(mask2); |
326 | bpf_cpumask_release(dst1); |
327 | bpf_cpumask_release(dst2); |
328 | return 0; |
329 | } |
330 | |
331 | SEC("tp_btf/task_newtask" ) |
332 | int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) |
333 | { |
334 | struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; |
335 | int cpu; |
336 | |
337 | if (!is_test_task()) |
338 | return 0; |
339 | |
340 | if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2)) |
341 | return 0; |
342 | |
343 | bpf_cpumask_set_cpu(0, mask1); |
344 | bpf_cpumask_set_cpu(1, mask2); |
345 | bpf_cpumask_or(dst1, cast(cpumask: mask1), cast(cpumask: mask2)); |
346 | |
347 | cpu = bpf_cpumask_any_distribute(cast(cpumask: mask1)); |
348 | if (cpu != 0) { |
349 | err = 6; |
350 | goto release_exit; |
351 | } |
352 | |
353 | cpu = bpf_cpumask_any_distribute(cast(cpumask: dst2)); |
354 | if (cpu < nr_cpus) { |
355 | err = 7; |
356 | goto release_exit; |
357 | } |
358 | |
359 | bpf_cpumask_copy(dst2, cast(cpumask: dst1)); |
360 | if (!bpf_cpumask_equal(cast(cpumask: dst1), cast(cpumask: dst2))) { |
361 | err = 8; |
362 | goto release_exit; |
363 | } |
364 | |
365 | cpu = bpf_cpumask_any_distribute(cast(cpumask: dst2)); |
366 | if (cpu > 1) { |
367 | err = 9; |
368 | goto release_exit; |
369 | } |
370 | |
371 | cpu = bpf_cpumask_any_and_distribute(cast(cpumask: mask1), cast(cpumask: mask2)); |
372 | if (cpu < nr_cpus) { |
373 | err = 10; |
374 | goto release_exit; |
375 | } |
376 | |
377 | release_exit: |
378 | bpf_cpumask_release(mask1); |
379 | bpf_cpumask_release(mask2); |
380 | bpf_cpumask_release(dst1); |
381 | bpf_cpumask_release(dst2); |
382 | return 0; |
383 | } |
384 | |
385 | SEC("tp_btf/task_newtask" ) |
386 | int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags) |
387 | { |
388 | struct bpf_cpumask *cpumask; |
389 | |
390 | cpumask = create_cpumask(); |
391 | if (!cpumask) |
392 | return 0; |
393 | |
394 | if (cpumask_map_insert(mask: cpumask)) |
395 | err = 3; |
396 | |
397 | return 0; |
398 | } |
399 | |
400 | SEC("tp_btf/task_newtask" ) |
401 | int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags) |
402 | { |
403 | struct bpf_cpumask *cpumask; |
404 | struct __cpumask_map_value *v; |
405 | |
406 | cpumask = create_cpumask(); |
407 | if (!cpumask) |
408 | return 0; |
409 | |
410 | if (cpumask_map_insert(mask: cpumask)) { |
411 | err = 3; |
412 | return 0; |
413 | } |
414 | |
415 | v = cpumask_map_value_lookup(); |
416 | if (!v) { |
417 | err = 4; |
418 | return 0; |
419 | } |
420 | |
421 | cpumask = bpf_kptr_xchg(&v->cpumask, NULL); |
422 | if (cpumask) |
423 | bpf_cpumask_release(cpumask); |
424 | else |
425 | err = 5; |
426 | |
427 | return 0; |
428 | } |
429 | |
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	/* Publish the new mask; no previous mask is expected to be present. */
	prev = bpf_kptr_xchg(&global_mask, local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	/*
	 * Reading the global kptr back requires an RCU read-side critical
	 * section; the pointer loaded from global_mask is only valid until
	 * bpf_rcu_read_unlock().
	 */
	bpf_rcu_read_lock();
	local = global_mask;
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	/* Read-only use of the RCU-protected pointer inside the section. */
	bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
	bpf_rcu_read_unlock();

	return 0;
}
462 | |
463 | SEC("tp_btf/task_newtask" ) |
464 | int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags) |
465 | { |
466 | struct bpf_cpumask *local; |
467 | |
468 | if (!is_test_task()) |
469 | return 0; |
470 | |
471 | local = create_cpumask(); |
472 | if (!local) |
473 | return 0; |
474 | |
475 | if (bpf_cpumask_weight(cast(cpumask: local)) != 0) { |
476 | err = 3; |
477 | goto out; |
478 | } |
479 | |
480 | bpf_cpumask_set_cpu(0, local); |
481 | if (bpf_cpumask_weight(cast(cpumask: local)) != 1) { |
482 | err = 4; |
483 | goto out; |
484 | } |
485 | |
486 | /* |
487 | * Make sure that adding additional CPUs changes the weight. Test to |
488 | * see whether the CPU was set to account for running on UP machines. |
489 | */ |
490 | bpf_cpumask_set_cpu(1, local); |
491 | if (bpf_cpumask_test_cpu(1, cast(cpumask: local)) && bpf_cpumask_weight(cast(cpumask: local)) != 2) { |
492 | err = 5; |
493 | goto out; |
494 | } |
495 | |
496 | bpf_cpumask_clear(local); |
497 | if (bpf_cpumask_weight(cast(cpumask: local)) != 0) { |
498 | err = 6; |
499 | goto out; |
500 | } |
501 | out: |
502 | bpf_cpumask_release(local); |
503 | return 0; |
504 | } |
505 | |
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;

	/* Either allocation may fail (return NULL); both results are used
	 * only after the combined NULL check below, and each is released
	 * only under its own NULL guard — the point of this test.
	 */
	mask1 = bpf_cpumask_create();
	mask2 = bpf_cpumask_create();

	if (!mask1 || !mask2)
		goto free_masks_return;

	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);

free_masks_return:
	/* Release only the masks that were successfully created. */
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}
528 | |