1 | /* |
2 | * Copyright (C) 2017-2018 Netronome Systems, Inc. |
3 | * |
 * This software is licensed under the GNU General Public License Version 2,
5 | * June 1991 as shown in the file COPYING in the top-level directory of this |
6 | * source tree. |
7 | * |
8 | * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" |
9 | * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, |
10 | * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
11 | * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE |
12 | * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME |
13 | * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. |
14 | */ |
15 | |
16 | #include <linux/bpf.h> |
17 | #include <linux/bpf_verifier.h> |
18 | #include <linux/bug.h> |
19 | #include <linux/kdev_t.h> |
20 | #include <linux/list.h> |
21 | #include <linux/lockdep.h> |
22 | #include <linux/netdevice.h> |
23 | #include <linux/printk.h> |
24 | #include <linux/proc_ns.h> |
25 | #include <linux/rhashtable.h> |
26 | #include <linux/rtnetlink.h> |
27 | #include <linux/rwsem.h> |
28 | #include <net/xdp.h> |
29 | |
30 | /* Protects offdevs, members of bpf_offload_netdev and offload members |
31 | * of all progs. |
32 | * RTNL lock cannot be taken when holding this lock. |
33 | */ |
34 | static DECLARE_RWSEM(bpf_devs_lock); |
35 | |
36 | struct bpf_offload_dev { |
37 | const struct bpf_prog_offload_ops *ops; |
38 | struct list_head netdevs; |
39 | void *priv; |
40 | }; |
41 | |
42 | struct bpf_offload_netdev { |
43 | struct rhash_head l; |
44 | struct net_device *netdev; |
45 | struct bpf_offload_dev *offdev; /* NULL when bound-only */ |
46 | struct list_head progs; |
47 | struct list_head maps; |
48 | struct list_head offdev_netdevs; |
49 | }; |
50 | |
51 | static const struct rhashtable_params offdevs_params = { |
52 | .nelem_hint = 4, |
53 | .key_len = sizeof(struct net_device *), |
54 | .key_offset = offsetof(struct bpf_offload_netdev, netdev), |
55 | .head_offset = offsetof(struct bpf_offload_netdev, l), |
56 | .automatic_shrinking = true, |
57 | }; |
58 | |
59 | static struct rhashtable offdevs; |
60 | |
61 | static int bpf_dev_offload_check(struct net_device *netdev) |
62 | { |
63 | if (!netdev) |
64 | return -EINVAL; |
65 | if (!netdev->netdev_ops->ndo_bpf) |
66 | return -EOPNOTSUPP; |
67 | return 0; |
68 | } |
69 | |
70 | static struct bpf_offload_netdev * |
71 | bpf_offload_find_netdev(struct net_device *netdev) |
72 | { |
73 | lockdep_assert_held(&bpf_devs_lock); |
74 | |
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
76 | } |
77 | |
78 | static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
79 | struct net_device *netdev) |
80 | { |
81 | struct bpf_offload_netdev *ondev; |
82 | int err; |
83 | |
	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
105 | return err; |
106 | } |
107 | |
108 | static void __bpf_prog_offload_destroy(struct bpf_prog *prog) |
109 | { |
110 | struct bpf_prog_offload *offload = prog->aux->offload; |
111 | |
112 | if (offload->dev_state) |
113 | offload->offdev->ops->destroy(prog); |
114 | |
	list_del_init(&offload->offloads);
	kfree(offload);
117 | prog->aux->offload = NULL; |
118 | } |
119 | |
120 | static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, |
121 | enum bpf_netdev_command cmd) |
122 | { |
123 | struct netdev_bpf data = {}; |
124 | struct net_device *netdev; |
125 | |
126 | ASSERT_RTNL(); |
127 | |
128 | data.command = cmd; |
129 | data.offmap = offmap; |
130 | /* Caller must make sure netdev is valid */ |
131 | netdev = offmap->netdev; |
132 | |
133 | return netdev->netdev_ops->ndo_bpf(netdev, &data); |
134 | } |
135 | |
136 | static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) |
137 | { |
138 | WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); |
139 | /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */ |
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
142 | offmap->netdev = NULL; |
143 | } |
144 | |
145 | static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, |
146 | struct net_device *netdev) |
147 | { |
148 | struct bpf_offload_netdev *ondev, *altdev = NULL; |
149 | struct bpf_offloaded_map *offmap, *mtmp; |
150 | struct bpf_prog_offload *offload, *ptmp; |
151 | |
152 | ASSERT_RTNL(); |
153 | |
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
155 | if (WARN_ON(!ondev)) |
156 | return; |
157 | |
158 | WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); |
159 | |
160 | /* Try to move the objects to another netdev of the device */ |
161 | if (offdev) { |
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
186 | } |
187 | |
188 | static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev) |
189 | { |
190 | struct bpf_offload_netdev *ondev; |
191 | struct bpf_prog_offload *offload; |
192 | int err; |
193 | |
	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	/* When program is offloaded require presence of "true"
	 * bpf_offload_netdev, avoid the one created for !ondev case below.
	 */
	if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
		err = -EINVAL;
		goto err_free;
	}
	if (!ondev) {
		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
225 | return err; |
226 | } |
227 | |
228 | int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) |
229 | { |
230 | struct net_device *netdev; |
231 | int err; |
232 | |
233 | if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && |
234 | attr->prog_type != BPF_PROG_TYPE_XDP) |
235 | return -EINVAL; |
236 | |
237 | if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS)) |
238 | return -EINVAL; |
239 | |
240 | /* Frags are allowed only if program is dev-bound-only, but not |
241 | * if it is requesting bpf offload. |
242 | */ |
243 | if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS && |
244 | !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)) |
245 | return -EINVAL; |
246 | |
247 | if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS && |
248 | attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY) |
249 | return -EINVAL; |
250 | |
	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
267 | return err; |
268 | } |
269 | |
270 | int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog) |
271 | { |
272 | int err; |
273 | |
	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
293 | return err; |
294 | } |
295 | |
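/* Verifier/driver interaction for offloaded programs proceeds in a fixed
 * order: ->prepare() once before verification, ->insn_hook() after each
 * verified instruction, ->finalize() when verification succeeds, then
 * optionally ->replace_insn()/->remove_insns() during dead-code
 * optimizations, ->translate() at "JIT" time, and ->destroy() on program
 * free. A driver supplies these through a bpf_prog_offload_ops instance;
 * a minimal sketch with hypothetical callback names:
 *
 *	static const struct bpf_prog_offload_ops foo_bpf_dev_ops = {
 *		.insn_hook	= foo_verify_insn,
 *		.finalize	= foo_bpf_finalize,
 *		.replace_insn	= foo_bpf_replace_insn,
 *		.remove_insns	= foo_bpf_remove_insns,
 *		.prepare	= foo_bpf_verifier_prep,
 *		.translate	= foo_bpf_translate,
 *		.destroy	= foo_bpf_destroy,
 *	};
 */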
296 | int bpf_prog_offload_verifier_prep(struct bpf_prog *prog) |
297 | { |
298 | struct bpf_prog_offload *offload; |
299 | int ret = -ENODEV; |
300 | |
	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);
308 | |
309 | return ret; |
310 | } |
311 | |
312 | int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, |
313 | int insn_idx, int prev_insn_idx) |
314 | { |
315 | struct bpf_prog_offload *offload; |
316 | int ret = -ENODEV; |
317 | |
	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);
324 | |
325 | return ret; |
326 | } |
327 | |
328 | int bpf_prog_offload_finalize(struct bpf_verifier_env *env) |
329 | { |
330 | struct bpf_prog_offload *offload; |
331 | int ret = -ENODEV; |
332 | |
	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);
342 | |
343 | return ret; |
344 | } |
345 | |
346 | void |
347 | bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, |
348 | struct bpf_insn *insn) |
349 | { |
350 | const struct bpf_prog_offload_ops *ops; |
351 | struct bpf_prog_offload *offload; |
352 | int ret = -EOPNOTSUPP; |
353 | |
	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
363 | } |
364 | |
365 | void |
366 | bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) |
367 | { |
368 | struct bpf_prog_offload *offload; |
369 | int ret = -EOPNOTSUPP; |
370 | |
	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
379 | } |
380 | |
381 | void bpf_prog_dev_bound_destroy(struct bpf_prog *prog) |
382 | { |
383 | struct bpf_offload_netdev *ondev; |
384 | struct net_device *netdev; |
385 | |
386 | rtnl_lock(); |
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
399 | rtnl_unlock(); |
400 | } |
401 | |
402 | static int bpf_prog_offload_translate(struct bpf_prog *prog) |
403 | { |
404 | struct bpf_prog_offload *offload; |
405 | int ret = -ENODEV; |
406 | |
	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);
412 | |
413 | return ret; |
414 | } |
415 | |
416 | static unsigned int bpf_prog_warn_on_exec(const void *ctx, |
417 | const struct bpf_insn *insn) |
418 | { |
	WARN(1, "attempt to execute device eBPF program on the host!");
420 | return 0; |
421 | } |
422 | |
423 | int bpf_prog_offload_compile(struct bpf_prog *prog) |
424 | { |
425 | prog->bpf_func = bpf_prog_warn_on_exec; |
426 | |
427 | return bpf_prog_offload_translate(prog); |
428 | } |
429 | |
430 | struct ns_get_path_bpf_prog_args { |
431 | struct bpf_prog *prog; |
432 | struct bpf_prog_info *info; |
433 | }; |
434 | |
435 | static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data) |
436 | { |
437 | struct ns_get_path_bpf_prog_args *args = private_data; |
438 | struct bpf_prog_aux *aux = args->prog->aux; |
439 | struct ns_common *ns; |
440 | struct net *net; |
441 | |
442 | rtnl_lock(); |
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
456 | rtnl_unlock(); |
457 | |
458 | return ns; |
459 | } |
460 | |
461 | int bpf_prog_offload_info_fill(struct bpf_prog_info *info, |
462 | struct bpf_prog *prog) |
463 | { |
464 | struct ns_get_path_bpf_prog_args args = { |
465 | .prog = prog, |
466 | .info = info, |
467 | }; |
468 | struct bpf_prog_aux *aux = prog->aux; |
469 | struct inode *ns_inode; |
470 | struct path ns_path; |
471 | char __user *uinsns; |
472 | int res; |
473 | u32 ulen; |
474 | |
	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
504 | info->netns_ino = ns_inode->i_ino; |
505 | path_put(&ns_path); |
506 | |
507 | return 0; |
508 | } |
509 | |
510 | const struct bpf_prog_ops bpf_offload_prog_ops = { |
511 | }; |
512 | |
513 | struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) |
514 | { |
515 | struct net *net = current->nsproxy->net_ns; |
516 | struct bpf_offload_netdev *ondev; |
517 | struct bpf_offloaded_map *offmap; |
518 | int err; |
519 | |
520 | if (!capable(CAP_SYS_ADMIN)) |
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
560 | } |
561 | |
562 | void bpf_map_offload_map_free(struct bpf_map *map) |
563 | { |
564 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
565 | |
566 | rtnl_lock(); |
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
574 | } |
575 | |
576 | u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) |
577 | { |
578 | /* The memory dynamically allocated in netdev dev_ops is not counted */ |
579 | return sizeof(struct bpf_offloaded_map); |
580 | } |
581 | |
582 | int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value) |
583 | { |
584 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
585 | int ret = -ENODEV; |
586 | |
	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);
591 | |
592 | return ret; |
593 | } |
594 | |
595 | int bpf_map_offload_update_elem(struct bpf_map *map, |
596 | void *key, void *value, u64 flags) |
597 | { |
598 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
599 | int ret = -ENODEV; |
600 | |
601 | if (unlikely(flags > BPF_EXIST)) |
602 | return -EINVAL; |
603 | |
	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);
609 | |
610 | return ret; |
611 | } |
612 | |
613 | int bpf_map_offload_delete_elem(struct bpf_map *map, void *key) |
614 | { |
615 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
616 | int ret = -ENODEV; |
617 | |
	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);
622 | |
623 | return ret; |
624 | } |
625 | |
626 | int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key) |
627 | { |
628 | struct bpf_offloaded_map *offmap = map_to_offmap(map); |
629 | int ret = -ENODEV; |
630 | |
	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);
635 | |
636 | return ret; |
637 | } |
638 | |
639 | struct ns_get_path_bpf_map_args { |
640 | struct bpf_offloaded_map *offmap; |
641 | struct bpf_map_info *info; |
642 | }; |
643 | |
644 | static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data) |
645 | { |
646 | struct ns_get_path_bpf_map_args *args = private_data; |
647 | struct ns_common *ns; |
648 | struct net *net; |
649 | |
650 | rtnl_lock(); |
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
664 | rtnl_unlock(); |
665 | |
666 | return ns; |
667 | } |
668 | |
669 | int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map) |
670 | { |
671 | struct ns_get_path_bpf_map_args args = { |
672 | .offmap = map_to_offmap(map), |
673 | .info = info, |
674 | }; |
675 | struct inode *ns_inode; |
676 | struct path ns_path; |
677 | int res; |
678 | |
	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
688 | info->netns_ino = ns_inode->i_ino; |
689 | path_put(&ns_path); |
690 | |
691 | return 0; |
692 | } |
693 | |
694 | static bool __bpf_offload_dev_match(struct bpf_prog *prog, |
695 | struct net_device *netdev) |
696 | { |
697 | struct bpf_offload_netdev *ondev1, *ondev2; |
698 | struct bpf_prog_offload *offload; |
699 | |
	if (!bpf_prog_is_dev_bound(prog->aux))
701 | return false; |
702 | |
703 | offload = prog->aux->offload; |
704 | if (!offload) |
705 | return false; |
706 | if (offload->netdev == netdev) |
707 | return true; |
708 | |
	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);
711 | |
712 | return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev; |
713 | } |
714 | |
715 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev) |
716 | { |
717 | bool ret; |
718 | |
	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);
722 | |
723 | return ret; |
724 | } |
725 | EXPORT_SYMBOL_GPL(bpf_offload_dev_match); |
726 | |
727 | bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) |
728 | { |
729 | bool ret; |
730 | |
	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);
739 | |
740 | return ret; |
741 | } |
742 | |
743 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) |
744 | { |
745 | struct bpf_offloaded_map *offmap; |
746 | bool ret; |
747 | |
748 | if (!bpf_map_is_offloaded(map)) |
749 | return bpf_map_offload_neutral(map); |
750 | offmap = map_to_offmap(map); |
751 | |
	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);
755 | |
756 | return ret; |
757 | } |
758 | |
759 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
760 | struct net_device *netdev) |
761 | { |
762 | int err; |
763 | |
	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
767 | return err; |
768 | } |
769 | EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); |
770 | |
771 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, |
772 | struct net_device *netdev) |
773 | { |
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
777 | } |
778 | EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); |
779 | |
780 | struct bpf_offload_dev * |
781 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv) |
782 | { |
783 | struct bpf_offload_dev *offdev; |
784 | |
	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);
792 | |
793 | return offdev; |
794 | } |
795 | EXPORT_SYMBOL_GPL(bpf_offload_dev_create); |
796 | |
797 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev) |
798 | { |
799 | WARN_ON(!list_empty(&offdev->netdevs)); |
	kfree(offdev);
801 | } |
802 | EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy); |
803 | |
804 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev) |
805 | { |
806 | return offdev->priv; |
807 | } |
808 | EXPORT_SYMBOL_GPL(bpf_offload_dev_priv); |
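
/* Typical driver usage of this API (modelled on nfp; names hypothetical):
 * create one bpf_offload_dev per device at probe time, register every
 * netdev that shares its offload resources, and tear down in reverse
 * order on remove:
 *
 *	bdev = bpf_offload_dev_create(&foo_bpf_dev_ops, foo_priv);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	err = bpf_offload_dev_netdev_register(bdev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);
 */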
809 | |
810 | void bpf_dev_bound_netdev_unregister(struct net_device *dev) |
811 | { |
812 | struct bpf_offload_netdev *ondev; |
813 | |
814 | ASSERT_RTNL(); |
815 | |
	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
821 | } |
822 | |
823 | int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
824 | struct bpf_prog_aux *prog_aux) |
825 | { |
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}
833 | return -EINVAL; |
834 | } |
835 | |
836 | return 0; |
837 | } |
838 | |
839 | void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id) |
840 | { |
841 | const struct xdp_metadata_ops *ops; |
842 | void *p = NULL; |
843 | |
844 | /* We don't hold bpf_devs_lock while resolving several |
845 | * kfuncs and can race with the unregister_netdevice(). |
846 | * We rely on bpf_dev_bound_match() check at attach |
847 | * to render this program unusable. |
848 | */ |
	down_read(&bpf_devs_lock);
850 | if (!prog->aux->offload) |
851 | goto out; |
852 | |
853 | ops = prog->aux->offload->netdev->xdp_metadata_ops; |
854 | if (!ops) |
855 | goto out; |
856 | |
857 | #define XDP_METADATA_KFUNC(name, _, __, xmo) \ |
858 | if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo; |
859 | XDP_METADATA_KFUNC_xxx |
860 | #undef XDP_METADATA_KFUNC |
861 | |
862 | out: |
	up_read(&bpf_devs_lock);
864 | |
865 | return p; |
866 | } |
867 | |
868 | static int __init bpf_offload_init(void) |
869 | { |
	return rhashtable_init(&offdevs, &offdevs_params);
871 | } |
872 | |
873 | core_initcall(bpf_offload_init); |
874 | |