// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tc_wrapper.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS \
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

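/*
 * Note: a filter instance is configured either as classic BPF, via
 * TCA_BPF_OPS_LEN/TCA_BPF_OPS carrying raw sock_filter instructions, or as
 * eBPF, via TCA_BPF_FD referring to a program already loaded through the
 * bpf(2) syscall (optionally named via TCA_BPF_NAME). The two modes are
 * mutually exclusive; see cls_bpf_prog_from_ops() and
 * cls_bpf_prog_from_efd() below.
 */
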
/* Sanitize the verdict of a direct-action program: anything other than a
 * known TC action code falls back to TC_ACT_UNSPEC, i.e. continue with the
 * next filter.
 */
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
				       const struct tcf_proto *tp,
				       struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
		}
		/* The program may have written skb->tstamp; if it is now
		 * zero, clear the mono delivery-time flag as well.
		 */
		if (unlikely(!skb->tstamp && skb->mono_delivery_time))
			skb->mono_delivery_time = 0;

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

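/*
 * Illustrative example (not part of this file): a minimal direct-action
 * eBPF classifier as it could be written against the UAPI headers. With
 * TCA_BPF_FLAG_ACT_DIRECT set, the program's return value is the TC
 * verdict, and it may select a class by writing the minor classid to
 * skb->tc_classid, which cls_bpf_classify() above merges with the major
 * part of the configured classid:
 *
 *	SEC("classifier")
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		skb->tc_classid = 0x10;	// minor handle, e.g. 1:10
 *		return TC_ACT_OK;
 *	}
 */
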
/* Only classic BPF keeps the original sock_filter ops around, so a NULL
 * bpf_ops pointer identifies an eBPF program.
 */
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

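/*
 * Offload note: cls_bpf_offload_cmd() above installs, replaces or removes
 * the program in hardware. If an install fails, it calls itself with
 * prog/oldprog swapped to roll back the partial change, and a filter
 * created with TCA_CLS_FLAGS_SKIP_SW that did not make it into hardware is
 * rejected with -EINVAL, since it would otherwise match nothing.
 */
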
static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

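/*
 * Teardown note: the data path walks head->plist under RCU, so a program
 * is unlinked with list_del_rcu() and only freed after a grace period.
 * When a reference on the netns can still be taken, the actual free is
 * deferred to a workqueue (cls_bpf_delete_prog_work() above takes the RTNL
 * lock); otherwise the program is freed directly.
 */
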
static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

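/*
 * Userspace usage sketch (illustrative; exact tc syntax may vary by
 * version):
 *
 *	# eBPF: load a program from an ELF object and run it in
 *	# direct-action mode
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf obj prog.o sec classifier da
 *
 *	# classic BPF: a single "return 0xffffffff" (-1) instruction, i.e.
 *	# match everything and use the default classid (see
 *	# cls_bpf_classify() above)
 *	tc filter add dev eth0 parent 1: bpf bytecode '1,6 0 0 4294967295,' \
 *		classid 1:10
 */
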
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	bool is_bpf, is_ebpf, have_exts = false;
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	bool bound_to_filter = false;
	struct cls_bpf_prog *prog;
	u32 gen_flags = 0;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto errout_idr;
	}

	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
				flags, extack);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout_idr;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout_idr;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
		bound_to_filter = true;
	}

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &prog->res);
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

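/*
 * Update note: cls_bpf_change() above replaces an existing filter without
 * disturbing concurrent readers: the new program takes over the handle via
 * idr_replace(), is swapped into the list with list_replace_rcu(), and the
 * old program is queued for RCU-deferred destruction.
 */
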
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	tc_cls_bind_class(classid, cl, q, &prog->res, base);
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (!tc_cls_stats_dump(tp, arg, prog))
			break;
	}
}

static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.owner		= THIS_MODULE,
	.classify	= cls_bpf_classify,
	.init		= cls_bpf_init,
	.destroy	= cls_bpf_destroy,
	.get		= cls_bpf_get,
	.change		= cls_bpf_change,
	.delete		= cls_bpf_delete,
	.walk		= cls_bpf_walk,
	.reoffload	= cls_bpf_reoffload,
	.dump		= cls_bpf_dump,
	.bind_class	= cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
