// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors:     Jamal Hadi Salim 1999
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcx.h>

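/*
 * Overview: neither qdisc below queues packets itself; both exist purely as
 * attachment points for classifier/action (tcf) blocks. "ingress" hooks the
 * receive path only, while "clsact" provides both an ingress and an egress
 * hook. Roughly, from userspace (eth0 is just an example device):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall action drop
 *   tc filter add dev eth0 egress matchall action ok
 */
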
struct ingress_sched_data {
	struct tcf_block *block;
	struct tcf_block_ext_info block_info;
	struct mini_Qdisc_pair miniqp;
};

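/*
 * The ingress qdisc has no real classes. The class hooks below are mostly
 * stubs: ingress_find() returns an arbitrary non-zero handle so filters can
 * be bound, while leaf/walk/unbind intentionally do nothing.
 */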
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}

static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
					   struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}

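/*
 * Called by the tcf block layer whenever the head of the filter chain
 * changes; publishes the new tcf_proto list to the RCU-protected
 * mini_Qdisc so the ingress/egress fast path picks it up.
 */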
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	struct mini_Qdisc_pair *miniqp = priv;

	mini_qdisc_pair_swap(miniqp, tp_head);
}

static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}

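/*
 * ingress_init() enables the receive-path hook via the ingress static key,
 * fetches (or creates) the tcx bpf_mprog entry that is shared with tcx BPF
 * links, marks its mini qdisc active, and then acquires the tcf block that
 * filters attach to. The chain_head_change callback keeps the mini qdisc in
 * sync with the current head of the filter chain.
 */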
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	if (sch->parent != TC_H_INGRESS)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();

	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_set_active(entry, true);
	mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, true);

	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp, q->block);

	return 0;
}

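/*
 * Teardown mirrors ingress_init(): release the tcf block, deactivate the
 * mini qdisc in the tcx entry (freeing the entry if no tcx BPF programs
 * keep it alive), and drop the ingress static key reference.
 */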
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress);

	if (sch->parent != TC_H_INGRESS)
		return;

	tcf_block_put_ext(q->block, sch, &q->block_info);

	if (entry) {
		tcx_miniq_set_active(entry, false);
		if (!tcx_entry_is_active(entry)) {
			tcx_entry_update(dev, NULL, true);
			tcx_entry_free(entry);
		}
	}

	net_dec_ingress_queue();
}

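/*
 * The qdisc carries no parameters, so the dump is just an empty
 * TCA_OPTIONS nest.
 */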
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
	.flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf = ingress_leaf,
	.find = ingress_find,
	.walk = ingress_walk,
	.tcf_block = ingress_tcf_block,
	.bind_tcf = ingress_bind_filter,
	.unbind_tcf = ingress_unbind_filter,
};

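/*
 * No enqueue/dequeue hooks: these qdiscs never own packets. TCQ_F_INGRESS
 * tells the core this is an ingress-style qdisc, and TCQ_F_CPUSTATS
 * requests per-CPU byte/packet statistics.
 */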
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops = &ingress_class_ops,
	.id = "ingress",
	.priv_size = sizeof(struct ingress_sched_data),
	.static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init = ingress_init,
	.destroy = ingress_destroy,
	.dump = ingress_dump,
	.ingress_block_set = ingress_ingress_block_set,
	.ingress_block_get = ingress_ingress_block_get,
	.owner = THIS_MODULE,
};

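/*
 * clsact is the two-direction variant: it keeps separate tcf blocks and
 * mini qdisc pairs for the ingress and egress hooks of the same device.
 */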
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};

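/*
 * clsact exposes two pseudo-classes, TC_H_MIN_INGRESS (:fff2) and
 * TC_H_MIN_EGRESS (:fff3), which exist only so that filters can be bound
 * to one direction or the other.
 */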
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
	switch (TC_H_MIN(classid)) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return TC_H_MIN(classid);
	default:
		return 0;
	}
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_find(sch, classid);
}

static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	switch (cl) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
		return q->ingress_block;
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return q->egress_block;
	default:
		return NULL;
	}
}

static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->ingress_block_info.block_index = block_index;
}

static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->egress_block_info.block_index = block_index;
}

static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->ingress_block_info.block_index;
}

static u32 clsact_egress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->egress_block_info.block_index;
}

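/*
 * Same sequence as ingress_init(), done once per direction: enable the
 * static keys, set up the tcx entry and mini qdisc pair, then grab one tcf
 * block for ingress and another for egress.
 */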
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	if (sch->parent != TC_H_CLSACT)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_set_active(entry, true);
	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, true);

	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

	entry = tcx_entry_fetch_or_create(dev, false, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_set_active(entry, true);
	mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, false);

	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}

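/*
 * Unwind both directions: release the two tcf blocks, deactivate the
 * ingress and egress tcx entries (freeing them when unused), and drop the
 * static key references taken in clsact_init().
 */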
static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress);
	struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress);

	if (sch->parent != TC_H_CLSACT)
		return;

	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);

	if (ingress_entry) {
		tcx_miniq_set_active(ingress_entry, false);
		if (!tcx_entry_is_active(ingress_entry)) {
			tcx_entry_update(dev, NULL, true);
			tcx_entry_free(ingress_entry);
		}
	}

	if (egress_entry) {
		tcx_miniq_set_active(egress_entry, false);
		if (!tcx_entry_is_active(egress_entry)) {
			tcx_entry_update(dev, NULL, false);
			tcx_entry_free(egress_entry);
		}
	}

	net_dec_ingress_queue();
	net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
	.flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf = ingress_leaf,
	.find = clsact_find,
	.walk = ingress_walk,
	.tcf_block = clsact_tcf_block,
	.bind_tcf = clsact_bind_filter,
	.unbind_tcf = ingress_unbind_filter,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops = &clsact_class_ops,
	.id = "clsact",
	.priv_size = sizeof(struct clsact_sched_data),
	.static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init = clsact_init,
	.destroy = clsact_destroy,
	.dump = ingress_dump,
	.ingress_block_set = clsact_ingress_block_set,
	.egress_block_set = clsact_egress_block_set,
	.ingress_block_get = clsact_ingress_block_get,
	.egress_block_get = clsact_egress_block_get,
	.owner = THIS_MODULE,
};

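/*
 * Register both qdiscs together; if clsact registration fails, the already
 * registered ingress qdisc is unregistered so the module load fails cleanly.
 */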
static int __init ingress_module_init(void)
{
	int ret;

	ret = register_qdisc(&ingress_qdisc_ops);
	if (!ret) {
		ret = register_qdisc(&clsact_qdisc_ops);
		if (ret)
			unregister_qdisc(&ingress_qdisc_ops);
	}

	return ret;
}

static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
