// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

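/* Illustrative userspace usage (not part of this file; assumes devices
 * eth0 and eth1 exist): redirect all packets arriving on eth0 to eth1:
 *
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: protocol all u32 match u32 0 0 \
 *      action mirred egress redirect dev eth1
 */
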
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

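/* Bound how deep mirred may nest on one CPU (e.g. two devices
 * redirecting to each other): beyond this depth the packet is dropped
 * rather than recursing without limit.
 */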
#define MIRRED_NEST_LIMIT 4
static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

static bool tcf_mirred_is_act_redirect(int action)
{
        return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
        switch (action) {
        case TCA_EGRESS_REDIR:
        case TCA_EGRESS_MIRROR:
                return false;
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
                return true;
        default:
                BUG();
        }
}

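/* A redirect taken at ingress may hand the original skb straight to the
 * target instead of cloning it, but only when the filter verdict
 * already tells the caller that the packet has been consumed.
 */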
static bool tcf_mirred_can_reinsert(int action)
{
        switch (action) {
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
        case TC_ACT_TRAP:
                return true;
        }
        return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
        return rcu_dereference_protected(m->tcfm_dev,
                                         lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        spin_lock(&mirred_list_lock);
        list_del(&m->tcfm_list);
        spin_unlock(&mirred_list_lock);

        /* last reference to action, no need to lock */
        dev = rcu_dereference_protected(m->tcfm_dev, 1);
        netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
        [TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) },
};

static struct tc_action_ops act_mirred_ops;

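/* Netlink ->init() handler: parse TCA_MIRRED_PARMS, create or update the
 * action instance, and (re)bind the target net_device under tcf_lock so
 * the datapath never observes a half-updated action.
 */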
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           struct tcf_proto *tp,
                           u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
        bool bind = flags & TCA_ACT_FLAGS_BIND;
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        bool mac_header_xmit = false;
        struct tc_mirred *parm;
        struct tcf_mirred *m;
        bool exists = false;
        int ret, err;
        u32 index;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
                return -EINVAL;
        }
        ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
                                          mirred_policy, extack);
        if (ret < 0)
                return ret;
        if (!tb[TCA_MIRRED_PARMS]) {
                NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
                return -EINVAL;
        }
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        switch (parm->eaction) {
        case TCA_EGRESS_MIRROR:
        case TCA_EGRESS_REDIR:
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
                break;
        default:
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
                return -EINVAL;
        }

        if (!exists) {
                if (!parm->ifindex) {
                        tcf_idr_cleanup(tn, index);
                        NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
                        return -EINVAL;
                }
                ret = tcf_idr_create_from_flags(tn, index, est, a,
                                                &act_mirred_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }

        m = to_mirred(*a);
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&m->tcfm_list);

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        spin_lock_bh(&m->tcf_lock);

        if (parm->ifindex) {
                struct net_device *odev, *ndev;

                ndev = dev_get_by_index(net, parm->ifindex);
                if (!ndev) {
                        spin_unlock_bh(&m->tcf_lock);
                        err = -ENODEV;
                        goto put_chain;
                }
                mac_header_xmit = dev_is_mac_header_xmit(ndev);
                odev = rcu_replace_pointer(m->tcfm_dev, ndev,
                                           lockdep_is_held(&m->tcf_lock));
                netdev_put(odev, &m->tcfm_dev_tracker);
                netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
                m->tcfm_mac_header_xmit = mac_header_xmit;
        }
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        m->tcfm_eaction = parm->eaction;
        spin_unlock_bh(&m->tcf_lock);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        if (ret == ACT_P_CREATED) {
                spin_lock(&mirred_list_lock);
                list_add(&m->tcfm_list, &mirred_list);
                spin_unlock(&mirred_list_lock);
        }

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

static bool is_mirred_nested(void)
{
        return unlikely(__this_cpu_read(mirred_nest_level) > 1);
}

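/* Deliver the skb: egress goes through the normal transmit path, while
 * ingress is re-injected synchronously unless we are already nested
 * inside mirred, in which case netif_rx() queues the packet to bound
 * stack usage.
 */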
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
        int err;

        if (!want_ingress)
                err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
        else if (is_mirred_nested())
                err = netif_rx(skb);
        else
                err = netif_receive_skb(skb);

        return err;
}

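/* Per-packet action handler: clone (or, for consuming redirects at
 * ingress, reuse) the skb, realign skb->data to what the target expects
 * and hand the packet to the target device's ingress or egress path.
 */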
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
                                     const struct tc_action *a,
                                     struct tcf_result *res)
{
        struct tcf_mirred *m = to_mirred(a);
        struct sk_buff *skb2 = skb;
        bool m_mac_header_xmit;
        struct net_device *dev;
        unsigned int nest_level;
        int retval, err = 0;
        bool use_reinsert;
        bool want_ingress;
        bool is_redirect;
        bool expects_nh;
        bool at_ingress;
        int m_eaction;
        int mac_len;
        bool at_nh;

        nest_level = __this_cpu_inc_return(mirred_nest_level);
        if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
                net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
                                     netdev_name(skb->dev));
                __this_cpu_dec(mirred_nest_level);
                return TC_ACT_SHOT;
        }

        tcf_lastuse_update(&m->tcf_tm);
        tcf_action_update_bstats(&m->common, skb);

        m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
        m_eaction = READ_ONCE(m->tcfm_eaction);
        retval = READ_ONCE(m->tcf_action);
        dev = rcu_dereference_bh(m->tcfm_dev);
        if (unlikely(!dev)) {
                pr_notice_once("tc mirred: target device is gone\n");
                goto out;
        }

        if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
                goto out;
        }

        /* We could avoid the clone only if called by ingress and clsact;
         * since we can't easily detect the clsact caller, skip the clone
         * only for ingress - that covers the TC S/W datapath.
         */
        is_redirect = tcf_mirred_is_act_redirect(m_eaction);
        at_ingress = skb_at_tc_ingress(skb);
        use_reinsert = at_ingress && is_redirect &&
                       tcf_mirred_can_reinsert(retval);
        if (!use_reinsert) {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (!skb2)
                        goto out;
        }

        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

        /* All mirred/redirected skbs should clear previous ct info */
        nf_reset_ct(skb2);
        if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
                skb_dst_drop(skb2);

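        /* The target may expect skb->data at the network header while it
         * currently sits at the MAC header (or vice versa); push or pull
         * the MAC header, adjusting the checksum, so the data pointer
         * lands where the receiving path expects it.
         */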
        expects_nh = want_ingress || !m_mac_header_xmit;
        at_nh = skb->data == skb_network_header(skb);
        if (at_nh != expects_nh) {
                mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
                          skb_network_offset(skb);
                if (expects_nh) {
                        /* target device/action expect data at nh */
                        skb_pull_rcsum(skb2, mac_len);
                } else {
                        /* target device/action expect data at mac */
                        skb_push_rcsum(skb2, mac_len);
                }
        }

        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;

        /* mirror is always swallowed */
        if (is_redirect) {
                skb_set_redirected(skb2, skb2->tc_at_ingress);

                /* let the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        err = tcf_mirred_forward(want_ingress, skb);
                        if (err)
                                tcf_action_inc_overlimit_qstats(&m->common);
                        __this_cpu_dec(mirred_nest_level);
                        return TC_ACT_CONSUMED;
                }
        }

        err = tcf_mirred_forward(want_ingress, skb2);
        if (err) {
out:
                tcf_action_inc_overlimit_qstats(&m->common);
                if (tcf_mirred_is_act_redirect(m_eaction))
                        retval = TC_ACT_SHOT;
        }
        __this_cpu_dec(mirred_nest_level);

        return retval;
}

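/* Fold counters reported by hardware offload into the software stats,
 * keeping the most recent lastuse timestamp of the two.
 */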
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                             u64 drops, u64 lastuse, bool hw)
{
        struct tcf_mirred *m = to_mirred(a);
        struct tcf_t *tm = &m->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

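/* Fill a netlink dump with the action parameters; tcf_lock keeps the
 * reported ifindex and eaction consistent with each other.
 */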
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                           int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = to_mirred(a);
        struct tc_mirred opt = {
                .index   = m->tcf_index,
                .refcnt  = refcount_read(&m->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
        };
        struct net_device *dev;
        struct tcf_t t;

        spin_lock_bh(&m->tcf_lock);
        opt.action = m->tcf_action;
        opt.eaction = m->tcfm_eaction;
        dev = tcf_mirred_dev_dereference(m);
        if (dev)
                opt.ifindex = dev->ifindex;

        if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &m->tcf_tm);
        if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&m->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&m->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

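/* Netdev notifier: when a target device unregisters, drop our tracked
 * reference and clear tcfm_dev so the datapath sees NULL rather than a
 * stale pointer.
 */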
static int mirred_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;

        ASSERT_RTNL();
        if (event == NETDEV_UNREGISTER) {
                spin_lock(&mirred_list_lock);
                list_for_each_entry(m, &mirred_list, tcfm_list) {
                        spin_lock_bh(&m->tcf_lock);
                        if (tcf_mirred_dev_dereference(m) == dev) {
                                netdev_put(dev, &m->tcfm_dev_tracker);
                                /* Note: no RCU grace period is necessary,
                                 * as net_device is already RCU protected.
                                 */
                                RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
                        spin_unlock_bh(&m->tcf_lock);
                }
                spin_unlock(&mirred_list_lock);
        }

        return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
        .notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
        struct net_device *dev = priv;

        dev_put(dev);
}

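/* Offload helper: take a reference on the current target device and
 * return it together with a destructor that drops the reference.
 */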
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
                   tc_action_priv_destructor *destructor)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(m->tcfm_dev);
        if (dev) {
                dev_hold(dev);
                *destructor = tcf_mirred_dev_put;
        }
        rcu_read_unlock();

        return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
                                       const struct tc_action *act)
{
        entry->dev = act->ops->get_dev(act, &entry->destructor);
        if (!entry->dev)
                return;
        entry->destructor_priv = entry->dev;
}

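/* Translate the mirred action for hardware offload: on bind, fill a
 * flow_action_entry (including a reference to the target device); on
 * the validation path only the flow action id is needed.
 */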
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
                                        u32 *index_inc, bool bind,
                                        struct netlink_ext_ack *extack)
{
        if (bind) {
                struct flow_action_entry *entry = entry_data;

                if (is_tcf_mirred_egress_redirect(act)) {
                        entry->id = FLOW_ACTION_REDIRECT;
                        tcf_offload_mirred_get_dev(entry, act);
                } else if (is_tcf_mirred_egress_mirror(act)) {
                        entry->id = FLOW_ACTION_MIRRED;
                        tcf_offload_mirred_get_dev(entry, act);
                } else if (is_tcf_mirred_ingress_redirect(act)) {
                        entry->id = FLOW_ACTION_REDIRECT_INGRESS;
                        tcf_offload_mirred_get_dev(entry, act);
                } else if (is_tcf_mirred_ingress_mirror(act)) {
                        entry->id = FLOW_ACTION_MIRRED_INGRESS;
                        tcf_offload_mirred_get_dev(entry, act);
                } else {
                        NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
                        return -EOPNOTSUPP;
                }
                *index_inc = 1;
        } else {
                struct flow_offload_action *fl_action = entry_data;

                if (is_tcf_mirred_egress_redirect(act))
                        fl_action->id = FLOW_ACTION_REDIRECT;
                else if (is_tcf_mirred_egress_mirror(act))
                        fl_action->id = FLOW_ACTION_MIRRED;
                else if (is_tcf_mirred_ingress_redirect(act))
                        fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
                else if (is_tcf_mirred_ingress_mirror(act))
                        fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
                else
                        return -EOPNOTSUPP;
        }

        return 0;
}

static struct tc_action_ops act_mirred_ops = {
        .kind              = "mirred",
        .id                = TCA_ID_MIRRED,
        .owner             = THIS_MODULE,
        .act               = tcf_mirred_act,
        .stats_update      = tcf_stats_update,
        .dump              = tcf_mirred_dump,
        .cleanup           = tcf_mirred_release,
        .init              = tcf_mirred_init,
        .get_fill_size     = tcf_mirred_get_fill_size,
        .offload_act_setup = tcf_mirred_offload_act_setup,
        .size              = sizeof(struct tcf_mirred),
        .get_dev           = tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

        return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
        .init       = mirred_init_net,
        .exit_batch = mirred_exit_net,
        .id         = &act_mirred_ops.net_id,
        .size       = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

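/* The netdev notifier must be in place before any action can be
 * created, so register it first and unwind if action registration
 * fails.
 */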
static int __init mirred_init_module(void)
{
        int err = register_netdevice_notifier(&mirred_device_notifier);

        if (err)
                return err;

        pr_info("Mirror/redirect action on\n");
        err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
        if (err)
                unregister_netdevice_notifier(&mirred_device_notifier);

        return err;
}

static void __exit mirred_cleanup_module(void)
{
        tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
        unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);
