// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
}

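/* Estimate the netlink attribute space needed to dump the multicast router
 * ports of @brmctx: one MDBA_ROUTER nest plus one entry per router port.
 */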
size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}

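/* Fill the MDBA_ROUTER nest with one MDBA_ROUTER_PORT entry per bridge port
 * currently tracked as a multicast router, including the per-family timers
 * and, for per-VLAN contexts, the VLAN ID.
 */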
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

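/* Dump the source list of a port group as a nested MDBA_MDB_EATTR_SRC_LIST
 * attribute, one entry per source with its address and remaining timer.
 */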
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

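/* Fill one MDBA_MDB_ENTRY_INFO attribute, either for a port group (@p set)
 * or for the host-joined entry (@p == NULL), including timer, source address,
 * routing protocol, filter mode and source list where applicable.
 */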
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

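/* Dump all MDB entries of one bridge into a MDBA_MDB nest, resuming from the
 * entry and port-group indexes saved in cb->args[] by a previous partial dump.
 */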
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

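/* Dump handler for RTM_GETMDB: fill one multipart message for @dev carrying
 * both its MDB entries and its multicast router port list.
 */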
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

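/* Size estimate for the attributes of a single port group entry (or the
 * host-joined entry when @pg is NULL) in an MDB notification.
 */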
static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	size_t nlmsg_size, addr_size = 0;

	/* MDBA_MDB_ENTRY_INFO */
	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
		     /* MDBA_MDB_EATTR_TIMER */
		     nla_total_size(sizeof(u32));

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
	       /* MDBA_MDB */
	       nla_total_size(0) +
	       /* MDBA_MDB_ENTRY */
	       nla_total_size(0) +
	       /* Port group entry */
	       rtnl_mdb_nlmsg_pg_size(pg);
}

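/* Notify switchdev drivers and RTNLGRP_MDB listeners about an added or
 * deleted MDB entry (@type is RTM_NEWMDB or RTM_DELMDB).
 */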
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

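/* Notify RTNLGRP_MDB listeners that a port was added to or removed from the
 * multicast router port list (@type is RTM_NEWMDB or RTM_DELMDB).
 */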
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

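/* Select the multicast context for a new entry: the global bridge context
 * when VLAN snooping is disabled, otherwise the per-VLAN context matching
 * entry->vid. Returns NULL (with extack set) if no usable context exists.
 */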
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}

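/* Add (or, with NLM_F_REPLACE, update) an (S, G) port group entry and pull
 * the existing (*, G) EXCLUDE ports into it for correct replication.
 */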
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}

static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		del_timer(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}

static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}

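/* Add (or, with NLM_F_REPLACE, update) a (*, G) port group entry together
 * with its user-supplied source list and filter mode.
 */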
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}

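/* Resolve the multicast context, look up or create the MDB entry for @cfg and
 * attach either a host join or a per-port group, dispatching to the (*, G) or
 * (S, G) helper as appropriate.
 */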
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}

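/* Parse and validate the RTM_NEWMDB/RTM_DELMDB request attributes into a
 * struct br_mdb_config, resolving the bridge, the optional bridge port and
 * the group address. Must be paired with br_mdb_config_fini().
 */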
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}

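/* RTM_NEWMDB handler. When VLAN filtering is enabled and no VLAN is given,
 * the entry is installed on every VLAN configured on the port.
 */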
int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}

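/* Remove one MDB entry (host leave or port group) under the multicast lock.
 * Returns -EINVAL if no matching entry is found.
 */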
static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

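/* RTM_DELMDB handler. Mirrors br_mdb_add(): when VLAN filtering is enabled
 * and no VLAN is given, the entry is deleted on every configured VLAN.
 */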
int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}

static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}

static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}

static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;
	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

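/* RTM_GETMDB "get" handler: look up a single MDB entry and unicast a reply
 * describing the host join and all of its port groups back to the requester.
 */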
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}


Source: linux/net/bridge/br_mdb.c