1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Bridge multicast support. |
4 | * |
5 | * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> |
6 | */ |
7 | |
8 | #include <linux/err.h> |
9 | #include <linux/export.h> |
10 | #include <linux/if_ether.h> |
11 | #include <linux/igmp.h> |
12 | #include <linux/in.h> |
13 | #include <linux/jhash.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/log2.h> |
16 | #include <linux/netdevice.h> |
17 | #include <linux/netfilter_bridge.h> |
18 | #include <linux/random.h> |
19 | #include <linux/rculist.h> |
20 | #include <linux/skbuff.h> |
21 | #include <linux/slab.h> |
22 | #include <linux/timer.h> |
23 | #include <linux/inetdevice.h> |
24 | #include <linux/mroute.h> |
25 | #include <net/ip.h> |
26 | #include <net/switchdev.h> |
27 | #if IS_ENABLED(CONFIG_IPV6) |
28 | #include <linux/icmpv6.h> |
29 | #include <net/ipv6.h> |
30 | #include <net/mld.h> |
31 | #include <net/ip6_checksum.h> |
32 | #include <net/addrconf.h> |
33 | #endif |
34 | #include <trace/events/bridge.h> |
35 | |
36 | #include "br_private.h" |
37 | #include "br_private_mcast_eht.h" |
38 | |
39 | static const struct rhashtable_params br_mdb_rht_params = { |
40 | .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode), |
41 | .key_offset = offsetof(struct net_bridge_mdb_entry, addr), |
42 | .key_len = sizeof(struct br_ip), |
43 | .automatic_shrinking = true, |
44 | }; |
45 | |
46 | static const struct rhashtable_params br_sg_port_rht_params = { |
47 | .head_offset = offsetof(struct net_bridge_port_group, rhnode), |
48 | .key_offset = offsetof(struct net_bridge_port_group, key), |
49 | .key_len = sizeof(struct net_bridge_port_group_sg_key), |
50 | .automatic_shrinking = true, |
51 | }; |
52 | |
/* Forward declarations for helpers defined later in this file. */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);
88 | |
89 | static struct net_bridge_port_group * |
90 | br_sg_port_find(struct net_bridge *br, |
91 | struct net_bridge_port_group_sg_key *sg_p) |
92 | { |
93 | lockdep_assert_held_once(&br->multicast_lock); |
94 | |
95 | return rhashtable_lookup_fast(ht: &br->sg_port_tbl, key: sg_p, |
96 | params: br_sg_port_rht_params); |
97 | } |
98 | |
99 | static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br, |
100 | struct br_ip *dst) |
101 | { |
102 | return rhashtable_lookup(ht: &br->mdb_hash_tbl, key: dst, params: br_mdb_rht_params); |
103 | } |
104 | |
105 | struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br, |
106 | struct br_ip *dst) |
107 | { |
108 | struct net_bridge_mdb_entry *ent; |
109 | |
110 | lockdep_assert_held_once(&br->multicast_lock); |
111 | |
112 | rcu_read_lock(); |
113 | ent = rhashtable_lookup(ht: &br->mdb_hash_tbl, key: dst, params: br_mdb_rht_params); |
114 | rcu_read_unlock(); |
115 | |
116 | return ent; |
117 | } |
118 | |
119 | static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br, |
120 | __be32 dst, __u16 vid) |
121 | { |
122 | struct br_ip br_dst; |
123 | |
124 | memset(&br_dst, 0, sizeof(br_dst)); |
125 | br_dst.dst.ip4 = dst; |
126 | br_dst.proto = htons(ETH_P_IP); |
127 | br_dst.vid = vid; |
128 | |
129 | return br_mdb_ip_get(br, dst: &br_dst); |
130 | } |
131 | |
132 | #if IS_ENABLED(CONFIG_IPV6) |
133 | static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br, |
134 | const struct in6_addr *dst, |
135 | __u16 vid) |
136 | { |
137 | struct br_ip br_dst; |
138 | |
139 | memset(&br_dst, 0, sizeof(br_dst)); |
140 | br_dst.dst.ip6 = *dst; |
141 | br_dst.proto = htons(ETH_P_IPV6); |
142 | br_dst.vid = vid; |
143 | |
144 | return br_mdb_ip_get(br, dst: &br_dst); |
145 | } |
146 | #endif |
147 | |
148 | struct net_bridge_mdb_entry * |
149 | br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb, |
150 | u16 vid) |
151 | { |
152 | struct net_bridge *br = brmctx->br; |
153 | struct br_ip ip; |
154 | |
155 | if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED) || |
156 | br_multicast_ctx_vlan_global_disabled(brmctx)) |
157 | return NULL; |
158 | |
159 | if (BR_INPUT_SKB_CB(skb)->igmp) |
160 | return NULL; |
161 | |
162 | memset(&ip, 0, sizeof(ip)); |
163 | ip.proto = skb->protocol; |
164 | ip.vid = vid; |
165 | |
166 | switch (skb->protocol) { |
167 | case htons(ETH_P_IP): |
168 | ip.dst.ip4 = ip_hdr(skb)->daddr; |
169 | if (brmctx->multicast_igmp_version == 3) { |
170 | struct net_bridge_mdb_entry *mdb; |
171 | |
172 | ip.src.ip4 = ip_hdr(skb)->saddr; |
173 | mdb = br_mdb_ip_get_rcu(br, dst: &ip); |
174 | if (mdb) |
175 | return mdb; |
176 | ip.src.ip4 = 0; |
177 | } |
178 | break; |
179 | #if IS_ENABLED(CONFIG_IPV6) |
180 | case htons(ETH_P_IPV6): |
181 | ip.dst.ip6 = ipv6_hdr(skb)->daddr; |
182 | if (brmctx->multicast_mld_version == 2) { |
183 | struct net_bridge_mdb_entry *mdb; |
184 | |
185 | ip.src.ip6 = ipv6_hdr(skb)->saddr; |
186 | mdb = br_mdb_ip_get_rcu(br, dst: &ip); |
187 | if (mdb) |
188 | return mdb; |
189 | memset(&ip.src.ip6, 0, sizeof(ip.src.ip6)); |
190 | } |
191 | break; |
192 | #endif |
193 | default: |
194 | ip.proto = 0; |
195 | ether_addr_copy(dst: ip.dst.mac_addr, src: eth_hdr(skb)->h_dest); |
196 | } |
197 | |
198 | return br_mdb_ip_get_rcu(br, dst: &ip); |
199 | } |
200 | |
201 | /* IMPORTANT: this function must be used only when the contexts cannot be |
202 | * passed down (e.g. timer) and must be used for read-only purposes because |
203 | * the vlan snooping option can change, so it can return any context |
204 | * (non-vlan or vlan). Its initial intended purpose is to read timer values |
205 | * from the *current* context based on the option. At worst that could lead |
206 | * to inconsistent timers when the contexts are changed, i.e. src timer |
207 | * which needs to re-arm with a specific delay taken from the old context |
208 | */ |
209 | static struct net_bridge_mcast_port * |
210 | br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg) |
211 | { |
212 | struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx; |
213 | struct net_bridge_vlan *vlan; |
214 | |
215 | lockdep_assert_held_once(&pg->key.port->br->multicast_lock); |
216 | |
217 | /* if vlan snooping is disabled use the port's multicast context */ |
218 | if (!pg->key.addr.vid || |
219 | !br_opt_get(br: pg->key.port->br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) |
220 | goto out; |
221 | |
222 | /* locking is tricky here, due to different rules for multicast and |
223 | * vlans we need to take rcu to find the vlan and make sure it has |
224 | * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under |
225 | * multicast_lock which must be already held here, so the vlan's pmctx |
226 | * can safely be used on return |
227 | */ |
228 | rcu_read_lock(); |
229 | vlan = br_vlan_find(vg: nbp_vlan_group_rcu(p: pg->key.port), vid: pg->key.addr.vid); |
230 | if (vlan && !br_multicast_port_ctx_vlan_disabled(pmctx: &vlan->port_mcast_ctx)) |
231 | pmctx = &vlan->port_mcast_ctx; |
232 | else |
233 | pmctx = NULL; |
234 | rcu_read_unlock(); |
235 | out: |
236 | return pmctx; |
237 | } |
238 | |
239 | static struct net_bridge_mcast_port * |
240 | br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid) |
241 | { |
242 | struct net_bridge_mcast_port *pmctx = NULL; |
243 | struct net_bridge_vlan *vlan; |
244 | |
245 | lockdep_assert_held_once(&port->br->multicast_lock); |
246 | |
247 | if (!br_opt_get(br: port->br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) |
248 | return NULL; |
249 | |
250 | /* Take RCU to access the vlan. */ |
251 | rcu_read_lock(); |
252 | |
253 | vlan = br_vlan_find(vg: nbp_vlan_group_rcu(p: port), vid); |
254 | if (vlan && !br_multicast_port_ctx_vlan_disabled(pmctx: &vlan->port_mcast_ctx)) |
255 | pmctx = &vlan->port_mcast_ctx; |
256 | |
257 | rcu_read_unlock(); |
258 | |
259 | return pmctx; |
260 | } |
261 | |
262 | /* when snooping we need to check if the contexts should be used |
263 | * in the following order: |
264 | * - if pmctx is non-NULL (port), check if it should be used |
265 | * - if pmctx is NULL (bridge), check if brmctx should be used |
266 | */ |
267 | static bool |
268 | br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx, |
269 | const struct net_bridge_mcast_port *pmctx) |
270 | { |
271 | if (!netif_running(dev: brmctx->br->dev)) |
272 | return false; |
273 | |
274 | if (pmctx) |
275 | return !br_multicast_port_ctx_state_disabled(pmctx); |
276 | else |
277 | return !br_multicast_ctx_vlan_disabled(brmctx); |
278 | } |
279 | |
280 | static bool br_port_group_equal(struct net_bridge_port_group *p, |
281 | struct net_bridge_port *port, |
282 | const unsigned char *src) |
283 | { |
284 | if (p->key.port != port) |
285 | return false; |
286 | |
287 | if (!(port->flags & BR_MULTICAST_TO_UNICAST)) |
288 | return true; |
289 | |
290 | return ether_addr_equal(addr1: src, addr2: p->eth_addr); |
291 | } |
292 | |
293 | static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx, |
294 | struct net_bridge_port_group *pg, |
295 | struct br_ip *sg_ip) |
296 | { |
297 | struct net_bridge_port_group_sg_key sg_key; |
298 | struct net_bridge_port_group *src_pg; |
299 | struct net_bridge_mcast *brmctx; |
300 | |
301 | memset(&sg_key, 0, sizeof(sg_key)); |
302 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
303 | sg_key.port = pg->key.port; |
304 | sg_key.addr = *sg_ip; |
305 | if (br_sg_port_find(br: brmctx->br, sg_p: &sg_key)) |
306 | return; |
307 | |
308 | src_pg = __br_multicast_add_group(brmctx, pmctx, |
309 | group: sg_ip, src: pg->eth_addr, |
310 | MCAST_INCLUDE, igmpv2_mldv1: false, blocked: false); |
311 | if (IS_ERR_OR_NULL(ptr: src_pg) || |
312 | src_pg->rt_protocol != RTPROT_KERNEL) |
313 | return; |
314 | |
315 | src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; |
316 | } |
317 | |
318 | static void __fwd_del_star_excl(struct net_bridge_port_group *pg, |
319 | struct br_ip *sg_ip) |
320 | { |
321 | struct net_bridge_port_group_sg_key sg_key; |
322 | struct net_bridge *br = pg->key.port->br; |
323 | struct net_bridge_port_group *src_pg; |
324 | |
325 | memset(&sg_key, 0, sizeof(sg_key)); |
326 | sg_key.port = pg->key.port; |
327 | sg_key.addr = *sg_ip; |
328 | src_pg = br_sg_port_find(br, sg_p: &sg_key); |
329 | if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) || |
330 | src_pg->rt_protocol != RTPROT_KERNEL) |
331 | return; |
332 | |
333 | br_multicast_find_del_pg(br, pg: src_pg); |
334 | } |
335 | |
336 | /* When a port group transitions to (or is added as) EXCLUDE we need to add it |
337 | * to all other ports' S,G entries which are not blocked by the current group |
338 | * for proper replication, the assumption is that any S,G blocked entries |
339 | * are already added so the S,G,port lookup should skip them. |
340 | * When a port group transitions from EXCLUDE -> INCLUDE mode or is being |
341 | * deleted we need to remove it from all ports' S,G entries where it was |
342 | * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL). |
343 | */ |
344 | void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg, |
345 | u8 filter_mode) |
346 | { |
347 | struct net_bridge *br = pg->key.port->br; |
348 | struct net_bridge_port_group *pg_lst; |
349 | struct net_bridge_mcast_port *pmctx; |
350 | struct net_bridge_mdb_entry *mp; |
351 | struct br_ip sg_ip; |
352 | |
353 | if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr))) |
354 | return; |
355 | |
356 | mp = br_mdb_ip_get(br, dst: &pg->key.addr); |
357 | if (!mp) |
358 | return; |
359 | pmctx = br_multicast_pg_to_port_ctx(pg); |
360 | if (!pmctx) |
361 | return; |
362 | |
363 | memset(&sg_ip, 0, sizeof(sg_ip)); |
364 | sg_ip = pg->key.addr; |
365 | |
366 | for (pg_lst = mlock_dereference(mp->ports, br); |
367 | pg_lst; |
368 | pg_lst = mlock_dereference(pg_lst->next, br)) { |
369 | struct net_bridge_group_src *src_ent; |
370 | |
371 | if (pg_lst == pg) |
372 | continue; |
373 | hlist_for_each_entry(src_ent, &pg_lst->src_list, node) { |
374 | if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) |
375 | continue; |
376 | sg_ip.src = src_ent->addr.src; |
377 | switch (filter_mode) { |
378 | case MCAST_INCLUDE: |
379 | __fwd_del_star_excl(pg, sg_ip: &sg_ip); |
380 | break; |
381 | case MCAST_EXCLUDE: |
382 | __fwd_add_star_excl(pmctx, pg, sg_ip: &sg_ip); |
383 | break; |
384 | } |
385 | } |
386 | } |
387 | } |
388 | |
389 | /* called when adding a new S,G with host_joined == false by default */ |
390 | static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp, |
391 | struct net_bridge_port_group *sg) |
392 | { |
393 | struct net_bridge_mdb_entry *sg_mp; |
394 | |
395 | if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) |
396 | return; |
397 | if (!star_mp->host_joined) |
398 | return; |
399 | |
400 | sg_mp = br_mdb_ip_get(br: star_mp->br, dst: &sg->key.addr); |
401 | if (!sg_mp) |
402 | return; |
403 | sg_mp->host_joined = true; |
404 | } |
405 | |
406 | /* set the host_joined state of all of *,G's S,G entries */ |
407 | static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp) |
408 | { |
409 | struct net_bridge *br = star_mp->br; |
410 | struct net_bridge_mdb_entry *sg_mp; |
411 | struct net_bridge_port_group *pg; |
412 | struct br_ip sg_ip; |
413 | |
414 | if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) |
415 | return; |
416 | |
417 | memset(&sg_ip, 0, sizeof(sg_ip)); |
418 | sg_ip = star_mp->addr; |
419 | for (pg = mlock_dereference(star_mp->ports, br); |
420 | pg; |
421 | pg = mlock_dereference(pg->next, br)) { |
422 | struct net_bridge_group_src *src_ent; |
423 | |
424 | hlist_for_each_entry(src_ent, &pg->src_list, node) { |
425 | if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) |
426 | continue; |
427 | sg_ip.src = src_ent->addr.src; |
428 | sg_mp = br_mdb_ip_get(br, dst: &sg_ip); |
429 | if (!sg_mp) |
430 | continue; |
431 | sg_mp->host_joined = star_mp->host_joined; |
432 | } |
433 | } |
434 | } |
435 | |
436 | static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp) |
437 | { |
438 | struct net_bridge_port_group __rcu **pp; |
439 | struct net_bridge_port_group *p; |
440 | |
441 | /* *,G exclude ports are only added to S,G entries */ |
442 | if (WARN_ON(br_multicast_is_star_g(&sgmp->addr))) |
443 | return; |
444 | |
445 | /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports |
446 | * we should ignore perm entries since they're managed by user-space |
447 | */ |
448 | for (pp = &sgmp->ports; |
449 | (p = mlock_dereference(*pp, sgmp->br)) != NULL; |
450 | pp = &p->next) |
451 | if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL | |
452 | MDB_PG_FLAGS_PERMANENT))) |
453 | return; |
454 | |
455 | /* currently the host can only have joined the *,G which means |
456 | * we treat it as EXCLUDE {}, so for an S,G it's considered a |
457 | * STAR_EXCLUDE entry and we can safely leave it |
458 | */ |
459 | sgmp->host_joined = false; |
460 | |
461 | for (pp = &sgmp->ports; |
462 | (p = mlock_dereference(*pp, sgmp->br)) != NULL;) { |
463 | if (!(p->flags & MDB_PG_FLAGS_PERMANENT)) |
464 | br_multicast_del_pg(mp: sgmp, pg: p, pp); |
465 | else |
466 | pp = &p->next; |
467 | } |
468 | } |
469 | |
470 | void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp, |
471 | struct net_bridge_port_group *sg) |
472 | { |
473 | struct net_bridge_port_group_sg_key sg_key; |
474 | struct net_bridge *br = star_mp->br; |
475 | struct net_bridge_mcast_port *pmctx; |
476 | struct net_bridge_port_group *pg; |
477 | struct net_bridge_mcast *brmctx; |
478 | |
479 | if (WARN_ON(br_multicast_is_star_g(&sg->key.addr))) |
480 | return; |
481 | if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) |
482 | return; |
483 | |
484 | br_multicast_sg_host_state(star_mp, sg); |
485 | memset(&sg_key, 0, sizeof(sg_key)); |
486 | sg_key.addr = sg->key.addr; |
487 | /* we need to add all exclude ports to the S,G */ |
488 | for (pg = mlock_dereference(star_mp->ports, br); |
489 | pg; |
490 | pg = mlock_dereference(pg->next, br)) { |
491 | struct net_bridge_port_group *src_pg; |
492 | |
493 | if (pg == sg || pg->filter_mode == MCAST_INCLUDE) |
494 | continue; |
495 | |
496 | sg_key.port = pg->key.port; |
497 | if (br_sg_port_find(br, sg_p: &sg_key)) |
498 | continue; |
499 | |
500 | pmctx = br_multicast_pg_to_port_ctx(pg); |
501 | if (!pmctx) |
502 | continue; |
503 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
504 | |
505 | src_pg = __br_multicast_add_group(brmctx, pmctx, |
506 | group: &sg->key.addr, |
507 | src: sg->eth_addr, |
508 | MCAST_INCLUDE, igmpv2_mldv1: false, blocked: false); |
509 | if (IS_ERR_OR_NULL(ptr: src_pg) || |
510 | src_pg->rt_protocol != RTPROT_KERNEL) |
511 | continue; |
512 | src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; |
513 | } |
514 | } |
515 | |
516 | static void br_multicast_fwd_src_add(struct net_bridge_group_src *src) |
517 | { |
518 | struct net_bridge_mdb_entry *star_mp; |
519 | struct net_bridge_mcast_port *pmctx; |
520 | struct net_bridge_port_group *sg; |
521 | struct net_bridge_mcast *brmctx; |
522 | struct br_ip sg_ip; |
523 | |
524 | if (src->flags & BR_SGRP_F_INSTALLED) |
525 | return; |
526 | |
527 | memset(&sg_ip, 0, sizeof(sg_ip)); |
528 | pmctx = br_multicast_pg_to_port_ctx(pg: src->pg); |
529 | if (!pmctx) |
530 | return; |
531 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
532 | sg_ip = src->pg->key.addr; |
533 | sg_ip.src = src->addr.src; |
534 | |
535 | sg = __br_multicast_add_group(brmctx, pmctx, group: &sg_ip, |
536 | src: src->pg->eth_addr, MCAST_INCLUDE, igmpv2_mldv1: false, |
537 | blocked: !timer_pending(timer: &src->timer)); |
538 | if (IS_ERR_OR_NULL(ptr: sg)) |
539 | return; |
540 | src->flags |= BR_SGRP_F_INSTALLED; |
541 | sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL; |
542 | |
543 | /* if it was added by user-space as perm we can skip next steps */ |
544 | if (sg->rt_protocol != RTPROT_KERNEL && |
545 | (sg->flags & MDB_PG_FLAGS_PERMANENT)) |
546 | return; |
547 | |
548 | /* the kernel is now responsible for removing this S,G */ |
549 | del_timer(timer: &sg->timer); |
550 | star_mp = br_mdb_ip_get(br: src->br, dst: &src->pg->key.addr); |
551 | if (!star_mp) |
552 | return; |
553 | |
554 | br_multicast_sg_add_exclude_ports(star_mp, sg); |
555 | } |
556 | |
557 | static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src, |
558 | bool fastleave) |
559 | { |
560 | struct net_bridge_port_group *p, *pg = src->pg; |
561 | struct net_bridge_port_group __rcu **pp; |
562 | struct net_bridge_mdb_entry *mp; |
563 | struct br_ip sg_ip; |
564 | |
565 | memset(&sg_ip, 0, sizeof(sg_ip)); |
566 | sg_ip = pg->key.addr; |
567 | sg_ip.src = src->addr.src; |
568 | |
569 | mp = br_mdb_ip_get(br: src->br, dst: &sg_ip); |
570 | if (!mp) |
571 | return; |
572 | |
573 | for (pp = &mp->ports; |
574 | (p = mlock_dereference(*pp, src->br)) != NULL; |
575 | pp = &p->next) { |
576 | if (!br_port_group_equal(p, port: pg->key.port, src: pg->eth_addr)) |
577 | continue; |
578 | |
579 | if (p->rt_protocol != RTPROT_KERNEL && |
580 | (p->flags & MDB_PG_FLAGS_PERMANENT) && |
581 | !(src->flags & BR_SGRP_F_USER_ADDED)) |
582 | break; |
583 | |
584 | if (fastleave) |
585 | p->flags |= MDB_PG_FLAGS_FAST_LEAVE; |
586 | br_multicast_del_pg(mp, pg: p, pp); |
587 | break; |
588 | } |
589 | src->flags &= ~BR_SGRP_F_INSTALLED; |
590 | } |
591 | |
592 | /* install S,G and based on src's timer enable or disable forwarding */ |
593 | static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src) |
594 | { |
595 | struct net_bridge_port_group_sg_key sg_key; |
596 | struct net_bridge_port_group *sg; |
597 | u8 old_flags; |
598 | |
599 | br_multicast_fwd_src_add(src); |
600 | |
601 | memset(&sg_key, 0, sizeof(sg_key)); |
602 | sg_key.addr = src->pg->key.addr; |
603 | sg_key.addr.src = src->addr.src; |
604 | sg_key.port = src->pg->key.port; |
605 | |
606 | sg = br_sg_port_find(br: src->br, sg_p: &sg_key); |
607 | if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT)) |
608 | return; |
609 | |
610 | old_flags = sg->flags; |
611 | if (timer_pending(timer: &src->timer)) |
612 | sg->flags &= ~MDB_PG_FLAGS_BLOCKED; |
613 | else |
614 | sg->flags |= MDB_PG_FLAGS_BLOCKED; |
615 | |
616 | if (old_flags != sg->flags) { |
617 | struct net_bridge_mdb_entry *sg_mp; |
618 | |
619 | sg_mp = br_mdb_ip_get(br: src->br, dst: &sg_key.addr); |
620 | if (!sg_mp) |
621 | return; |
622 | br_mdb_notify(dev: src->br->dev, mp: sg_mp, pg: sg, RTM_NEWMDB); |
623 | } |
624 | } |
625 | |
626 | static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc) |
627 | { |
628 | struct net_bridge_mdb_entry *mp; |
629 | |
630 | mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc); |
631 | WARN_ON(!hlist_unhashed(&mp->mdb_node)); |
632 | WARN_ON(mp->ports); |
633 | |
634 | timer_shutdown_sync(timer: &mp->timer); |
635 | kfree_rcu(mp, rcu); |
636 | } |
637 | |
638 | static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) |
639 | { |
640 | struct net_bridge *br = mp->br; |
641 | |
642 | rhashtable_remove_fast(ht: &br->mdb_hash_tbl, obj: &mp->rhnode, |
643 | params: br_mdb_rht_params); |
644 | hlist_del_init_rcu(n: &mp->mdb_node); |
645 | hlist_add_head(n: &mp->mcast_gc.gc_node, h: &br->mcast_gc_list); |
646 | queue_work(wq: system_long_wq, work: &br->mcast_gc_work); |
647 | } |
648 | |
649 | static void br_multicast_group_expired(struct timer_list *t) |
650 | { |
651 | struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); |
652 | struct net_bridge *br = mp->br; |
653 | |
654 | spin_lock(lock: &br->multicast_lock); |
655 | if (hlist_unhashed(h: &mp->mdb_node) || !netif_running(dev: br->dev) || |
656 | timer_pending(timer: &mp->timer)) |
657 | goto out; |
658 | |
659 | br_multicast_host_leave(mp, notify: true); |
660 | |
661 | if (mp->ports) |
662 | goto out; |
663 | br_multicast_del_mdb_entry(mp); |
664 | out: |
665 | spin_unlock(lock: &br->multicast_lock); |
666 | } |
667 | |
668 | static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc) |
669 | { |
670 | struct net_bridge_group_src *src; |
671 | |
672 | src = container_of(gc, struct net_bridge_group_src, mcast_gc); |
673 | WARN_ON(!hlist_unhashed(&src->node)); |
674 | |
675 | timer_shutdown_sync(timer: &src->timer); |
676 | kfree_rcu(src, rcu); |
677 | } |
678 | |
679 | void __br_multicast_del_group_src(struct net_bridge_group_src *src) |
680 | { |
681 | struct net_bridge *br = src->pg->key.port->br; |
682 | |
683 | hlist_del_init_rcu(n: &src->node); |
684 | src->pg->src_ents--; |
685 | hlist_add_head(n: &src->mcast_gc.gc_node, h: &br->mcast_gc_list); |
686 | queue_work(wq: system_long_wq, work: &br->mcast_gc_work); |
687 | } |
688 | |
689 | void br_multicast_del_group_src(struct net_bridge_group_src *src, |
690 | bool fastleave) |
691 | { |
692 | br_multicast_fwd_src_remove(src, fastleave); |
693 | __br_multicast_del_group_src(src); |
694 | } |
695 | |
696 | static int |
697 | br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx, |
698 | struct netlink_ext_ack *extack, |
699 | const char *what) |
700 | { |
701 | u32 max = READ_ONCE(pmctx->mdb_max_entries); |
702 | u32 n = READ_ONCE(pmctx->mdb_n_entries); |
703 | |
704 | if (max && n >= max) { |
705 | NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u" , |
706 | what, n, max); |
707 | return -E2BIG; |
708 | } |
709 | |
710 | WRITE_ONCE(pmctx->mdb_n_entries, n + 1); |
711 | return 0; |
712 | } |
713 | |
714 | static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx) |
715 | { |
716 | u32 n = READ_ONCE(pmctx->mdb_n_entries); |
717 | |
718 | WARN_ON_ONCE(n == 0); |
719 | WRITE_ONCE(pmctx->mdb_n_entries, n - 1); |
720 | } |
721 | |
722 | static int br_multicast_port_ngroups_inc(struct net_bridge_port *port, |
723 | const struct br_ip *group, |
724 | struct netlink_ext_ack *extack) |
725 | { |
726 | struct net_bridge_mcast_port *pmctx; |
727 | int err; |
728 | |
729 | lockdep_assert_held_once(&port->br->multicast_lock); |
730 | |
731 | /* Always count on the port context. */ |
732 | err = br_multicast_port_ngroups_inc_one(pmctx: &port->multicast_ctx, extack, |
733 | what: "Port" ); |
734 | if (err) { |
735 | trace_br_mdb_full(dev: port->dev, group); |
736 | return err; |
737 | } |
738 | |
739 | /* Only count on the VLAN context if VID is given, and if snooping on |
740 | * that VLAN is enabled. |
741 | */ |
742 | if (!group->vid) |
743 | return 0; |
744 | |
745 | pmctx = br_multicast_port_vid_to_port_ctx(port, vid: group->vid); |
746 | if (!pmctx) |
747 | return 0; |
748 | |
749 | err = br_multicast_port_ngroups_inc_one(pmctx, extack, what: "Port-VLAN" ); |
750 | if (err) { |
751 | trace_br_mdb_full(dev: port->dev, group); |
752 | goto dec_one_out; |
753 | } |
754 | |
755 | return 0; |
756 | |
757 | dec_one_out: |
758 | br_multicast_port_ngroups_dec_one(pmctx: &port->multicast_ctx); |
759 | return err; |
760 | } |
761 | |
762 | static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid) |
763 | { |
764 | struct net_bridge_mcast_port *pmctx; |
765 | |
766 | lockdep_assert_held_once(&port->br->multicast_lock); |
767 | |
768 | if (vid) { |
769 | pmctx = br_multicast_port_vid_to_port_ctx(port, vid); |
770 | if (pmctx) |
771 | br_multicast_port_ngroups_dec_one(pmctx); |
772 | } |
773 | br_multicast_port_ngroups_dec_one(pmctx: &port->multicast_ctx); |
774 | } |
775 | |
776 | u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx) |
777 | { |
778 | return READ_ONCE(pmctx->mdb_n_entries); |
779 | } |
780 | |
781 | void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max) |
782 | { |
783 | WRITE_ONCE(pmctx->mdb_max_entries, max); |
784 | } |
785 | |
786 | u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx) |
787 | { |
788 | return READ_ONCE(pmctx->mdb_max_entries); |
789 | } |
790 | |
791 | static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc) |
792 | { |
793 | struct net_bridge_port_group *pg; |
794 | |
795 | pg = container_of(gc, struct net_bridge_port_group, mcast_gc); |
796 | WARN_ON(!hlist_unhashed(&pg->mglist)); |
797 | WARN_ON(!hlist_empty(&pg->src_list)); |
798 | |
799 | timer_shutdown_sync(timer: &pg->rexmit_timer); |
800 | timer_shutdown_sync(timer: &pg->timer); |
801 | kfree_rcu(pg, rcu); |
802 | } |
803 | |
804 | void br_multicast_del_pg(struct net_bridge_mdb_entry *mp, |
805 | struct net_bridge_port_group *pg, |
806 | struct net_bridge_port_group __rcu **pp) |
807 | { |
808 | struct net_bridge *br = pg->key.port->br; |
809 | struct net_bridge_group_src *ent; |
810 | struct hlist_node *tmp; |
811 | |
812 | rcu_assign_pointer(*pp, pg->next); |
813 | hlist_del_init(n: &pg->mglist); |
814 | br_multicast_eht_clean_sets(pg); |
815 | hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) |
816 | br_multicast_del_group_src(src: ent, fastleave: false); |
817 | br_mdb_notify(dev: br->dev, mp, pg, RTM_DELMDB); |
818 | if (!br_multicast_is_star_g(ip: &mp->addr)) { |
819 | rhashtable_remove_fast(ht: &br->sg_port_tbl, obj: &pg->rhnode, |
820 | params: br_sg_port_rht_params); |
821 | br_multicast_sg_del_exclude_ports(sgmp: mp); |
822 | } else { |
823 | br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); |
824 | } |
825 | br_multicast_port_ngroups_dec(port: pg->key.port, vid: pg->key.addr.vid); |
826 | hlist_add_head(n: &pg->mcast_gc.gc_node, h: &br->mcast_gc_list); |
827 | queue_work(wq: system_long_wq, work: &br->mcast_gc_work); |
828 | |
829 | if (!mp->ports && !mp->host_joined && netif_running(dev: br->dev)) |
830 | mod_timer(timer: &mp->timer, expires: jiffies); |
831 | } |
832 | |
833 | static void br_multicast_find_del_pg(struct net_bridge *br, |
834 | struct net_bridge_port_group *pg) |
835 | { |
836 | struct net_bridge_port_group __rcu **pp; |
837 | struct net_bridge_mdb_entry *mp; |
838 | struct net_bridge_port_group *p; |
839 | |
840 | mp = br_mdb_ip_get(br, dst: &pg->key.addr); |
841 | if (WARN_ON(!mp)) |
842 | return; |
843 | |
844 | for (pp = &mp->ports; |
845 | (p = mlock_dereference(*pp, br)) != NULL; |
846 | pp = &p->next) { |
847 | if (p != pg) |
848 | continue; |
849 | |
850 | br_multicast_del_pg(mp, pg, pp); |
851 | return; |
852 | } |
853 | |
854 | WARN_ON(1); |
855 | } |
856 | |
857 | static void br_multicast_port_group_expired(struct timer_list *t) |
858 | { |
859 | struct net_bridge_port_group *pg = from_timer(pg, t, timer); |
860 | struct net_bridge_group_src *src_ent; |
861 | struct net_bridge *br = pg->key.port->br; |
862 | struct hlist_node *tmp; |
863 | bool changed; |
864 | |
865 | spin_lock(lock: &br->multicast_lock); |
866 | if (!netif_running(dev: br->dev) || timer_pending(timer: &pg->timer) || |
867 | hlist_unhashed(h: &pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) |
868 | goto out; |
869 | |
870 | changed = !!(pg->filter_mode == MCAST_EXCLUDE); |
871 | pg->filter_mode = MCAST_INCLUDE; |
872 | hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) { |
873 | if (!timer_pending(timer: &src_ent->timer)) { |
874 | br_multicast_del_group_src(src: src_ent, fastleave: false); |
875 | changed = true; |
876 | } |
877 | } |
878 | |
879 | if (hlist_empty(h: &pg->src_list)) { |
880 | br_multicast_find_del_pg(br, pg); |
881 | } else if (changed) { |
882 | struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, dst: &pg->key.addr); |
883 | |
884 | if (changed && br_multicast_is_star_g(ip: &pg->key.addr)) |
885 | br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); |
886 | |
887 | if (WARN_ON(!mp)) |
888 | goto out; |
889 | br_mdb_notify(dev: br->dev, mp, pg, RTM_NEWMDB); |
890 | } |
891 | out: |
892 | spin_unlock(lock: &br->multicast_lock); |
893 | } |
894 | |
895 | static void br_multicast_gc(struct hlist_head *head) |
896 | { |
897 | struct net_bridge_mcast_gc *gcent; |
898 | struct hlist_node *tmp; |
899 | |
900 | hlist_for_each_entry_safe(gcent, tmp, head, gc_node) { |
901 | hlist_del_init(n: &gcent->gc_node); |
902 | gcent->destroy(gcent); |
903 | } |
904 | } |
905 | |
906 | static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx, |
907 | struct net_bridge_mcast_port *pmctx, |
908 | struct sk_buff *skb) |
909 | { |
910 | struct net_bridge_vlan *vlan = NULL; |
911 | |
912 | if (pmctx && br_multicast_port_ctx_is_vlan(pmctx)) |
913 | vlan = pmctx->vlan; |
914 | else if (br_multicast_ctx_is_vlan(brmctx)) |
915 | vlan = brmctx->vlan; |
916 | |
917 | if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) { |
918 | u16 vlan_proto; |
919 | |
920 | if (br_vlan_get_proto(dev: brmctx->br->dev, p_proto: &vlan_proto) != 0) |
921 | return; |
922 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci: vlan->vid); |
923 | } |
924 | } |
925 | |
/* Allocate and fully build an IGMP query skb for transmission.
 *
 * @ip_dst/@group: destination IP and queried group (0 for general query)
 * @with_srcs: build a group-and-source specific query (IGMPv3 only)
 * @over_lmqt: select only sources whose timer expires after (true) or
 *	before/at (false) the last-member-query time
 * @sflag: value for the IGMPv3 "suppress router-side processing" bit
 * @igmp_type: out - IGMP type placed in the packet (for stats counting)
 * @need_rexmit: out - set when some included source still has
 *	retransmissions pending
 *
 * Returns the skb, or NULL if nothing needs to be sent / on error.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	/* IGMPv2 header by default; IGMPv3 may grow by the source list */
	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* last member query time: sources are partitioned by
			 * whether their timer expires after this point
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* first pass: count matching sources to size the pkt */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			/* no sources on this side of lmqt - nothing to send */
			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 for the IP Router Alert option appended after the base header */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(dev: brmctx->br->dev, length: pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(dst: eth->h_source, src: brmctx->br->dev->dev_addr);
	/* derive the multicast MAC from the destination IP */
	ip_eth_mc_map(naddr: ip_dst, buf: eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, len: sizeof(*eth));

	skb_set_network_header(skb, offset: skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;		/* 20-byte header + 4-byte Router Alert opt */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;		/* link-local only */
	iph->protocol = IPPROTO_IGMP;
	/* source is either a link-scope bridge address or 0.0.0.0 */
	iph->saddr = br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(dev: brmctx->br->dev, dst: 0, scope: RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option, value 0 */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(ip: iph);
	skb_put(skb, len: 24);	/* ihl * 4: IP header incl. RA option */

	skb_set_transport_header(skb, offset: skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* max resp time: LMQI for group queries, QRI for general */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the source list using the same predicate
		 * as the sizing pass and consume one rexmit credit per source
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes must agree - list changed underneath otherwise */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* only versions 2 and 3 are handled above */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(buff: csum_start, len: igmp_hdr_size);
	skb_put(skb, len: igmp_hdr_size);
	/* leave skb->data at the network header for local delivery/xmit */
	__skb_pull(skb, len: sizeof(*eth));

out:
	return skb;
}
1071 | |
1072 | #if IS_ENABLED(CONFIG_IPV6) |
/* Allocate and fully build an MLD query skb for transmission.
 * IPv6 counterpart of br_ip4_multicast_alloc_query().
 *
 * @ip6_dst/@group: destination address and queried group (:: for general)
 * @with_srcs: build a group-and-source specific query (MLDv2 only)
 * @over_llqt: select only sources whose timer expires after (true) or
 *	before/at (false) the last-listener-query time
 * @sflag: value for the MLDv2 "suppress router-side processing" bit
 * @igmp_type: out - ICMPv6 type placed in the packet (for stats counting)
 * @need_rexmit: out - set when some included source still has
 *	retransmissions pending
 *
 * Returns the skb, or NULL if nothing needs to be sent / on error.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	/* MLDv1 header by default; MLDv2 may grow by the source list */
	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* last listener query time: sources are partitioned
			 * by whether their timer expires after this point
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* first pass: count matching sources to size the pkt */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			/* no sources on this side of llqt - nothing to send */
			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 for the Hop-by-Hop extension header (Router Alert) */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(dev: brmctx->br->dev, length: pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(dst: eth->h_source, src: brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, len: sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, offset: skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;	/* link-local only */
	ip6h->daddr = *ip6_dst;
	/* no usable IPv6 source address -> remember that and give up */
	if (ipv6_dev_get_saddr(net: dev_net(dev: brmctx->br->dev), dev: brmctx->br->dev,
			       daddr: &ip6h->daddr, srcprefs: 0, saddr: &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br: brmctx->br, opt: BROPT_HAS_IPV6_ADDR, on: false);
		return NULL;
	}

	br_opt_toggle(br: brmctx->br, opt: BROPT_HAS_IPV6_ADDR, on: true);
	/* derive the multicast MAC from the destination address */
	ipv6_eth_mc_map(addr: &ip6h->daddr, buf: eth->h_dest);

	/* 8-byte Hop-by-Hop header carrying the Router Alert (MLD) option */
	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, len: sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, offset: skb->len);
	/* max resp delay: QRI for general queries, LLQI for group queries */
	interval = ipv6_addr_any(a: group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the source list using the same predicate
		 * as the sizing pass and consume one rexmit credit per source
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes must agree - list changed underneath otherwise */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* only versions 1 and 2 are handled above */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(saddr: &ip6h->saddr, daddr: &ip6h->daddr, len: mld_hdr_size,
				IPPROTO_ICMPV6,
				sum: csum_partial(buff: csum_start, len: mld_hdr_size, sum: 0));
	skb_put(skb, len: mld_hdr_size);
	/* leave skb->data at the network header for local delivery/xmit */
	__skb_pull(skb, len: sizeof(*eth));

out:
	return skb;
}
1236 | #endif |
1237 | |
/* Protocol-dispatching wrapper around the IGMP/MLD query builders.
 * When @ip_dst is NULL the all-hosts destination is used: 224.0.0.1 for
 * IPv4, ff02::1 for IPv6.  Returns NULL for unknown protocols.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip_dst: ip4_dst, group: group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1, the all-nodes link-local group */
			ipv6_addr_set(addr: &ip6_dst, htonl(0xff020000), w2: 0, w3: 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    ip6_dst: &ip6_dst, group: &group->dst.ip6,
						    with_srcs, over_llqt: over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
1277 | |
/* Look up or create the MDB entry for @group.
 * Caller holds the bridge multicast lock (GFP_ATOMIC allocation).
 * If the MDB is full, multicast snooping is disabled bridge-wide and
 * -E2BIG is returned.  Returns the (new or existing) entry or ERR_PTR.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, dst: group);
	if (mp)
		return mp;

	/* hash_max reached: give up on snooping rather than dropping */
	if (atomic_read(v: &br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(dev: br->dev, group);
		br_mc_disabled_update(dev: br->dev, value: false, NULL);
		br_opt_toggle(br, opt: BROPT_MULTICAST_ENABLED, on: false);
		return ERR_PTR(error: -E2BIG);
	}

	mp = kzalloc(size: sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(error: -ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(ht: &br->mdb_hash_tbl, obj: &mp->rhnode,
					    params: br_mdb_rht_params);
	if (err) {
		kfree(objp: mp);
		mp = ERR_PTR(error: err);
	} else {
		hlist_add_head_rcu(n: &mp->mdb_node, h: &br->mdb_list);
	}

	return mp;
}
1314 | |
/* Timer callback: a source entry's timer ran out.
 * In INCLUDE mode the source is removed (and the whole port group if it
 * was the last source); in EXCLUDE mode the source is kept but its
 * forwarding state is updated instead.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(lock: &br->multicast_lock);
	/* bail out if the entry was unlinked or the timer was re-armed
	 * while we were waiting for the lock
	 */
	if (hlist_unhashed(h: &src->node) || !netif_running(dev: br->dev) ||
	    timer_pending(timer: &src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, fastleave: false);
		if (!hlist_empty(h: &pg->src_list))
			goto out;
		/* last source gone - tear down the port group too */
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(lock: &br->multicast_lock);
}
1339 | |
/* Linear search of @pg's source list for the source address in @ip.
 * Returns the matching entry or NULL.  The address is compared per
 * protocol family; unknown protocols never match.
 */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(a1: &ent->addr.src.ip6, a2: &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}
1362 | |
/* Allocate and link a new source entry on @pg's source list.
 * Rejects invalid source addresses (zeronet/multicast for IPv4,
 * any/multicast for IPv6) and enforces the per-group source limit.
 * Returns the new entry or NULL.  Caller holds the multicast lock.
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(addr: src_ip->src.ip4) ||
		    ipv4_is_multicast(addr: src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(a: &src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(addr: &src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(size: sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(n: &grp_src->node, h: &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
1401 | |
/* Allocate, initialize and link a new port group entry for @group on
 * @port, inserted before @next on the MDB entry's port list.
 * (S, G) entries are additionally inserted into the per-bridge
 * source-group hash table.  On any failure the per-port group count
 * taken at entry is released again and NULL is returned.
 * Caller holds the multicast lock.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	/* reserve a slot in the per-port group accounting first */
	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(size: sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group" );
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* only (S, G) entries live in the source-group hash */
	if (!br_multicast_is_star_g(ip: group) &&
	    rhashtable_lookup_insert_fast(ht: &port->br->sg_port_tbl, obj: &p->rhnode,
					  params: br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group" );
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(n: &p->mglist, h: &port->mglist);

	/* no host source restriction means "match any source MAC" */
	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(addr: p->eth_addr);

	return p;

free_out:
	kfree(objp: p);
dec_out:
	br_multicast_port_ngroups_dec(port, vid: group->vid);
	return NULL;
}
1460 | |
/* Unlink a port group entry (including its (S, G) hash entry when
 * applicable), free it and release the per-port group accounting.
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	/* snapshot the vid before freeing @p */
	__u16 vid = p->key.addr.vid;

	hlist_del_init(n: &p->mglist);
	if (!br_multicast_is_star_g(ip: &p->key.addr))
		rhashtable_remove_fast(ht: &port->br->sg_port_tbl, obj: &p->rhnode,
				       params: br_sg_port_rht_params);
	kfree(objp: p);
	br_multicast_port_ngroups_dec(port, vid);
}
1473 | |
/* Mark the bridge device itself as a member of @mp's group, optionally
 * sending an RTM_NEWMDB notification on the transition, and (re)arm the
 * membership timer.  L2 groups have no membership timer.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		/* (*, G) host state also affects (S, G) forwarding entries */
		if (br_multicast_is_star_g(ip: &mp->addr))
			br_multicast_star_g_host_state(star_mp: mp);
		if (notify)
			br_mdb_notify(dev: mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(group: &mp->addr))
		return;

	mod_timer(timer: &mp->timer, expires: jiffies + brmctx->multicast_membership_interval);
}
1490 | |
1491 | void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify) |
1492 | { |
1493 | if (!mp->host_joined) |
1494 | return; |
1495 | |
1496 | mp->host_joined = false; |
1497 | if (br_multicast_is_star_g(ip: &mp->addr)) |
1498 | br_multicast_star_g_host_state(star_mp: mp); |
1499 | if (notify) |
1500 | br_mdb_notify(dev: mp->br->dev, mp, NULL, RTM_DELMDB); |
1501 | } |
1502 | |
/* Core join handling: attach a membership for @group either to the
 * bridge itself (host join, when @pmctx is NULL) or to @pmctx's port.
 * Creates the MDB entry and the port group as needed; the port list is
 * walked so that the new entry is inserted keeping its existing order
 * (entries with a numerically larger port pointer first).
 * For IGMPv2/MLDv1 joins the membership timer is (re)armed.
 * Returns the port group, NULL for host joins/ignored contexts, or
 * ERR_PTR on failure.  Caller holds the multicast lock.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(br: brmctx->br, group);
	if (IS_ERR(ptr: mp))
		return ERR_CAST(ptr: mp);

	/* no port context: this is a join by the bridge device itself */
	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, notify: true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port: pmctx->port, src))
			goto found;
		/* insertion point: first entry with a smaller port pointer */
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(port: pmctx->port, group, next: *pp, flags: 0, src,
				       filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(error: -ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(dev: brmctx->br->dev, mp, pg: p, RTM_NEWMDB);

found:
	/* v2/v1 reports carry no source lists - refresh the group timer */
	if (igmpv2_mldv1)
		mod_timer(timer: &p->timer,
			  expires: now + brmctx->multicast_membership_interval);

out:
	return p;
}
1557 | |
1558 | static int br_multicast_add_group(struct net_bridge_mcast *brmctx, |
1559 | struct net_bridge_mcast_port *pmctx, |
1560 | struct br_ip *group, |
1561 | const unsigned char *src, |
1562 | u8 filter_mode, |
1563 | bool igmpv2_mldv1) |
1564 | { |
1565 | struct net_bridge_port_group *pg; |
1566 | int err; |
1567 | |
1568 | spin_lock(lock: &brmctx->br->multicast_lock); |
1569 | pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode, |
1570 | igmpv2_mldv1, blocked: false); |
1571 | /* NULL is considered valid for host joined groups */ |
1572 | err = PTR_ERR_OR_ZERO(ptr: pg); |
1573 | spin_unlock(lock: &brmctx->br->multicast_lock); |
1574 | |
1575 | return err; |
1576 | } |
1577 | |
1578 | static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx, |
1579 | struct net_bridge_mcast_port *pmctx, |
1580 | __be32 group, |
1581 | __u16 vid, |
1582 | const unsigned char *src, |
1583 | bool igmpv2) |
1584 | { |
1585 | struct br_ip br_group; |
1586 | u8 filter_mode; |
1587 | |
1588 | if (ipv4_is_local_multicast(addr: group)) |
1589 | return 0; |
1590 | |
1591 | memset(&br_group, 0, sizeof(br_group)); |
1592 | br_group.dst.ip4 = group; |
1593 | br_group.proto = htons(ETH_P_IP); |
1594 | br_group.vid = vid; |
1595 | filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE; |
1596 | |
1597 | return br_multicast_add_group(brmctx, pmctx, group: &br_group, src, |
1598 | filter_mode, igmpv2_mldv1: igmpv2); |
1599 | } |
1600 | |
1601 | #if IS_ENABLED(CONFIG_IPV6) |
1602 | static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx, |
1603 | struct net_bridge_mcast_port *pmctx, |
1604 | const struct in6_addr *group, |
1605 | __u16 vid, |
1606 | const unsigned char *src, |
1607 | bool mldv1) |
1608 | { |
1609 | struct br_ip br_group; |
1610 | u8 filter_mode; |
1611 | |
1612 | if (ipv6_addr_is_ll_all_nodes(addr: group)) |
1613 | return 0; |
1614 | |
1615 | memset(&br_group, 0, sizeof(br_group)); |
1616 | br_group.dst.ip6 = *group; |
1617 | br_group.proto = htons(ETH_P_IPV6); |
1618 | br_group.vid = vid; |
1619 | filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE; |
1620 | |
1621 | return br_multicast_add_group(brmctx, pmctx, group: &br_group, src, |
1622 | filter_mode, igmpv2_mldv1: mldv1); |
1623 | } |
1624 | #endif |
1625 | |
1626 | static bool br_multicast_rport_del(struct hlist_node *rlist) |
1627 | { |
1628 | if (hlist_unhashed(h: rlist)) |
1629 | return false; |
1630 | |
1631 | hlist_del_init_rcu(n: rlist); |
1632 | return true; |
1633 | } |
1634 | |
1635 | static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx) |
1636 | { |
1637 | return br_multicast_rport_del(rlist: &pmctx->ip4_rlist); |
1638 | } |
1639 | |
/* Drop @pmctx from the IPv6 router-port list, if present.
 * Without IPv6 support there is no such list, so nothing is removed.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(rlist: &pmctx->ip6_rlist);
#else
	return false;
#endif
}
1648 | |
/* Common body of the per-port router timer callbacks: remove the port
 * from the given router-port list unless its router type is static
 * (disabled/permanent) or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(lock: &br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(timer: t))
		goto out;

	del = br_multicast_rport_del(rlist);
	/* only notify when a removal actually happened */
	br_multicast_rport_del_notify(pmctx, deleted: del);
out:
	spin_unlock(lock: &br->multicast_lock);
}
1667 | |
1668 | static void br_ip4_multicast_router_expired(struct timer_list *t) |
1669 | { |
1670 | struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, |
1671 | ip4_mc_router_timer); |
1672 | |
1673 | br_multicast_router_expired(pmctx, t, rlist: &pmctx->ip4_rlist); |
1674 | } |
1675 | |
1676 | #if IS_ENABLED(CONFIG_IPV6) |
1677 | static void br_ip6_multicast_router_expired(struct timer_list *t) |
1678 | { |
1679 | struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, |
1680 | ip6_mc_router_timer); |
1681 | |
1682 | br_multicast_router_expired(pmctx, t, rlist: &pmctx->ip6_rlist); |
1683 | } |
1684 | #endif |
1685 | |
1686 | static void br_mc_router_state_change(struct net_bridge *p, |
1687 | bool is_mc_router) |
1688 | { |
1689 | struct switchdev_attr attr = { |
1690 | .orig_dev = p->dev, |
1691 | .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, |
1692 | .flags = SWITCHDEV_F_DEFER, |
1693 | .u.mrouter = is_mc_router, |
1694 | }; |
1695 | |
1696 | switchdev_port_attr_set(dev: p->dev, attr: &attr, NULL); |
1697 | } |
1698 | |
/* Common body of the bridge-local router timers: when neither address
 * family still considers the bridge a router (and the router type is
 * not statically configured), tell switchdev the router state is gone.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(lock: &brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(p: brmctx->br, is_mc_router: false);
out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
1713 | |
1714 | static void br_ip4_multicast_local_router_expired(struct timer_list *t) |
1715 | { |
1716 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1717 | ip4_mc_router_timer); |
1718 | |
1719 | br_multicast_local_router_expired(brmctx, timer: t); |
1720 | } |
1721 | |
1722 | #if IS_ENABLED(CONFIG_IPV6) |
1723 | static void br_ip6_multicast_local_router_expired(struct timer_list *t) |
1724 | { |
1725 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1726 | ip6_mc_router_timer); |
1727 | |
1728 | br_multicast_local_router_expired(brmctx, timer: t); |
1729 | } |
1730 | #endif |
1731 | |
/* Common body of the "other querier present" timers: the foreign
 * querier timed out, so take over by starting our own query cycle,
 * unless the bridge/context is down or snooping is disabled.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!netif_running(dev: brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
1746 | |
1747 | static void br_ip4_multicast_querier_expired(struct timer_list *t) |
1748 | { |
1749 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1750 | ip4_other_query.timer); |
1751 | |
1752 | br_multicast_querier_expired(brmctx, query: &brmctx->ip4_own_query); |
1753 | } |
1754 | |
1755 | #if IS_ENABLED(CONFIG_IPV6) |
1756 | static void br_ip6_multicast_querier_expired(struct timer_list *t) |
1757 | { |
1758 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1759 | ip6_other_query.timer); |
1760 | |
1761 | br_multicast_querier_expired(brmctx, query: &brmctx->ip6_own_query); |
1762 | } |
1763 | #endif |
1764 | |
/* Record this bridge's own source address (taken from the query skb it
 * just built) as the elected querier address for @ip's address family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1776 | |
/* Build and emit one query.  With a port context the query is
 * transmitted out of that port (through the bridge output hook);
 * without one the bridge elects itself querier and loops the query
 * back into its own input path.
 * When @sflag is set, the query first covers sources over the last
 * member query time and is then rebuilt and resent once for the
 * remaining sources.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	/* suppress flag set -> start with the over-lmqt source partition */
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, igmp_type: &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(br: brmctx->br, p: pmctx->port, skb, type: igmp_type,
				   dir: BR_MCAST_DIR_TX);
		NF_HOOK(pf: NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			net: dev_net(dev: pmctx->port->dev), NULL, skb, NULL, out: skb->dev,
			okfn: br_dev_queue_push_xmit);

		/* second round for the sources under the lmqt partition */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, ip: group, skb);
		br_multicast_count(br: brmctx->br, NULL, skb, type: igmp_type,
				   dir: BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1820 | |
/* Take a consistent snapshot of the elected querier into @dest, using
 * the querier's seqcount to retry if an update races with the read.
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
1833 | |
/* Publish a new elected querier (port ifindex + address) under the
 * querier's seqcount so readers see a consistent pair.
 * Note: @brmctx is not referenced in the body.
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
1844 | |
/* Send a periodic general query for the address family that
 * @own_query belongs to, then re-arm the own-query timer (using the
 * startup interval until the startup query count is exhausted).
 * Nothing is sent while a foreign querier is active or the querier
 * function is disabled.  Without CONFIG_IPV6, a non-IPv4 @own_query
 * leaves other_query NULL and the function returns early.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	/* zero group address -> general query */
	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* defer to a still-active foreign querier */
	if (!other_query || timer_pending(timer: &other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, ifindex: 0, saddr: &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, group: &br_group, with_srcs: false,
				  sflag: 0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(timer: &own_query->timer, expires: time);
}
1893 | |
1894 | static void |
1895 | br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx, |
1896 | struct bridge_mcast_own_query *query) |
1897 | { |
1898 | struct net_bridge *br = pmctx->port->br; |
1899 | struct net_bridge_mcast *brmctx; |
1900 | |
1901 | spin_lock(lock: &br->multicast_lock); |
1902 | if (br_multicast_port_ctx_state_stopped(pmctx)) |
1903 | goto out; |
1904 | |
1905 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
1906 | if (query->startup_sent < brmctx->multicast_startup_query_count) |
1907 | query->startup_sent++; |
1908 | |
1909 | br_multicast_send_query(brmctx, pmctx, own_query: query); |
1910 | |
1911 | out: |
1912 | spin_unlock(lock: &br->multicast_lock); |
1913 | } |
1914 | |
/* IPv4 own-query timer callback: recover the owning port context from the
 * embedded timer and run the common expiry handler for its IPv4 query.
 */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, query: &pmctx->ip4_own_query);
}
1922 | |
1923 | #if IS_ENABLED(CONFIG_IPV6) |
/* IPv6 own-query timer callback: recover the owning port context from the
 * embedded timer and run the common expiry handler for its IPv6 query.
 */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, query: &pmctx->ip6_own_query);
}
1931 | #endif |
1932 | |
/* Retransmission timer for a port group's pending queries: resends group
 * queries and group-and-source queries while retransmissions are owed, and
 * re-arms itself as long as anything is still pending. Only does work when
 * we are the active querier (no foreign querier timer running).
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(lock: &br->multicast_lock);
	if (!netif_running(dev: br->dev) || hlist_unhashed(h: &pg->mglist) ||
	    !br_opt_get(br, opt: BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* don't transmit while a foreign querier is active */
	if (!other_query || timer_pending(timer: &other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* group-specific query; sflag: 1 — presumably the IGMPv3/MLDv2
		 * "suppress router-side processing" flag, confirm in
		 * __br_multicast_send_query()
		 */
		__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
					  group: &pg->key.addr, with_srcs: false, sflag: 1, NULL);
	}
	/* group-and-source query; need_rexmit is set if sources still owe
	 * retransmissions
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
				  group: &pg->key.addr, with_srcs: true, sflag: 0, need_rexmit: &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(timer: &pg->rexmit_timer, expires: jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(lock: &br->multicast_lock);
}
1978 | |
1979 | static int br_mc_disabled_update(struct net_device *dev, bool value, |
1980 | struct netlink_ext_ack *extack) |
1981 | { |
1982 | struct switchdev_attr attr = { |
1983 | .orig_dev = dev, |
1984 | .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, |
1985 | .flags = SWITCHDEV_F_DEFER, |
1986 | .u.mc_disabled = !value, |
1987 | }; |
1988 | |
1989 | return switchdev_port_attr_set(dev, attr: &attr, extack); |
1990 | } |
1991 | |
/* Initialize a per-port (or per-port-vlan when @vlan is non-NULL)
 * multicast context: back-pointers, the default router mode, and the
 * router/own-query timers for each IP family.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
2010 | |
/* Tear down a port multicast context: synchronously stop the router
 * timers armed by br_multicast_port_ctx_init(). The own-query timers are
 * not handled here; they are stopped on port disable
 * (__br_multicast_disable_port_ctx()).
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(timer: &pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(timer: &pmctx->ip4_mc_router_timer);
}
2018 | |
/* Multicast setup when a port is added to a bridge: set the default EHT
 * hosts limit, init the port's multicast context, tell switchdev drivers
 * the current snooping state (-EOPNOTSUPP is tolerated — no offload) and
 * allocate per-cpu multicast statistics.
 * Returns 0 on success or a negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, pmctx: &port->multicast_ctx);

	err = br_mc_disabled_update(dev: port->dev,
				    value: br_opt_get(br: port->br,
						   opt: BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
2039 | |
/* Multicast cleanup when a port leaves the bridge: flush all remaining
 * port groups under the lock, then run the collected garbage outside it,
 * deinit the port context and free the per-cpu stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(lock: &br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(old: &br->mcast_gc_list, new: &deleted_head);
	spin_unlock_bh(lock: &br->multicast_lock);
	/* free the collected entries with the multicast lock dropped */
	br_multicast_gc(head: &deleted_head);
	br_multicast_port_ctx_deinit(pmctx: &port->multicast_ctx);
	free_percpu(pdata: port->mcast_stats);
}
2057 | |
/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately. The try_to_del_timer_sync()/del_timer() pair
 * only re-arms when the timer could actually be removed, i.e. its
 * handler is not concurrently running on another CPU.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(timer: &query->timer) >= 0 ||
	    del_timer(timer: &query->timer))
		mod_timer(timer: &query->timer, expires: jiffies);
}
2066 | |
/* Enable multicast processing on a port (or port-vlan) context: kick the
 * own-query timers, register the port as a router port when configured
 * permanent, and for vlan contexts recompute the cached mdb entry count.
 * No-op unless snooping is enabled and the bridge device is running.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED) ||
	    !netif_running(dev: br->dev))
		return;

	br_multicast_enable(query: &pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(query: &pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */

		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}
2105 | |
2106 | void br_multicast_enable_port(struct net_bridge_port *port) |
2107 | { |
2108 | struct net_bridge *br = port->br; |
2109 | |
2110 | spin_lock_bh(lock: &br->multicast_lock); |
2111 | __br_multicast_enable_port_ctx(pmctx: &port->multicast_ctx); |
2112 | spin_unlock_bh(lock: &br->multicast_lock); |
2113 | } |
2114 | |
/* Disable multicast processing on a port (or port-vlan) context: flush
 * all non-permanent port groups (restricted to the vlan for vlan
 * contexts), drop the port from both families' router lists and stop all
 * pending timers. Caller holds the bridge multicast lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(br: pmctx->port->br, pg);

	/* accumulate both families' results so we notify only once below */
	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(timer: &pmctx->ip4_mc_router_timer);
	del_timer(timer: &pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(timer: &pmctx->ip6_mc_router_timer);
	del_timer(timer: &pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, deleted: del);
}
2137 | |
2138 | void br_multicast_disable_port(struct net_bridge_port *port) |
2139 | { |
2140 | spin_lock_bh(lock: &port->br->multicast_lock); |
2141 | __br_multicast_disable_port_ctx(pmctx: &port->multicast_ctx); |
2142 | spin_unlock_bh(lock: &port->br->multicast_lock); |
2143 | } |
2144 | |
2145 | static int __grp_src_delete_marked(struct net_bridge_port_group *pg) |
2146 | { |
2147 | struct net_bridge_group_src *ent; |
2148 | struct hlist_node *tmp; |
2149 | int deleted = 0; |
2150 | |
2151 | hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) |
2152 | if (ent->flags & BR_SGRP_F_DELETE) { |
2153 | br_multicast_del_group_src(src: ent, fastleave: false); |
2154 | deleted++; |
2155 | } |
2156 | |
2157 | return deleted; |
2158 | } |
2159 | |
/* Re-arm a source entry's timer, then let br_multicast_fwd_src_handle()
 * refresh the source's forwarding state accordingly.
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(timer: &src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
2166 | |
/* For every source marked BR_SGRP_F_SEND, lower its timer to the last
 * member query time (LMQT) and — when we are the active querier — send a
 * group-and-source specific query and schedule its retransmissions.
 * Implements the "Send Q(G,A)" style actions of the state tables below.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(dev: brmctx->br->dev) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				/* only arm per-source retransmissions when we
				 * may query ourselves (no foreign querier)
				 */
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(timer: &other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(src: ent, expires: lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(timer: &other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
				  group: &pg->key.addr, with_srcs: true, sflag: 1, NULL);

	/* rexmit timer runs the remaining retransmissions */
	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(timer: &pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(timer: &pg->rexmit_timer, expires: lmi);
}
2213 | |
/* Send a group-specific query ("Send Q(G)") when we are the active
 * querier and schedule its retransmissions via the group rexmit timer;
 * for EXCLUDE mode also lower the group timer towards LMQT.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(dev: brmctx->br->dev) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(timer: &other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* one query goes out now, the rest via the rexmit timer */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
					  group: &pg->key.addr, with_srcs: false, sflag: 0, NULL);
		if (!timer_pending(timer: &pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(timer: &pg->rexmit_timer, expires: lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(timer: &pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(timer: &pg->timer, expires: now + br_multicast_lmqt(brmctx));
}
2248 | |
2249 | /* State Msg type New state Actions |
2250 | * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI |
2251 | * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI |
2252 | * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI |
2253 | */ |
/* Handle IS_IN/ALLOW records (see the state table above): ensure every
 * reported source exists (creating one marks the set as changed) and
 * refresh its timer to GMI. EHT per-host tracking may also flag a change.
 * Returns true if the source set changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(src: ent, expires: now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
2286 | |
2287 | /* State Msg type New state Actions |
2288 | * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 |
2289 | * Delete (A-B) |
2290 | * Group Timer=GMI |
2291 | */ |
/* IS_EX received in INCLUDE mode (see the state table above): keep only
 * sources that are both present and reported (A*B), create any missing
 * reported sources (B-A) without arming their timers, and delete the
 * unreported rest (A-B). The caller switches the group to EXCLUDE.
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* provisionally mark everything for deletion; reported sources are
	 * unmarked below
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(src: ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2322 | |
2323 | /* State Msg type New state Actions |
2324 | * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI |
2325 | * Delete (X-A) |
2326 | * Delete (Y-A) |
2327 | * Group Timer=GMI |
2328 | */ |
/* IS_EX received in EXCLUDE mode (see the state table above): delete
 * sources missing from the report (X-A and Y-A) and add newly reported
 * ones (A-X-Y) with their timers set to GMI.
 * Returns true if the source set changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* provisionally mark everything for deletion; reported sources are
	 * unmarked below
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent) {
				__grp_src_mod_timer(src: ent,
						    expires: now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2369 | |
2370 | static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx, |
2371 | struct net_bridge_port_group *pg, void *h_addr, |
2372 | void *srcs, u32 nsrcs, size_t addr_size, |
2373 | int grec_type) |
2374 | { |
2375 | bool changed = false; |
2376 | |
2377 | switch (pg->filter_mode) { |
2378 | case MCAST_INCLUDE: |
2379 | __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size, |
2380 | grec_type); |
2381 | br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE); |
2382 | changed = true; |
2383 | break; |
2384 | case MCAST_EXCLUDE: |
2385 | changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs, |
2386 | addr_size, grec_type); |
2387 | break; |
2388 | } |
2389 | |
2390 | pg->filter_mode = MCAST_EXCLUDE; |
2391 | mod_timer(timer: &pg->timer, expires: jiffies + br_multicast_gmi(brmctx)); |
2392 | |
2393 | return changed; |
2394 | } |
2395 | |
2396 | /* State Msg type New state Actions |
2397 | * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI |
2398 | * Send Q(G,A-B) |
2399 | */ |
/* TO_IN received in INCLUDE mode (see the state table above): refresh/add
 * all reported sources (B) to GMI and query the remaining unreported
 * ones ("Send Q(G,A-B)"). Returns true if the source set changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* provisionally mark all existing sources for querying */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			/* reported source, no need to query it */
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(src: ent, expires: now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2441 | |
2442 | /* State Msg type New state Actions |
2443 | * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI |
2444 | * Send Q(G,X-A) |
2445 | * Send Q(G) |
2446 | */ |
/* TO_IN received in EXCLUDE mode (see the state table above): refresh/add
 * reported sources (A) to GMI, query the active (timer running)
 * unreported sources ("Send Q(G,X-A)") and also send a group-specific
 * query ("Send Q(G)"). Returns true if the source set changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only active sources (X, i.e. timer running) are candidates for
	 * querying
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(timer: &ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			if (timer_pending(timer: &ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(src: ent, expires: now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2493 | |
/* Dispatch a TO_IN record according to the group's filter mode and delete
 * the whole port group if EHT tracking says no hosts remain.
 * Returns true if the source set changed (always false after a delete,
 * since @pg must not be touched and a notification was already sent).
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(br: pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2524 | |
2525 | /* State Msg type New state Actions |
2526 | * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 |
2527 | * Delete (A-B) |
2528 | * Send Q(G,A*B) |
2529 | * Group Timer=GMI |
2530 | */ |
/* TO_EX received in INCLUDE mode (see the state table above): keep and
 * query reported existing sources (A*B, "Send Q(G,A*B)"), create missing
 * reported ones (B-A) without arming their timers, and delete the
 * unreported rest (A-B). The caller switches the group to EXCLUDE.
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* provisionally mark everything delete-only; reported sources are
	 * flipped to send-only below
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(src: ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
2567 | |
2568 | /* State Msg type New state Actions |
2569 | * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer |
2570 | * Delete (X-A) |
2571 | * Delete (Y-A) |
2572 | * Send Q(G,A-Y) |
2573 | * Group Timer=GMI |
2574 | */ |
/* TO_EX received in EXCLUDE mode (see the state table above): delete
 * sources missing from the report (X-A and Y-A), add newly reported ones
 * (A-X-Y) inheriting the group timer, and query the reported sources that
 * are still active ("Send Q(G,A-Y)").
 * Returns true if the source set changed.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* provisionally mark everything for deletion; reported sources are
	 * unmarked below
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent) {
				/* new source inherits the group timer */
				__grp_src_mod_timer(src: ent, expires: pg->timer.expires);
				changed = true;
			}
		}
		/* only sources still active (timer running) get queried */
		if (ent && timer_pending(timer: &ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2620 | |
/* Dispatch a TO_EX record according to the group's filter mode, then move
 * the group to EXCLUDE mode and re-arm the group timer to GMI.
 * Returns true if the source set changed.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(timer: &pg->timer, expires: jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2647 | |
2648 | /* State Msg type New state Actions |
2649 | * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B) |
2650 | */ |
/* BLOCK received in INCLUDE mode (see the state table above): the source
 * set itself stays unchanged, but the reported sources we do have (A*B)
 * are marked and queried ("Send Q(G,A*B)").
 * Returns true if EHT tracking changed the set.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2684 | |
2685 | /* State Msg type New state Actions |
2686 | * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer |
2687 | * Send Q(G,A-Y) |
2688 | */ |
/* BLOCK received in EXCLUDE mode (see the state table above): add reported
 * sources we don't know yet (A-X-Y, inheriting the group timer) and query
 * the reported sources that are still active ("Send Q(G,A-Y)").
 * Returns true if the source set changed.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent) {
				/* new source inherits the group timer */
				__grp_src_mod_timer(src: ent, expires: pg->timer.expires);
				changed = true;
			}
		}
		/* only sources still active (timer running) get queried */
		if (ent && timer_pending(timer: &ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2729 | |
/* Dispatch a BLOCK record according to the group's filter mode. If the
 * group is left with no sources in INCLUDE mode, or EHT tracking says no
 * hosts remain, the whole port group is deleted.
 * Returns true if the source set changed (always false after a delete,
 * since @pg must not be touched and a notification was already sent).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(h: &pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		/* set fast-leave only for the EHT-triggered deletion */
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(br: pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2761 | |
2762 | static struct net_bridge_port_group * |
2763 | br_multicast_find_port(struct net_bridge_mdb_entry *mp, |
2764 | struct net_bridge_port *p, |
2765 | const unsigned char *src) |
2766 | { |
2767 | struct net_bridge *br __maybe_unused = mp->br; |
2768 | struct net_bridge_port_group *pg; |
2769 | |
2770 | for (pg = mlock_dereference(mp->ports, br); |
2771 | pg; |
2772 | pg = mlock_dereference(pg->next, br)) |
2773 | if (br_port_group_equal(p: pg, port: p, src)) |
2774 | return pg; |
2775 | |
2776 | return NULL; |
2777 | } |
2778 | |
/* Process an IGMPv3 membership report: validate and walk every group
 * record, update basic group membership, and (when running IGMPv3) apply
 * the per-source filter state changes under the multicast lock.
 * Returns 0 on success or a negative errno on malformed input.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* Ensure the fixed part of the next record is linear */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ... and its source list (4 bytes per IPv4 source) */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* Unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* TO_IN{}/IS_IN{} is a leave; in IGMPv2 compat mode
			 * (or for a host-side report) treat it as such.
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* Per-source processing only applies in IGMPv3 mode */
		if (!pmctx || igmpv2)
			continue;

		spin_lock(lock: &brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(br: brmctx->br, dst: group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mp: mdst, p: pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src,
							   nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src,
							   nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     srcs: grec->grec_src,
						     nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src,
						    nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src,
						    nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     srcs: grec->grec_src,
						     nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		}
		if (changed)
			br_mdb_notify(dev: brmctx->br->dev, mp: mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(lock: &brmctx->br->multicast_lock);
	}

	return err;
}
2899 | |
#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLDv2 listener report; IPv6 counterpart of
 * br_ip4_multicast_igmp3_report().  Validates each group record before
 * use, updates membership, and applies per-source filter changes under
 * the multicast lock when running MLDv2.
 * Returns 0 on success or a negative errno on malformed input.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, len: sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* Read grec_nsrcs safely (may live in a fragment) before
		 * trusting the record's claimed length.
		 */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, offset: nsrcs_offset,
					    len: sizeof(__nsrcs), buffer: &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len: len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* Unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* TO_IN{}/IS_IN{} is a leave; in MLDv1 compat mode
			 * (or for a host-side report) treat it as such.
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     group: &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 group: &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* Per-source processing only applies in MLDv2 mode */
		if (!pmctx || mldv1)
			continue;

		spin_lock(lock: &brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(br: brmctx->br, dst: &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mp: mdst, p: pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src, nsrcs,
							   addr_size: sizeof(struct in6_addr),
							   grec_type: grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src, nsrcs,
							   addr_size: sizeof(struct in6_addr),
							   grec_type: grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     srcs: grec->grec_src, nsrcs,
						     addr_size: sizeof(struct in6_addr),
						     grec_type: grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src, nsrcs,
						    addr_size: sizeof(struct in6_addr),
						    grec_type: grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src, nsrcs,
						    addr_size: sizeof(struct in6_addr),
						    grec_type: grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     srcs: grec->grec_src, nsrcs,
						     addr_size: sizeof(struct in6_addr),
						     grec_type: grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(dev: brmctx->br->dev, mp: mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(lock: &brmctx->br->multicast_lock);
	}

	return err;
}
#endif
3042 | |
/* Querier election: decide whether the query sender in @saddr should
 * become (or remain) the selected querier for its protocol family.
 * The lowest source address wins; a sender also wins when no own/other
 * query timer is running.  Returns true if the querier was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		/* No querier yet, or sender's address wins (is lower) */
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(a1: &saddr->src.ip6, a2: &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* Sender lost on address, but there is no active querier at all */
	if (!timer_pending(timer: own_timer) && !timer_pending(timer: other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, ifindex: port_ifidx, saddr);

	return true;
}
3083 | |
3084 | static struct net_bridge_port * |
3085 | __br_multicast_get_querier_port(struct net_bridge *br, |
3086 | const struct bridge_mcast_querier *querier) |
3087 | { |
3088 | int port_ifidx = READ_ONCE(querier->port_ifidx); |
3089 | struct net_bridge_port *p; |
3090 | struct net_device *dev; |
3091 | |
3092 | if (port_ifidx == 0) |
3093 | return NULL; |
3094 | |
3095 | dev = dev_get_by_index_rcu(net: dev_net(dev: br->dev), ifindex: port_ifidx); |
3096 | if (!dev) |
3097 | return NULL; |
3098 | p = br_port_get_rtnl_rcu(dev); |
3099 | if (!p || p->br != br) |
3100 | return NULL; |
3101 | |
3102 | return p; |
3103 | } |
3104 | |
3105 | size_t br_multicast_querier_state_size(void) |
3106 | { |
3107 | return nla_total_size(payload: 0) + /* nest attribute */ |
3108 | nla_total_size(payload: sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */ |
3109 | nla_total_size(payload: sizeof(int)) + /* BRIDGE_QUERIER_IP_PORT */ |
3110 | nla_total_size_64bit(payload: sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */ |
3111 | #if IS_ENABLED(CONFIG_IPV6) |
3112 | nla_total_size(payload: sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */ |
3113 | nla_total_size(payload: sizeof(int)) + /* BRIDGE_QUERIER_IPV6_PORT */ |
3114 | nla_total_size_64bit(payload: sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */ |
3115 | #endif |
3116 | 0; |
3117 | } |
3118 | |
/* Dump the IPv4 (and, if enabled, IPv6) querier state of @brmctx into a
 * netlink nest attribute of type @nest_attr.  The nest is cancelled if it
 * ends up empty.  Returns 0 on success or -EMSGSIZE when skb runs out of
 * room.  Must be called with rtnl or rcu held.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, attrtype: nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	/* Skip IPv4 state when we're not querier and none is known */
	if (!brmctx->multicast_querier &&
	    !timer_pending(timer: &brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(querier: &brmctx->ip4_querier, dest: &querier);
	if (nla_put_in_addr(skb, attrtype: BRIDGE_QUERIER_IP_ADDRESS,
			    addr: querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(br: brmctx->br, querier: &querier);
	if (timer_pending(timer: &brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, attrtype: BRIDGE_QUERIER_IP_OTHER_TIMER,
			       value: br_timer_value(timer: &brmctx->ip4_other_query.timer),
			       padattr: BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, attrtype: BRIDGE_QUERIER_IP_PORT, value: p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(timer: &brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(querier: &brmctx->ip6_querier, dest: &querier);
	if (nla_put_in6_addr(skb, attrtype: BRIDGE_QUERIER_IPV6_ADDRESS,
			     addr: &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(br: brmctx->br, querier: &querier);
	if (timer_pending(timer: &brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, attrtype: BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       value: br_timer_value(timer: &brmctx->ip6_other_query.timer),
			       padattr: BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, attrtype: BRIDGE_QUERIER_IPV6_PORT,
			       value: p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, start: nest);
	/* Nothing was added - drop the empty nest */
	if (!nla_len(nla: nest))
		nla_nest_cancel(skb, start: nest);

	return 0;

out_err:
	nla_nest_cancel(skb, start: nest);
	return -EMSGSIZE;
}
3194 | |
3195 | static void |
3196 | br_multicast_update_query_timer(struct net_bridge_mcast *brmctx, |
3197 | struct bridge_mcast_other_query *query, |
3198 | unsigned long max_delay) |
3199 | { |
3200 | if (!timer_pending(timer: &query->timer)) |
3201 | query->delay_time = jiffies + max_delay; |
3202 | |
3203 | mod_timer(timer: &query->timer, expires: jiffies + brmctx->multicast_querier_interval); |
3204 | } |
3205 | |
3206 | static void br_port_mc_router_state_change(struct net_bridge_port *p, |
3207 | bool is_mc_router) |
3208 | { |
3209 | struct switchdev_attr attr = { |
3210 | .orig_dev = p->dev, |
3211 | .id = SWITCHDEV_ATTR_ID_PORT_MROUTER, |
3212 | .flags = SWITCHDEV_F_DEFER, |
3213 | .u.mrouter = is_mc_router, |
3214 | }; |
3215 | |
3216 | switchdev_port_attr_set(dev: p->dev, attr: &attr, NULL); |
3217 | } |
3218 | |
/* Map a router-list hlist node back to its bridge port.  The containing
 * member differs depending on whether @rlist came from the IPv4 or the
 * IPv6 router list, so the list head is used to pick the right one.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}
3237 | |
3238 | static struct hlist_node * |
3239 | br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx, |
3240 | struct net_bridge_port *port, |
3241 | struct hlist_head *mc_router_list) |
3242 | |
3243 | { |
3244 | struct hlist_node *slot = NULL; |
3245 | struct net_bridge_port *p; |
3246 | struct hlist_node *rlist; |
3247 | |
3248 | hlist_for_each(rlist, mc_router_list) { |
3249 | p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist); |
3250 | |
3251 | if ((unsigned long)port >= (unsigned long)p) |
3252 | break; |
3253 | |
3254 | slot = rlist; |
3255 | } |
3256 | |
3257 | return slot; |
3258 | } |
3259 | |
/* Return true if the port is not on the router list of the *other*
 * protocol family than the one @rnode belongs to.  Without IPv6 there is
 * no other family, so it is trivially true.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(h: &pmctx->ip6_rlist);
	else
		return hlist_unhashed(h: &pmctx->ip4_rlist);
#else
	return true;
#endif
}
3272 | |
/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 * A notification is only sent when the port gains router status for the
 * first protocol family (to preserve pre-dual-stack behaviour).
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* Already on the list - nothing to do */
	if (!hlist_unhashed(h: rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, port: pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(n: rlist, prev: slot);
	else
		hlist_add_head_rcu(n: rlist, h: mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rnode: rlist)) {
		br_rtr_notify(dev: pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(p: pmctx->port, is_mc_router: true);
	}
}
3303 | |
3304 | /* Add port to router_list |
3305 | * list is maintained ordered by pointer value |
3306 | * and locked by br->multicast_lock and RCU |
3307 | */ |
3308 | static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx, |
3309 | struct net_bridge_mcast_port *pmctx) |
3310 | { |
3311 | br_multicast_add_router(brmctx, pmctx, rlist: &pmctx->ip4_rlist, |
3312 | mc_router_list: &brmctx->ip4_mc_router_list); |
3313 | } |
3314 | |
3315 | /* Add port to router_list |
3316 | * list is maintained ordered by pointer value |
3317 | * and locked by br->multicast_lock and RCU |
3318 | */ |
3319 | static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx, |
3320 | struct net_bridge_mcast_port *pmctx) |
3321 | { |
3322 | #if IS_ENABLED(CONFIG_IPV6) |
3323 | br_multicast_add_router(brmctx, pmctx, rlist: &pmctx->ip6_rlist, |
3324 | mc_router_list: &brmctx->ip6_mc_router_list); |
3325 | #endif |
3326 | } |
3327 | |
/* Mark the bridge (when @pmctx is NULL) or a port as a multicast router
 * after seeing evidence of one, and (re)arm the corresponding expiry
 * timer.  Ports configured as permanently-on or disabled routers are
 * left alone.  Called with br->multicast_lock held.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		/* Bridge itself: only track when in temporary-query mode */
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(p: brmctx->br, is_mc_router: true);
			mod_timer(timer, expires: now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, expires: now + brmctx->multicast_querier_interval);
}
3356 | |
3357 | static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx, |
3358 | struct net_bridge_mcast_port *pmctx) |
3359 | { |
3360 | struct timer_list *timer = &brmctx->ip4_mc_router_timer; |
3361 | struct hlist_node *rlist = NULL; |
3362 | |
3363 | if (pmctx) { |
3364 | timer = &pmctx->ip4_mc_router_timer; |
3365 | rlist = &pmctx->ip4_rlist; |
3366 | } |
3367 | |
3368 | br_multicast_mark_router(brmctx, pmctx, timer, rlist, |
3369 | mc_router_list: &brmctx->ip4_mc_router_list); |
3370 | } |
3371 | |
3372 | static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx, |
3373 | struct net_bridge_mcast_port *pmctx) |
3374 | { |
3375 | #if IS_ENABLED(CONFIG_IPV6) |
3376 | struct timer_list *timer = &brmctx->ip6_mc_router_timer; |
3377 | struct hlist_node *rlist = NULL; |
3378 | |
3379 | if (pmctx) { |
3380 | timer = &pmctx->ip6_mc_router_timer; |
3381 | rlist = &pmctx->ip6_rlist; |
3382 | } |
3383 | |
3384 | br_multicast_mark_router(brmctx, pmctx, timer, rlist, |
3385 | mc_router_list: &brmctx->ip6_mc_router_list); |
3386 | #endif |
3387 | } |
3388 | |
/* Handle an IPv4 general query heard from another querier: run the
 * election and, if the sender is selected, refresh the other-querier
 * timeout and mark the receiving port (or bridge) as a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (br_multicast_select_querier(brmctx, pmctx, saddr)) {
		br_multicast_update_query_timer(brmctx, query, max_delay);
		br_ip4_multicast_mark_router(brmctx, pmctx);
	}
}
3402 | |
3403 | #if IS_ENABLED(CONFIG_IPV6) |
3404 | static void |
3405 | br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx, |
3406 | struct net_bridge_mcast_port *pmctx, |
3407 | struct bridge_mcast_other_query *query, |
3408 | struct br_ip *saddr, |
3409 | unsigned long max_delay) |
3410 | { |
3411 | if (!br_multicast_select_querier(brmctx, pmctx, saddr)) |
3412 | return; |
3413 | |
3414 | br_multicast_update_query_timer(brmctx, query, max_delay); |
3415 | br_ip6_multicast_mark_router(brmctx, pmctx); |
3416 | } |
3417 | #endif |
3418 | |
/* Process a received IGMP query (v1, v2 or v3 distinguished by length).
 * A general query feeds querier election; a group-specific query lowers
 * the membership timers of the matching group and its ports so silent
 * members expire sooner.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query; a v1 query has code 0 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* Ignore source-specific queries, and suppressed
		 * group-specific ones when we run IGMPv3 ourselves.
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* General query: feed querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						query: &brmctx->ip4_other_query,
						saddr: &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br: brmctx->br, dst: group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* Shorten the host membership timer, but never extend it */
	if (mp->host_joined &&
	    (timer_pending(timer: &mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(timer: &mp->timer) >= 0))
		mod_timer(timer: &mp->timer, expires: now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(timer: &p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(timer: &p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(timer: &p->timer, expires: now + max_delay);
	}

out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
3498 | |
#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query (v1 or v2 distinguished by length); IPv6
 * counterpart of br_ip4_multicast_query().  Returns 0 or a negative
 * errno on truncated input.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, len: offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, len: offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* Ignore suppressed group-specific queries when we run
		 * MLDv2 ourselves.
		 */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(a: &mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(a: group);

	if (is_general_query) {
		/* General query: feed querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						query: &brmctx->ip6_other_query,
						saddr: &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br: brmctx->br, dst: group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	/* Shorten the host membership timer, but never extend it */
	if (mp->host_joined &&
	    (timer_pending(timer: &mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(timer: &mp->timer) >= 0))
		mod_timer(timer: &mp->timer, expires: now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(timer: &p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(timer: &p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(timer: &p->timer, expires: now + max_delay);
	}

out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
	return err;
}
#endif
3589 | |
/* Common leave handling for IGMP/MLD: on fast-leave remove the port
 * group immediately; otherwise (when we act as querier) send a
 * group-specific query and shorten the relevant membership timers so the
 * group expires unless another member answers.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(br: brmctx->br, dst: group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Fast leave: drop the matching non-permanent port group
		 * right away instead of querying.
		 */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port: pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, pg: p, pp);
		}
		goto out;
	}

	/* Another querier is active - it handles the leave */
	if (timer_pending(timer: &other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* We are querier: send a group-specific query and give
		 * remaining members last_member_count intervals to answer.
		 */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, group: &mp->addr,
					  with_srcs: false, sflag: 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(timer: &own_query->timer, expires: time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, port: pmctx->port, src))
				continue;

			if (!hlist_unhashed(h: &p->mglist) &&
			    (timer_pending(timer: &p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(timer: &p->timer) >= 0)) {
				mod_timer(timer: &p->timer, expires: time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* Host leave: only shorten the host membership timer */
		if (mp->host_joined &&
		    (timer_pending(timer: &mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(timer: &mp->timer) >= 0)) {
			mod_timer(timer: &mp->timer, expires: time);
		}

		goto out;
	}

	/* Port leave: shorten the timer of the matching port group */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(h: &p->mglist) &&
		    (timer_pending(timer: &p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(timer: &p->timer) >= 0)) {
			mod_timer(timer: &p->timer, expires: time);
		}

		break;
	}
out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
3691 | |
3692 | static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx, |
3693 | struct net_bridge_mcast_port *pmctx, |
3694 | __be32 group, |
3695 | __u16 vid, |
3696 | const unsigned char *src) |
3697 | { |
3698 | struct br_ip br_group; |
3699 | struct bridge_mcast_own_query *own_query; |
3700 | |
3701 | if (ipv4_is_local_multicast(addr: group)) |
3702 | return; |
3703 | |
3704 | own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query; |
3705 | |
3706 | memset(&br_group, 0, sizeof(br_group)); |
3707 | br_group.dst.ip4 = group; |
3708 | br_group.proto = htons(ETH_P_IP); |
3709 | br_group.vid = vid; |
3710 | |
3711 | br_multicast_leave_group(brmctx, pmctx, group: &br_group, |
3712 | other_query: &brmctx->ip4_other_query, |
3713 | own_query, src); |
3714 | } |
3715 | |
3716 | #if IS_ENABLED(CONFIG_IPV6) |
3717 | static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx, |
3718 | struct net_bridge_mcast_port *pmctx, |
3719 | const struct in6_addr *group, |
3720 | __u16 vid, |
3721 | const unsigned char *src) |
3722 | { |
3723 | struct br_ip br_group; |
3724 | struct bridge_mcast_own_query *own_query; |
3725 | |
3726 | if (ipv6_addr_is_ll_all_nodes(addr: group)) |
3727 | return; |
3728 | |
3729 | own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query; |
3730 | |
3731 | memset(&br_group, 0, sizeof(br_group)); |
3732 | br_group.dst.ip6 = *group; |
3733 | br_group.proto = htons(ETH_P_IPV6); |
3734 | br_group.vid = vid; |
3735 | |
3736 | br_multicast_leave_group(brmctx, pmctx, group: &br_group, |
3737 | other_query: &brmctx->ip6_other_query, |
3738 | own_query, src); |
3739 | } |
3740 | #endif |
3741 | |
3742 | static void br_multicast_err_count(const struct net_bridge *br, |
3743 | const struct net_bridge_port *p, |
3744 | __be16 proto) |
3745 | { |
3746 | struct bridge_mcast_stats __percpu *stats; |
3747 | struct bridge_mcast_stats *pstats; |
3748 | |
3749 | if (!br_opt_get(br, opt: BROPT_MULTICAST_STATS_ENABLED)) |
3750 | return; |
3751 | |
3752 | if (p) |
3753 | stats = p->mcast_stats; |
3754 | else |
3755 | stats = br->mcast_stats; |
3756 | if (WARN_ON(!stats)) |
3757 | return; |
3758 | |
3759 | pstats = this_cpu_ptr(stats); |
3760 | |
3761 | u64_stats_update_begin(syncp: &pstats->syncp); |
3762 | switch (proto) { |
3763 | case htons(ETH_P_IP): |
3764 | pstats->mstats.igmp_parse_errors++; |
3765 | break; |
3766 | #if IS_ENABLED(CONFIG_IPV6) |
3767 | case htons(ETH_P_IPV6): |
3768 | pstats->mstats.mld_parse_errors++; |
3769 | break; |
3770 | #endif |
3771 | } |
3772 | u64_stats_update_end(syncp: &pstats->syncp); |
3773 | } |
3774 | |
3775 | static void br_multicast_pim(struct net_bridge_mcast *brmctx, |
3776 | struct net_bridge_mcast_port *pmctx, |
3777 | const struct sk_buff *skb) |
3778 | { |
3779 | unsigned int offset = skb_transport_offset(skb); |
3780 | struct pimhdr *pimhdr, _pimhdr; |
3781 | |
3782 | pimhdr = skb_header_pointer(skb, offset, len: sizeof(_pimhdr), buffer: &_pimhdr); |
3783 | if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION || |
3784 | pim_hdr_type(pimhdr) != PIM_TYPE_HELLO) |
3785 | return; |
3786 | |
3787 | spin_lock(lock: &brmctx->br->multicast_lock); |
3788 | br_ip4_multicast_mark_router(brmctx, pmctx); |
3789 | spin_unlock(lock: &brmctx->br->multicast_lock); |
3790 | } |
3791 | |
3792 | static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx, |
3793 | struct net_bridge_mcast_port *pmctx, |
3794 | struct sk_buff *skb) |
3795 | { |
3796 | if (ip_hdr(skb)->protocol != IPPROTO_IGMP || |
3797 | igmp_hdr(skb)->type != IGMP_MRDISC_ADV) |
3798 | return -ENOMSG; |
3799 | |
3800 | spin_lock(lock: &brmctx->br->multicast_lock); |
3801 | br_ip4_multicast_mark_router(brmctx, pmctx); |
3802 | spin_unlock(lock: &brmctx->br->multicast_lock); |
3803 | |
3804 | return 0; |
3805 | } |
3806 | |
/* Snoop a received IPv4 packet: validate it as IGMP and dispatch on the
 * message type (reports, queries, leaves).  Non-IGMP multicast packets
 * may still mark the skb as mrouters-only or trigger PIM/MRD router-port
 * marking.  Returns 0 or a negative error from validation/processing.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* valid multicast but not IGMP: forward only to mrouters
		 * unless it is link-local; PIM hellos and MRD adverts mark
		 * the ingress port as a router port
		 */
		if (!ipv4_is_local_multicast(addr: ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(addr: ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(addr: ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		/* malformed IGMP - count the parse error */
		br_multicast_err_count(br: brmctx->br, p, proto: skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* v1/v2 reports are only of interest to multicast routers */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, group: ih->group, vid,
						 src, igmpv2: true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, group: ih->group, vid, src);
		break;
	}

	br_multicast_count(br: brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   dir: BR_MCAST_DIR_RX);

	return err;
}
3862 | |
3863 | #if IS_ENABLED(CONFIG_IPV6) |
/* Handle an IPv6 Multicast Router Discovery advertisement: mark the
 * receiving port as a multicast router port.  Other ICMPv6 types are
 * ignored.
 */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(lock: &brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
3875 | |
/* Snoop a received IPv6 packet: validate it as MLD and dispatch on the
 * message type (reports, queries, done).  Non-MLD multicast packets may
 * still mark the skb as mrouters-only or trigger MRD router-port
 * marking.  Returns 0 or a negative error from validation/processing.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	/* -ENOMSG: not MLD; -ENODATA: ICMPv6 with a hop-by-hop router
	 * alert but no MLD payload (may still be an MRD advert)
	 */
	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(addr: &ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(addr: &ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		/* malformed MLD - count the parse error */
		br_multicast_err_count(br: brmctx->br, p, proto: skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* MLDv1 reports are only of interest to multicast routers */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, group: &mld->mld_mca,
						 vid, src, mldv1: true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, group: &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(br: brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   dir: BR_MCAST_DIR_RX);

	return err;
}
3929 | #endif |
3930 | |
/* Entry point for multicast snooping on a received frame.
 * May rewrite *brmctx/*pmctx to point at the per-vlan multicast contexts
 * when per-vlan snooping is enabled, then dispatches to the IPv4/IPv6
 * handler based on skb->protocol.  Returns 0 for non-IP protocols or
 * when snooping is disabled.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br: (*brmctx)->br, opt: BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get(br: (*brmctx)->br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(v: vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		/* skip snooping entirely if it is globally disabled for
		 * this vlan
		 */
		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(brmctx: *brmctx, pmctx: *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(brmctx: *brmctx, pmctx: *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
3977 | |
/* Own-query timer expiry: send the next general query for this context.
 * Skipped for vlan contexts whose snooping is disabled.  @querier is
 * currently unused here.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(lock: &brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	/* count startup-phase queries until the configured limit */
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, own_query: query);
out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
3993 | |
/* IPv4 own-query timer callback. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, query: &brmctx->ip4_own_query,
				   querier: &brmctx->ip4_querier);
}
4002 | |
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer callback. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, query: &brmctx->ip6_own_query,
				   querier: &brmctx->ip6_querier);
}
#endif
4013 | |
/* Deferred garbage collection: detach the pending-destruction list under
 * the multicast lock, then free the entries outside it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(lock: &br->multicast_lock);
	hlist_move_list(old: &br->mcast_gc_list, new: &deleted_head);
	spin_unlock_bh(lock: &br->multicast_lock);

	br_multicast_gc(head: &deleted_head);
}
4026 | |
/* Initialize a bridge multicast context (either the bridge-global one,
 * @vlan == NULL, or a per-vlan one) with protocol defaults and timers.
 * The interval values below appear to follow the IGMP/MLD protocol
 * defaults (e.g. 125s query interval) -- NOTE(review): confirm against
 * RFC 2236/3810 if changing them.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	/* querier state is read under a seqcount protected by multicast_lock */
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
4070 | |
/* Tear down a multicast context: synchronously stop all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
4075 | |
/* One-time multicast initialization for a newly created bridge device. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, brmctx: &br->multicast_ctx);

	/* snooping is on by default */
	br_opt_toggle(br, opt: BROPT_MULTICAST_ENABLED, on: true);
	br_opt_toggle(br, opt: BROPT_HAS_IPV6_ADDR, on: true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
4090 | |
/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device so
 * MRD advertisements are received.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(dev: br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(idev: in_dev);
}
4101 | |
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device so
 * MRD advertisements are received.
 */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers */
	ipv6_addr_set(addr: &addr, htonl(0xff020000), w2: 0, w3: 0, htonl(0x6a));
	ipv6_dev_mc_inc(dev: br->dev, addr: &addr);
}
#else
/* no-op when IPv6 is compiled out */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
4115 | |
/* Join the IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
4121 | |
/* Leave the IPv4 all-snoopers group previously joined by
 * br_ip4_multicast_join_snoopers().
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(dev: br->dev);

	/* a join must have taken a reference; missing in_dev is a bug */
	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(idev: in_dev);
}
4132 | |
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group previously joined by
 * br_ip6_multicast_join_snoopers().
 */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers */
	ipv6_addr_set(addr: &addr, htonl(0xff020000), w2: 0, w3: 0, htonl(0x6a));
	ipv6_dev_mc_dec(dev: br->dev, addr: &addr);
}
#else
/* no-op when IPv6 is compiled out */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
4146 | |
/* Leave the IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
4152 | |
4153 | static void __br_multicast_open_query(struct net_bridge *br, |
4154 | struct bridge_mcast_own_query *query) |
4155 | { |
4156 | query->startup_sent = 0; |
4157 | |
4158 | if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED)) |
4159 | return; |
4160 | |
4161 | mod_timer(timer: &query->timer, expires: jiffies); |
4162 | } |
4163 | |
/* (Re)start the IPv4 and IPv6 own-query cycles for a context. */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(br: brmctx->br, query: &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(br: brmctx->br, query: &brmctx->ip6_own_query);
#endif
}
4171 | |
/* Start multicast querying when the bridge is brought up.  With per-vlan
 * snooping enabled, each enabled bridge-vlan context is started instead
 * of the global one.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				/* only bridge-level vlan entries with
				 * snooping enabled get their own queries
				 */
				if (br_vlan_is_brentry(v: vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(brmctx: &vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(brmctx: &br->multicast_ctx);
	}
}
4195 | |
/* Synchronously stop all timers of a multicast context. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(timer: &brmctx->ip4_mc_router_timer);
	del_timer_sync(timer: &brmctx->ip4_other_query.timer);
	del_timer_sync(timer: &brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(timer: &brmctx->ip6_mc_router_timer);
	del_timer_sync(timer: &brmctx->ip6_other_query.timer);
	del_timer_sync(timer: &brmctx->ip6_own_query.timer);
#endif
}
4207 | |
/* Enable/disable multicast processing for one vlan entry (bridge-level
 * or per-port) by flipping BR_VLFLAG_MCAST_ENABLED and starting/stopping
 * the corresponding context.  No-op if the flag already matches @on.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(v: vlan)) {
		br = vlan->br;

		/* don't enable a bridge vlan that isn't a bridge entry or
		 * whose snooping is globally disabled
		 */
		if (!br_vlan_is_brentry(v: vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(brmctx: &vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(lock: &br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(lock: &br->multicast_lock);

		/* timers are started/stopped outside the lock; open arms
		 * timers, stop synchronously deletes them
		 */
		if (on)
			__br_multicast_open(brmctx: &vlan->br_mcast_ctx);
		else
			__br_multicast_stop(brmctx: &vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(pmctx: &vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(lock: &br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(pmctx: &vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(pmctx: &vlan->port_mcast_ctx);
		spin_unlock_bh(lock: &br->multicast_lock);
	}
}
4252 | |
4253 | static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on) |
4254 | { |
4255 | struct net_bridge_port *p; |
4256 | |
4257 | if (WARN_ON_ONCE(!br_vlan_is_master(vlan))) |
4258 | return; |
4259 | |
4260 | list_for_each_entry(p, &vlan->br->port_list, list) { |
4261 | struct net_bridge_vlan *vport; |
4262 | |
4263 | vport = br_vlan_find(vg: nbp_vlan_group(p), vid: vlan->vid); |
4264 | if (!vport) |
4265 | continue; |
4266 | br_multicast_toggle_one_vlan(vlan: vport, on); |
4267 | } |
4268 | |
4269 | if (br_vlan_is_brentry(v: vlan)) |
4270 | br_multicast_toggle_one_vlan(vlan, on); |
4271 | } |
4272 | |
/* Switch between global and per-vlan multicast snooping.  Requires vlan
 * filtering when enabling.  Returns 0 on success or if nothing changed,
 * -EINVAL when enabling without vlan filtering.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, opt: BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled" );
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(brmctx: &br->multicast_ctx);
	else
		__br_multicast_open(brmctx: &br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(port: p);
		else
			br_multicast_enable_port(port: p);
	}

	/* then toggle every vlan's multicast state to match */
	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
4311 | |
/* Toggle the global multicast-enabled flag of a vlan and propagate the
 * change to all matching vlan entries.  Returns false if nothing changed.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
4327 | |
/* Stop multicast querying when the bridge is brought down.  Mirrors
 * br_multicast_open(): per-vlan contexts when vlan snooping is enabled,
 * the global context otherwise.
 */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(v: vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(brmctx: &vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(brmctx: &br->multicast_ctx);
	}
}
4351 | |
/* Final multicast teardown when the bridge device is destroyed: flush
 * the mdb, run garbage collection synchronously, and wait for pending
 * RCU callbacks.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(lock: &br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(old: &br->mcast_gc_list, new: &deleted_head);
	spin_unlock_bh(lock: &br->multicast_lock);

	br_multicast_ctx_deinit(brmctx: &br->multicast_ctx);
	br_multicast_gc(head: &deleted_head);
	cancel_work_sync(work: &br->mcast_gc_work);

	/* ensure all RCU-deferred frees have completed */
	rcu_barrier();
}
4370 | |
/* Set the bridge-level multicast router mode.  Only DISABLED, PERM and
 * TEMP_QUERY are valid at bridge level; anything else returns -EINVAL.
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(lock: &brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		/* notify switchdev and stop any pending router timers */
		br_mc_router_state_change(p: brmctx->br, is_mc_router: val == MDB_RTR_TYPE_PERM);
		del_timer(timer: &brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(timer: &brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(p: brmctx->br, is_mc_router: false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(lock: &brmctx->br->multicast_lock);

	return err;
}
4400 | |
/* Emit an RTM_DELMDB router notification once a port has stopped being a
 * multicast router for both address families.  @deleted indicates that a
 * router-list removal actually happened.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(h: &pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(h: &pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(dev: pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(p: pmctx->port, is_mc_router: false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
4424 | |
/* Set the per-port multicast router mode.  Setting the current mode
 * again only refreshes the TEMP router timers.  Returns 0 on success,
 * -EINVAL for unknown modes.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(lock: &brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(timer: &pmctx->ip4_mc_router_timer,
				  expires: now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(timer: &pmctx->ip6_mc_router_timer,
				  expires: now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* remove from the router lists and stop the timers */
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(timer: &pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(timer: &pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, deleted: del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* router status now depends on received queries only */
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, deleted: del);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no timers, always on the lists */
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(timer: &pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(timer: &pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* temporary router port kept alive by the router timers */
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(lock: &brmctx->br->multicast_lock);

	return err;
}
4488 | |
4489 | int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router) |
4490 | { |
4491 | int err; |
4492 | |
4493 | if (br_vlan_is_master(v)) |
4494 | err = br_multicast_set_router(brmctx: &v->br_mcast_ctx, val: mcast_router); |
4495 | else |
4496 | err = br_multicast_set_port_router(pmctx: &v->port_mcast_ctx, |
4497 | val: mcast_router); |
4498 | |
4499 | return err; |
4500 | } |
4501 | |
/* Restart the own-query cycle for @brmctx and kick the matching
 * per-port (or per-port-vlan) own queries on every active port.  Which
 * address family is restarted is selected by comparing @query against
 * the context's ip4 own query.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	/* don't start queries for a context that doesn't match the current
	 * vlan-snooping mode
	 */
	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(br: brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(pmctx: &port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			/* vlan context: use the port's matching vlan entry */
			vlan = br_vlan_find(vg: nbp_vlan_group_rcu(p: port),
					    vid: brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(pmctx: &vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(query: ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(query: ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
4551 | |
/* Enable/disable multicast snooping on the bridge.  Also joins/leaves
 * the all-snoopers groups -- deliberately outside the multicast lock,
 * see the comment below.  Returns 0 or a negative errno from the
 * switchdev notification.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(lock: &br->multicast_lock);
	if (!!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* -EOPNOTSUPP from drivers that don't offload is not an error */
	err = br_mc_disabled_update(dev: br->dev, value: val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, opt: BROPT_MULTICAST_ENABLED, on: !!val);
	if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(dev: br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(pmctx: &port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(lock: &br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, opt: BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4608 | |
4609 | bool br_multicast_enabled(const struct net_device *dev) |
4610 | { |
4611 | struct net_bridge *br = netdev_priv(dev); |
4612 | |
4613 | return !!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED); |
4614 | } |
4615 | EXPORT_SYMBOL_GPL(br_multicast_enabled); |
4616 | |
4617 | bool br_multicast_router(const struct net_device *dev) |
4618 | { |
4619 | struct net_bridge *br = netdev_priv(dev); |
4620 | bool is_router; |
4621 | |
4622 | spin_lock_bh(lock: &br->multicast_lock); |
4623 | is_router = br_multicast_is_router(brmctx: &br->multicast_ctx, NULL); |
4624 | spin_unlock_bh(lock: &br->multicast_lock); |
4625 | return is_router; |
4626 | } |
4627 | EXPORT_SYMBOL_GPL(br_multicast_router); |
4628 | |
/* Enable or disable sending our own IGMP/MLD queries (the multicast_querier
 * option) for the given multicast context. Always returns 0; @val is
 * normalized to 0/1.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(lock: &brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier. */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	/* When enabling, give a foreign querier one query response interval
	 * to show up before we consider ourselves the querier.
	 */
	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(timer: &brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, query: &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(timer: &brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, query: &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(lock: &brmctx->br->multicast_lock);

	return 0;
}
4662 | |
4663 | int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx, |
4664 | unsigned long val) |
4665 | { |
4666 | /* Currently we support only version 2 and 3 */ |
4667 | switch (val) { |
4668 | case 2: |
4669 | case 3: |
4670 | break; |
4671 | default: |
4672 | return -EINVAL; |
4673 | } |
4674 | |
4675 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4676 | brmctx->multicast_igmp_version = val; |
4677 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4678 | |
4679 | return 0; |
4680 | } |
4681 | |
4682 | #if IS_ENABLED(CONFIG_IPV6) |
4683 | int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx, |
4684 | unsigned long val) |
4685 | { |
4686 | /* Currently we support version 1 and 2 */ |
4687 | switch (val) { |
4688 | case 1: |
4689 | case 2: |
4690 | break; |
4691 | default: |
4692 | return -EINVAL; |
4693 | } |
4694 | |
4695 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4696 | brmctx->multicast_mld_version = val; |
4697 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4698 | |
4699 | return 0; |
4700 | } |
4701 | #endif |
4702 | |
/* Set the IGMP/MLD query interval from a userspace clock_t value.
 * Values below BR_MULTICAST_QUERY_INTVL_MIN are clamped (and logged) to
 * avoid overly aggressive querying.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(x: val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n" ,
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}
4718 | |
/* Set the startup query interval from a userspace clock_t value.
 * Values below BR_MULTICAST_STARTUP_QUERY_INTVL_MIN are clamped (and
 * logged), mirroring br_multicast_set_query_intvl().
 */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(x: val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n" ,
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}
4734 | |
4735 | /** |
4736 | * br_multicast_list_adjacent - Returns snooped multicast addresses |
4737 | * @dev: The bridge port adjacent to which to retrieve addresses |
4738 | * @br_ip_list: The list to store found, snooped multicast IP addresses in |
4739 | * |
4740 | * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast |
4741 | * snooping feature on all bridge ports of dev's bridge device, excluding |
4742 | * the addresses from dev itself. |
4743 | * |
4744 | * Returns the number of items added to br_ip_list. |
4745 | * |
4746 | * Notes: |
4747 | * - br_ip_list needs to be initialized by caller |
4748 | * - br_ip_list might contain duplicates in the end |
4749 | * (needs to be taken care of by caller) |
4750 | * - br_ip_list needs to be freed by caller |
4751 | */ |
4752 | int br_multicast_list_adjacent(struct net_device *dev, |
4753 | struct list_head *br_ip_list) |
4754 | { |
4755 | struct net_bridge *br; |
4756 | struct net_bridge_port *port; |
4757 | struct net_bridge_port_group *group; |
4758 | struct br_ip_list *entry; |
4759 | int count = 0; |
4760 | |
4761 | rcu_read_lock(); |
4762 | if (!br_ip_list || !netif_is_bridge_port(dev)) |
4763 | goto unlock; |
4764 | |
4765 | port = br_port_get_rcu(dev); |
4766 | if (!port || !port->br) |
4767 | goto unlock; |
4768 | |
4769 | br = port->br; |
4770 | |
4771 | list_for_each_entry_rcu(port, &br->port_list, list) { |
4772 | if (!port->dev || port->dev == dev) |
4773 | continue; |
4774 | |
4775 | hlist_for_each_entry_rcu(group, &port->mglist, mglist) { |
4776 | entry = kmalloc(size: sizeof(*entry), GFP_ATOMIC); |
4777 | if (!entry) |
4778 | goto unlock; |
4779 | |
4780 | entry->addr = group->key.addr; |
4781 | list_add(new: &entry->list, head: br_ip_list); |
4782 | count++; |
4783 | } |
4784 | } |
4785 | |
4786 | unlock: |
4787 | rcu_read_unlock(); |
4788 | return count; |
4789 | } |
4790 | EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); |
4791 | |
4792 | /** |
4793 | * br_multicast_has_querier_anywhere - Checks for a querier on a bridge |
4794 | * @dev: The bridge port providing the bridge on which to check for a querier |
4795 | * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 |
4796 | * |
4797 | * Checks whether the given interface has a bridge on top and if so returns |
4798 | * true if a valid querier exists anywhere on the bridged link layer. |
4799 | * Otherwise returns false. |
4800 | */ |
4801 | bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) |
4802 | { |
4803 | struct net_bridge *br; |
4804 | struct net_bridge_port *port; |
4805 | struct ethhdr eth; |
4806 | bool ret = false; |
4807 | |
4808 | rcu_read_lock(); |
4809 | if (!netif_is_bridge_port(dev)) |
4810 | goto unlock; |
4811 | |
4812 | port = br_port_get_rcu(dev); |
4813 | if (!port || !port->br) |
4814 | goto unlock; |
4815 | |
4816 | br = port->br; |
4817 | |
4818 | memset(ð, 0, sizeof(eth)); |
4819 | eth.h_proto = htons(proto); |
4820 | |
4821 | ret = br_multicast_querier_exists(brmctx: &br->multicast_ctx, eth: ð, NULL); |
4822 | |
4823 | unlock: |
4824 | rcu_read_unlock(); |
4825 | return ret; |
4826 | } |
4827 | EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); |
4828 | |
4829 | /** |
4830 | * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port |
4831 | * @dev: The bridge port adjacent to which to check for a querier |
4832 | * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 |
4833 | * |
4834 | * Checks whether the given interface has a bridge on top and if so returns |
4835 | * true if a selected querier is behind one of the other ports of this |
4836 | * bridge. Otherwise returns false. |
4837 | */ |
4838 | bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) |
4839 | { |
4840 | struct net_bridge_mcast *brmctx; |
4841 | struct net_bridge *br; |
4842 | struct net_bridge_port *port; |
4843 | bool ret = false; |
4844 | int port_ifidx; |
4845 | |
4846 | rcu_read_lock(); |
4847 | if (!netif_is_bridge_port(dev)) |
4848 | goto unlock; |
4849 | |
4850 | port = br_port_get_rcu(dev); |
4851 | if (!port || !port->br) |
4852 | goto unlock; |
4853 | |
4854 | br = port->br; |
4855 | brmctx = &br->multicast_ctx; |
4856 | |
4857 | switch (proto) { |
4858 | case ETH_P_IP: |
4859 | port_ifidx = brmctx->ip4_querier.port_ifidx; |
4860 | if (!timer_pending(timer: &brmctx->ip4_other_query.timer) || |
4861 | port_ifidx == port->dev->ifindex) |
4862 | goto unlock; |
4863 | break; |
4864 | #if IS_ENABLED(CONFIG_IPV6) |
4865 | case ETH_P_IPV6: |
4866 | port_ifidx = brmctx->ip6_querier.port_ifidx; |
4867 | if (!timer_pending(timer: &brmctx->ip6_other_query.timer) || |
4868 | port_ifidx == port->dev->ifindex) |
4869 | goto unlock; |
4870 | break; |
4871 | #endif |
4872 | default: |
4873 | goto unlock; |
4874 | } |
4875 | |
4876 | ret = true; |
4877 | unlock: |
4878 | rcu_read_unlock(); |
4879 | return ret; |
4880 | } |
4881 | EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); |
4882 | |
4883 | /** |
4884 | * br_multicast_has_router_adjacent - Checks for a router behind a bridge port |
4885 | * @dev: The bridge port adjacent to which to check for a multicast router |
4886 | * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 |
4887 | * |
4888 | * Checks whether the given interface has a bridge on top and if so returns |
4889 | * true if a multicast router is behind one of the other ports of this |
4890 | * bridge. Otherwise returns false. |
4891 | */ |
4892 | bool br_multicast_has_router_adjacent(struct net_device *dev, int proto) |
4893 | { |
4894 | struct net_bridge_mcast_port *pmctx; |
4895 | struct net_bridge_mcast *brmctx; |
4896 | struct net_bridge_port *port; |
4897 | bool ret = false; |
4898 | |
4899 | rcu_read_lock(); |
4900 | port = br_port_get_check_rcu(dev); |
4901 | if (!port) |
4902 | goto unlock; |
4903 | |
4904 | brmctx = &port->br->multicast_ctx; |
4905 | switch (proto) { |
4906 | case ETH_P_IP: |
4907 | hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list, |
4908 | ip4_rlist) { |
4909 | if (pmctx->port == port) |
4910 | continue; |
4911 | |
4912 | ret = true; |
4913 | goto unlock; |
4914 | } |
4915 | break; |
4916 | #if IS_ENABLED(CONFIG_IPV6) |
4917 | case ETH_P_IPV6: |
4918 | hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list, |
4919 | ip6_rlist) { |
4920 | if (pmctx->port == port) |
4921 | continue; |
4922 | |
4923 | ret = true; |
4924 | goto unlock; |
4925 | } |
4926 | break; |
4927 | #endif |
4928 | default: |
4929 | /* when compiled without IPv6 support, be conservative and |
4930 | * always assume presence of an IPv6 multicast router |
4931 | */ |
4932 | ret = true; |
4933 | } |
4934 | |
4935 | unlock: |
4936 | rcu_read_unlock(); |
4937 | return ret; |
4938 | } |
4939 | EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent); |
4940 | |
/* Account one IGMP/MLD packet of @type travelling in direction @dir
 * (RX/TX) into the given per-cpu statistics. Caller must ensure @skb is a
 * valid IGMP/MLD packet (see br_multicast_count()).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(syncp: &pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* IGMP payload length = total length minus the IP header. */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are longer than a plain igmphdr; v1 and
			 * v2 are told apart by the max response code field.
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							len: sizeof(_ihdr), buffer: &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MLD payload length = IPv6 payload minus any extension
		 * headers between the fixed header and the ICMPv6 message.
		 */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are longer than a bare mld_msg. */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(syncp: &pstats->syncp);
}
5011 | |
5012 | void br_multicast_count(struct net_bridge *br, |
5013 | const struct net_bridge_port *p, |
5014 | const struct sk_buff *skb, u8 type, u8 dir) |
5015 | { |
5016 | struct bridge_mcast_stats __percpu *stats; |
5017 | |
5018 | /* if multicast_disabled is true then igmp type can't be set */ |
5019 | if (!type || !br_opt_get(br, opt: BROPT_MULTICAST_STATS_ENABLED)) |
5020 | return; |
5021 | |
5022 | if (p) |
5023 | stats = p->mcast_stats; |
5024 | else |
5025 | stats = br->mcast_stats; |
5026 | if (WARN_ON(!stats)) |
5027 | return; |
5028 | |
5029 | br_mcast_stats_add(stats, skb, type, dir); |
5030 | } |
5031 | |
5032 | int br_multicast_init_stats(struct net_bridge *br) |
5033 | { |
5034 | br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); |
5035 | if (!br->mcast_stats) |
5036 | return -ENOMEM; |
5037 | |
5038 | return 0; |
5039 | } |
5040 | |
5041 | void br_multicast_uninit_stats(struct net_bridge *br) |
5042 | { |
5043 | free_percpu(pdata: br->mcast_stats); |
5044 | } |
5045 | |
5046 | /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ |
5047 | static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) |
5048 | { |
5049 | dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; |
5050 | dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX]; |
5051 | } |
5052 | |
/* Sum the per-cpu multicast statistics into @dest, using per-port counters
 * when @p is given and the bridge-global counters otherwise. Each per-cpu
 * snapshot is taken under the u64_stats seqcount to get a consistent copy.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* Retry until the snapshot was not torn by a concurrent
		 * writer on that cpu.
		 */
		do {
			start = u64_stats_fetch_begin(syncp: &cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(syncp: &cpu_stats->syncp, start));

		mcast_stats_add_dir(dst: tdst.igmp_v1queries, src: temp.igmp_v1queries);
		mcast_stats_add_dir(dst: tdst.igmp_v2queries, src: temp.igmp_v2queries);
		mcast_stats_add_dir(dst: tdst.igmp_v3queries, src: temp.igmp_v3queries);
		mcast_stats_add_dir(dst: tdst.igmp_leaves, src: temp.igmp_leaves);
		mcast_stats_add_dir(dst: tdst.igmp_v1reports, src: temp.igmp_v1reports);
		mcast_stats_add_dir(dst: tdst.igmp_v2reports, src: temp.igmp_v2reports);
		mcast_stats_add_dir(dst: tdst.igmp_v3reports, src: temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(dst: tdst.mld_v1queries, src: temp.mld_v1queries);
		mcast_stats_add_dir(dst: tdst.mld_v2queries, src: temp.mld_v2queries);
		mcast_stats_add_dir(dst: tdst.mld_leaves, src: temp.mld_leaves);
		mcast_stats_add_dir(dst: tdst.mld_v1reports, src: temp.mld_v1reports);
		mcast_stats_add_dir(dst: tdst.mld_v2reports, src: temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
5098 | |
5099 | int br_mdb_hash_init(struct net_bridge *br) |
5100 | { |
5101 | int err; |
5102 | |
5103 | err = rhashtable_init(ht: &br->sg_port_tbl, params: &br_sg_port_rht_params); |
5104 | if (err) |
5105 | return err; |
5106 | |
5107 | err = rhashtable_init(ht: &br->mdb_hash_tbl, params: &br_mdb_rht_params); |
5108 | if (err) { |
5109 | rhashtable_destroy(ht: &br->sg_port_tbl); |
5110 | return err; |
5111 | } |
5112 | |
5113 | return 0; |
5114 | } |
5115 | |
5116 | void br_mdb_hash_fini(struct net_bridge *br) |
5117 | { |
5118 | rhashtable_destroy(ht: &br->sg_port_tbl); |
5119 | rhashtable_destroy(ht: &br->mdb_hash_tbl); |
5120 | } |
5121 | |