1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | |
3 | #include <linux/mrp_bridge.h> |
4 | #include "br_private_mrp.h" |
5 | |
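/* Multicast destination MACs used for MRP_Test and MRP_InTest frames */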
6 | static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 }; |
7 | static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 }; |
8 | |
9 | static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb); |
10 | |
11 | static struct br_frame_type mrp_frame_type __read_mostly = { |
12 | .type = cpu_to_be16(ETH_P_MRP), |
13 | .frame_handler = br_mrp_process, |
14 | }; |
15 | |
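/* Return true if the port is one of the two ring ports of the MRP instance */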
16 | static bool br_mrp_is_ring_port(struct net_bridge_port *p_port, |
17 | struct net_bridge_port *s_port, |
18 | struct net_bridge_port *port) |
19 | { |
20 | if (port == p_port || |
21 | port == s_port) |
22 | return true; |
23 | |
24 | return false; |
25 | } |
26 | |
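/* Return true if the port is the interconnect port of the MRP instance */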
27 | static bool br_mrp_is_in_port(struct net_bridge_port *i_port, |
28 | struct net_bridge_port *port) |
29 | { |
30 | if (port == i_port) |
31 | return true; |
32 | |
33 | return false; |
34 | } |
35 | |
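/* Find the bridge port with the given ifindex, NULL if it is not a port of this bridge */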
36 | static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br, |
37 | u32 ifindex) |
38 | { |
39 | struct net_bridge_port *res = NULL; |
40 | struct net_bridge_port *port; |
41 | |
42 | list_for_each_entry(port, &br->port_list, list) { |
43 | if (port->dev->ifindex == ifindex) { |
44 | res = port; |
45 | break; |
46 | } |
47 | } |
48 | |
49 | return res; |
50 | } |
51 | |
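/* Find the MRP instance with the given ring_id, NULL if it doesn't exist */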
52 | static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id) |
53 | { |
54 | struct br_mrp *res = NULL; |
55 | struct br_mrp *mrp; |
56 | |
57 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
58 | lockdep_rtnl_is_held()) { |
59 | if (mrp->ring_id == ring_id) { |
60 | res = mrp; |
61 | break; |
62 | } |
63 | } |
64 | |
65 | return res; |
66 | } |
67 | |
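/* Find the MRP instance with the given interconnect in_id, NULL if it doesn't exist */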
68 | static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id) |
69 | { |
70 | struct br_mrp *res = NULL; |
71 | struct br_mrp *mrp; |
72 | |
73 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
74 | lockdep_rtnl_is_held()) { |
75 | if (mrp->in_id == in_id) { |
76 | res = mrp; |
77 | break; |
78 | } |
79 | } |
80 | |
81 | return res; |
82 | } |
83 | |
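/* Return true if the ifindex is not already used as a ring or interconnect port by any MRP instance */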
84 | static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex) |
85 | { |
86 | struct br_mrp *mrp; |
87 | |
88 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
89 | lockdep_rtnl_is_held()) { |
90 | struct net_bridge_port *p; |
91 | |
92 | p = rtnl_dereference(mrp->p_port); |
93 | if (p && p->dev->ifindex == ifindex) |
94 | return false; |
95 | |
96 | p = rtnl_dereference(mrp->s_port); |
97 | if (p && p->dev->ifindex == ifindex) |
98 | return false; |
99 | |
100 | p = rtnl_dereference(mrp->i_port); |
101 | if (p && p->dev->ifindex == ifindex) |
102 | return false; |
103 | } |
104 | |
105 | return true; |
106 | } |
107 | |
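/* Find the MRP instance that uses the port as ring or interconnect port */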
108 | static struct br_mrp *br_mrp_find_port(struct net_bridge *br, |
109 | struct net_bridge_port *p) |
110 | { |
111 | struct br_mrp *res = NULL; |
112 | struct br_mrp *mrp; |
113 | |
114 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
115 | lockdep_rtnl_is_held()) { |
116 | if (rcu_access_pointer(mrp->p_port) == p || |
117 | rcu_access_pointer(mrp->s_port) == p || |
118 | rcu_access_pointer(mrp->i_port) == p) { |
119 | res = mrp; |
120 | break; |
121 | } |
122 | } |
123 | |
124 | return res; |
125 | } |
126 | |
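/* Return the next sequence ID to be used in the MRP_Common TLV of generated frames */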
127 | static int br_mrp_next_seq(struct br_mrp *mrp) |
128 | { |
129 | mrp->seq_id++; |
130 | return mrp->seq_id; |
131 | } |
132 | |
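/* Allocate an MRP frame and fill in the Ethernet header and the 16-bit MRP version field */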
133 | static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p, |
134 | const u8 *src, const u8 *dst) |
135 | { |
136 | struct ethhdr *eth_hdr; |
137 | struct sk_buff *skb; |
138 | __be16 *version; |
139 | |
140 | skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH); |
141 | if (!skb) |
142 | return NULL; |
143 | |
144 | skb->dev = p->dev; |
145 | skb->protocol = htons(ETH_P_MRP); |
146 | skb->priority = MRP_FRAME_PRIO; |
147 | skb_reserve(skb, sizeof(*eth_hdr)); |
148 | |
149 | eth_hdr = skb_push(skb, sizeof(*eth_hdr)); |
150 | ether_addr_copy(eth_hdr->h_dest, dst); |
151 | ether_addr_copy(eth_hdr->h_source, src); |
152 | eth_hdr->h_proto = htons(ETH_P_MRP); |
153 | |
154 | version = skb_put(skb, sizeof(*version)); |
155 | *version = cpu_to_be16(MRP_VERSION); |
156 | |
157 | return skb; |
158 | } |
159 | |
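/* Append a TLV header with the given type and length to the frame */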
160 | static void br_mrp_skb_tlv(struct sk_buff *skb, |
161 | enum br_mrp_tlv_header_type type, |
162 | u8 length) |
163 | { |
164 | struct br_mrp_tlv_hdr *hdr; |
165 | |
166 | hdr = skb_put(skb, sizeof(*hdr)); |
167 | hdr->type = type; |
168 | hdr->length = length; |
169 | } |
170 | |
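/* Append the MRP_Common TLV which carries the sequence ID and the domain UUID */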
171 | static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp) |
172 | { |
173 | struct br_mrp_common_hdr *hdr; |
174 | |
175 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr)); |
176 | |
177 | hdr = skb_put(skb, sizeof(*hdr)); |
178 | hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp)); |
179 | memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH); |
180 | } |
181 | |
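/* Build an MRP_Test frame to be sent on the given ring port */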
182 | static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp, |
183 | struct net_bridge_port *p, |
184 | enum br_mrp_port_role_type port_role) |
185 | { |
186 | struct br_mrp_ring_test_hdr *hdr = NULL; |
187 | struct sk_buff *skb = NULL; |
188 | |
189 | if (!p) |
190 | return NULL; |
191 | |
192 | skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac); |
193 | if (!skb) |
194 | return NULL; |
195 | |
196 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr)); |
197 | hdr = skb_put(skb, sizeof(*hdr)); |
198 | |
199 | hdr->prio = cpu_to_be16(mrp->prio); |
200 | ether_addr_copy(hdr->sa, p->br->dev->dev_addr); |
201 | hdr->port_role = cpu_to_be16(port_role); |
202 | hdr->state = cpu_to_be16(mrp->ring_state); |
203 | hdr->transitions = cpu_to_be16(mrp->ring_transitions); |
204 | hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies)); |
205 | |
206 | br_mrp_skb_common(skb, mrp); |
207 | |
208 | /* In case the node behaves as MRA then the Test frame needs to have |
209 | * an Option TLV which includes a sub-option TLV that has |
210 | * the type AUTO_MGR |
211 | */ |
212 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { |
213 | struct br_mrp_sub_option1_hdr *sub_opt = NULL; |
214 | struct br_mrp_tlv_hdr *sub_tlv = NULL; |
215 | struct br_mrp_oui_hdr *oui = NULL; |
216 | u8 length; |
217 | |
218 | length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(oui) + |
219 | MRP_OPT_PADDING; |
220 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length); |
221 | |
222 | oui = skb_put(skb, sizeof(*oui)); |
223 | memset(oui, 0x0, sizeof(*oui)); |
224 | sub_opt = skb_put(skb, sizeof(*sub_opt)); |
225 | memset(sub_opt, 0x0, sizeof(*sub_opt)); |
226 | |
227 | sub_tlv = skb_put(skb, sizeof(*sub_tlv)); |
228 | sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR; |
229 | |
230 | /* 32 bit alignment shall be ensured therefore add 2 bytes */ |
231 | skb_put(skb, MRP_OPT_PADDING); |
232 | } |
233 | |
234 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0); |
235 | |
236 | return skb; |
237 | } |
238 | |
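/* Build an MRP_InTest frame to be sent on the given port */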
239 | static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp, |
240 | struct net_bridge_port *p, |
241 | enum br_mrp_port_role_type port_role) |
242 | { |
243 | struct br_mrp_in_test_hdr *hdr = NULL; |
244 | struct sk_buff *skb = NULL; |
245 | |
246 | if (!p) |
247 | return NULL; |
248 | |
249 | skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac); |
250 | if (!skb) |
251 | return NULL; |
252 | |
253 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr)); |
254 | hdr = skb_put(skb, sizeof(*hdr)); |
255 | |
256 | hdr->id = cpu_to_be16(mrp->in_id); |
257 | ether_addr_copy(hdr->sa, p->br->dev->dev_addr); |
258 | hdr->port_role = cpu_to_be16(port_role); |
259 | hdr->state = cpu_to_be16(mrp->in_state); |
260 | hdr->transitions = cpu_to_be16(mrp->in_transitions); |
261 | hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies)); |
262 | |
263 | br_mrp_skb_common(skb, mrp); |
264 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0); |
265 | |
266 | return skb; |
267 | } |
268 | |
269 | /* This function is continuously called in the following cases: |
270 | * - when node role is MRM, in this case test_monitor is always set to false |
271 | * because it needs to notify the userspace that the ring is open and needs to |
272 | * send MRP_Test frames |
273 | * - when node role is MRA, there are 2 subcases: |
274 | - when MRA behaves as MRM, in this case it is similar to the MRM role |
275 | * - when MRA behaves as MRC, in this case test_monitor is set to true, |
276 | * because it needs to detect when it stops seeing MRP_Test frames |
277 | * from MRM node but it doesn't need to send MRP_Test frames. |
278 | */ |
279 | static void br_mrp_test_work_expired(struct work_struct *work) |
280 | { |
281 | struct delayed_work *del_work = to_delayed_work(work); |
282 | struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work); |
283 | struct net_bridge_port *p; |
284 | bool notify_open = false; |
285 | struct sk_buff *skb; |
286 | |
287 | if (time_before_eq(mrp->test_end, jiffies)) |
288 | return; |
289 | |
290 | if (mrp->test_count_miss < mrp->test_max_miss) { |
291 | mrp->test_count_miss++; |
292 | } else { |
293 | /* Notify that the ring is open only if the ring state is |
294 | * closed, otherwise it would continue to notify at every |
295 | * interval. |
296 | * Also notify that the ring is open when the node has the |
297 | * role MRA and behaves as MRC. The reason is that the |
298 | * userspace needs to know when the MRM stopped sending |
299 | * MRP_Test frames so that the current node can try to take |
300 | * the role of MRM. |
301 | */ |
302 | if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED || |
303 | mrp->test_monitor) |
304 | notify_open = true; |
305 | } |
306 | |
307 | rcu_read_lock(); |
308 | |
309 | p = rcu_dereference(mrp->p_port); |
310 | if (p) { |
311 | if (!mrp->test_monitor) { |
312 | skb = br_mrp_alloc_test_skb(mrp, p, |
313 | BR_MRP_PORT_ROLE_PRIMARY); |
314 | if (!skb) |
315 | goto out; |
316 | |
317 | skb_reset_network_header(skb); |
318 | dev_queue_xmit(skb); |
319 | } |
320 | |
321 | if (notify_open && !mrp->ring_role_offloaded) |
322 | br_mrp_ring_port_open(p->dev, true); |
323 | } |
324 | |
325 | p = rcu_dereference(mrp->s_port); |
326 | if (p) { |
327 | if (!mrp->test_monitor) { |
328 | skb = br_mrp_alloc_test_skb(mrp, p, |
329 | BR_MRP_PORT_ROLE_SECONDARY); |
330 | if (!skb) |
331 | goto out; |
332 | |
333 | skb_reset_network_header(skb); |
334 | dev_queue_xmit(skb); |
335 | } |
336 | |
337 | if (notify_open && !mrp->ring_role_offloaded) |
338 | br_mrp_ring_port_open(p->dev, true); |
339 | } |
340 | |
341 | out: |
342 | rcu_read_unlock(); |
343 | |
344 | queue_delayed_work(system_wq, &mrp->test_work, |
345 | usecs_to_jiffies(mrp->test_interval)); |
346 | } |
347 | |
348 | /* This function is continuously called when the node has the interconnect role |
349 | * MIM. It generates interconnect test frames and sends them on all 3 |
350 | * ports. It also checks whether it stopped receiving interconnect test frames. |
351 | */ |
352 | static void br_mrp_in_test_work_expired(struct work_struct *work) |
353 | { |
354 | struct delayed_work *del_work = to_delayed_work(work); |
355 | struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work); |
356 | struct net_bridge_port *p; |
357 | bool notify_open = false; |
358 | struct sk_buff *skb; |
359 | |
360 | if (time_before_eq(mrp->in_test_end, jiffies)) |
361 | return; |
362 | |
363 | if (mrp->in_test_count_miss < mrp->in_test_max_miss) { |
364 | mrp->in_test_count_miss++; |
365 | } else { |
366 | /* Notify that the interconnect ring is open only if the |
367 | * interconnect ring state is closed, otherwise it would |
368 | * continue to notify at every interval. |
369 | */ |
370 | if (mrp->in_state == BR_MRP_IN_STATE_CLOSED) |
371 | notify_open = true; |
372 | } |
373 | |
374 | rcu_read_lock(); |
375 | |
376 | p = rcu_dereference(mrp->p_port); |
377 | if (p) { |
378 | skb = br_mrp_alloc_in_test_skb(mrp, p, |
379 | BR_MRP_PORT_ROLE_PRIMARY); |
380 | if (!skb) |
381 | goto out; |
382 | |
383 | skb_reset_network_header(skb); |
384 | dev_queue_xmit(skb); |
385 | |
386 | if (notify_open && !mrp->in_role_offloaded) |
387 | br_mrp_in_port_open(p->dev, true); |
388 | } |
389 | |
390 | p = rcu_dereference(mrp->s_port); |
391 | if (p) { |
392 | skb = br_mrp_alloc_in_test_skb(mrp, p, |
393 | BR_MRP_PORT_ROLE_SECONDARY); |
394 | if (!skb) |
395 | goto out; |
396 | |
397 | skb_reset_network_header(skb); |
398 | dev_queue_xmit(skb); |
399 | |
400 | if (notify_open && !mrp->in_role_offloaded) |
401 | br_mrp_in_port_open(p->dev, true); |
402 | } |
403 | |
404 | p = rcu_dereference(mrp->i_port); |
405 | if (p) { |
406 | skb = br_mrp_alloc_in_test_skb(mrp, p, |
407 | BR_MRP_PORT_ROLE_INTER); |
408 | if (!skb) |
409 | goto out; |
410 | |
411 | skb_reset_network_header(skb); |
412 | dev_queue_xmit(skb); |
413 | |
414 | if (notify_open && !mrp->in_role_offloaded) |
415 | br_mrp_in_port_open(p->dev, true); |
416 | } |
417 | |
418 | out: |
419 | rcu_read_unlock(); |
420 | |
421 | queue_delayed_work(system_wq, &mrp->in_test_work, |
422 | usecs_to_jiffies(mrp->in_test_interval)); |
423 | } |
424 | |
425 | /* Deletes the MRP instance. |
426 | * note: called under rtnl_lock |
427 | */ |
428 | static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp) |
429 | { |
430 | struct net_bridge_port *p; |
431 | u8 state; |
432 | |
433 | /* Stop sending MRP_Test frames */ |
434 | cancel_delayed_work_sync(&mrp->test_work); |
435 | br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0); |
436 | |
437 | /* Stop sending MRP_InTest frames if the node has an interconnect role */ |
438 | cancel_delayed_work_sync(&mrp->in_test_work); |
439 | br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); |
440 | |
441 | /* Disable the roles */ |
442 | br_mrp_switchdev_set_ring_role(br, mrp, BR_MRP_RING_ROLE_DISABLED); |
443 | p = rtnl_dereference(mrp->i_port); |
444 | if (p) |
445 | br_mrp_switchdev_set_in_role(br, mrp, mrp->in_id, mrp->ring_id, |
446 | BR_MRP_IN_ROLE_DISABLED); |
447 | |
448 | br_mrp_switchdev_del(br, mrp); |
449 | |
450 | /* Reset the ports */ |
451 | p = rtnl_dereference(mrp->p_port); |
452 | if (p) { |
453 | spin_lock_bh(&br->lock); |
454 | state = netif_running(br->dev) ? |
455 | BR_STATE_FORWARDING : BR_STATE_DISABLED; |
456 | p->state = state; |
457 | p->flags &= ~BR_MRP_AWARE; |
458 | spin_unlock_bh(&br->lock); |
459 | br_mrp_port_switchdev_set_state(p, state); |
460 | rcu_assign_pointer(mrp->p_port, NULL); |
461 | } |
462 | |
463 | p = rtnl_dereference(mrp->s_port); |
464 | if (p) { |
465 | spin_lock_bh(&br->lock); |
466 | state = netif_running(br->dev) ? |
467 | BR_STATE_FORWARDING : BR_STATE_DISABLED; |
468 | p->state = state; |
469 | p->flags &= ~BR_MRP_AWARE; |
470 | spin_unlock_bh(&br->lock); |
471 | br_mrp_port_switchdev_set_state(p, state); |
472 | rcu_assign_pointer(mrp->s_port, NULL); |
473 | } |
474 | |
475 | p = rtnl_dereference(mrp->i_port); |
476 | if (p) { |
477 | spin_lock_bh(&br->lock); |
478 | state = netif_running(br->dev) ? |
479 | BR_STATE_FORWARDING : BR_STATE_DISABLED; |
480 | p->state = state; |
481 | p->flags &= ~BR_MRP_AWARE; |
482 | spin_unlock_bh(&br->lock); |
483 | br_mrp_port_switchdev_set_state(p, state); |
484 | rcu_assign_pointer(mrp->i_port, NULL); |
485 | } |
486 | |
487 | hlist_del_rcu(&mrp->list); |
488 | kfree_rcu(mrp, rcu); |
489 | |
490 | if (hlist_empty(&br->mrp_list)) |
491 | br_del_frame(br, &mrp_frame_type); |
492 | } |
493 | |
494 | /* Adds a new MRP instance. |
495 | * note: called under rtnl_lock |
496 | */ |
497 | int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance) |
498 | { |
499 | struct net_bridge_port *p; |
500 | struct br_mrp *mrp; |
501 | int err; |
502 | |
503 | /* If the ring exists, it is not possible to create another one with the |
504 | * same ring_id |
505 | */ |
506 | mrp = br_mrp_find_id(br, instance->ring_id); |
507 | if (mrp) |
508 | return -EINVAL; |
509 | |
510 | if (!br_mrp_get_port(br, instance->p_ifindex) || |
511 | !br_mrp_get_port(br, instance->s_ifindex)) |
512 | return -EINVAL; |
513 | |
514 | /* It is not possible to have the same port part of multiple rings */ |
515 | if (!br_mrp_unique_ifindex(br, instance->p_ifindex) || |
516 | !br_mrp_unique_ifindex(br, instance->s_ifindex)) |
517 | return -EINVAL; |
518 | |
519 | mrp = kzalloc(sizeof(*mrp), GFP_KERNEL); |
520 | if (!mrp) |
521 | return -ENOMEM; |
522 | |
523 | mrp->ring_id = instance->ring_id; |
524 | mrp->prio = instance->prio; |
525 | |
526 | p = br_mrp_get_port(br, instance->p_ifindex); |
527 | spin_lock_bh(&br->lock); |
528 | p->state = BR_STATE_FORWARDING; |
529 | p->flags |= BR_MRP_AWARE; |
530 | spin_unlock_bh(&br->lock); |
531 | rcu_assign_pointer(mrp->p_port, p); |
532 | |
533 | p = br_mrp_get_port(br, instance->s_ifindex); |
534 | spin_lock_bh(&br->lock); |
535 | p->state = BR_STATE_FORWARDING; |
536 | p->flags |= BR_MRP_AWARE; |
537 | spin_unlock_bh(&br->lock); |
538 | rcu_assign_pointer(mrp->s_port, p); |
539 | |
540 | if (hlist_empty(&br->mrp_list)) |
541 | br_add_frame(br, &mrp_frame_type); |
542 | |
543 | INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired); |
544 | INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired); |
545 | hlist_add_tail_rcu(&mrp->list, &br->mrp_list); |
546 | |
547 | err = br_mrp_switchdev_add(br, mrp); |
548 | if (err) |
549 | goto delete_mrp; |
550 | |
551 | return 0; |
552 | |
553 | delete_mrp: |
554 | br_mrp_del_impl(br, mrp); |
555 | |
556 | return err; |
557 | } |
558 | |
559 | /* Deletes the MRP instance of which the port is a part |
560 | * note: called under rtnl_lock |
561 | */ |
562 | void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p) |
563 | { |
564 | struct br_mrp *mrp = br_mrp_find_port(br, p); |
565 | |
566 | /* If the port is not part of an MRP instance just bail out */ |
567 | if (!mrp) |
568 | return; |
569 | |
570 | br_mrp_del_impl(br, mrp); |
571 | } |
572 | |
573 | /* Deletes existing MRP instance based on ring_id |
574 | * note: called under rtnl_lock |
575 | */ |
576 | int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance) |
577 | { |
578 | struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id); |
579 | |
580 | if (!mrp) |
581 | return -EINVAL; |
582 | |
583 | br_mrp_del_impl(br, mrp); |
584 | |
585 | return 0; |
586 | } |
587 | |
588 | /* Set port state, port state can be forwarding, blocked or disabled |
589 | * note: already called with rtnl_lock |
590 | */ |
591 | int br_mrp_set_port_state(struct net_bridge_port *p, |
592 | enum br_mrp_port_state_type state) |
593 | { |
594 | u32 port_state; |
595 | |
596 | if (!p || !(p->flags & BR_MRP_AWARE)) |
597 | return -EINVAL; |
598 | |
599 | spin_lock_bh(&p->br->lock); |
600 | |
601 | if (state == BR_MRP_PORT_STATE_FORWARDING) |
602 | port_state = BR_STATE_FORWARDING; |
603 | else |
604 | port_state = BR_STATE_BLOCKING; |
605 | |
606 | p->state = port_state; |
607 | spin_unlock_bh(&p->br->lock); |
608 | |
609 | br_mrp_port_switchdev_set_state(p, port_state); |
610 | |
611 | return 0; |
612 | } |
613 | |
614 | /* Set port role, port role can be primary or secondary |
615 | * note: already called with rtnl_lock |
616 | */ |
617 | int br_mrp_set_port_role(struct net_bridge_port *p, |
618 | enum br_mrp_port_role_type role) |
619 | { |
620 | struct br_mrp *mrp; |
621 | |
622 | if (!p || !(p->flags & BR_MRP_AWARE)) |
623 | return -EINVAL; |
624 | |
625 | mrp = br_mrp_find_port(p->br, p); |
626 | |
627 | if (!mrp) |
628 | return -EINVAL; |
629 | |
630 | switch (role) { |
631 | case BR_MRP_PORT_ROLE_PRIMARY: |
632 | rcu_assign_pointer(mrp->p_port, p); |
633 | break; |
634 | case BR_MRP_PORT_ROLE_SECONDARY: |
635 | rcu_assign_pointer(mrp->s_port, p); |
636 | break; |
637 | default: |
638 | return -EINVAL; |
639 | } |
640 | |
641 | br_mrp_port_switchdev_set_role(p, role); |
642 | |
643 | return 0; |
644 | } |
645 | |
646 | /* Set ring state, ring state can be only Open or Closed |
647 | * note: already called with rtnl_lock |
648 | */ |
649 | int br_mrp_set_ring_state(struct net_bridge *br, |
650 | struct br_mrp_ring_state *state) |
651 | { |
652 | struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id); |
653 | |
654 | if (!mrp) |
655 | return -EINVAL; |
656 | |
657 | if (mrp->ring_state != state->ring_state) |
658 | mrp->ring_transitions++; |
659 | |
660 | mrp->ring_state = state->ring_state; |
661 | |
662 | br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state); |
663 | |
664 | return 0; |
665 | } |
666 | |
667 | /* Set ring role, ring role can be only MRM(Media Redundancy Manager) or |
668 | * MRC(Media Redundancy Client). |
669 | * note: already called with rtnl_lock |
670 | */ |
671 | int br_mrp_set_ring_role(struct net_bridge *br, |
672 | struct br_mrp_ring_role *role) |
673 | { |
674 | struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id); |
675 | enum br_mrp_hw_support support; |
676 | |
677 | if (!mrp) |
678 | return -EINVAL; |
679 | |
680 | mrp->ring_role = role->ring_role; |
681 | |
682 | /* If there is an error just bail out */ |
683 | support = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role); |
684 | if (support == BR_MRP_NONE) |
685 | return -EOPNOTSUPP; |
686 | |
687 | /* Now detect if the HW actually applied the role or not. If the HW |
688 | * applied the role it means that the SW doesn't need to do those operations |
689 | * anymore. For example if the role is MRM then the HW will notify the |
690 | * SW when the ring is open, but if the role is not pushed to the HW the SW |
691 | * will need to detect when the ring is open. |
692 | */ |
693 | mrp->ring_role_offloaded = support == BR_MRP_SW ? 0 : 1; |
694 | |
695 | return 0; |
696 | } |
697 | |
698 | /* Start to generate or monitor MRP test frames, the frames are generated by |
699 | * HW and if it fails, they are generated by the SW. |
700 | * note: already called with rtnl_lock |
701 | */ |
702 | int br_mrp_start_test(struct net_bridge *br, |
703 | struct br_mrp_start_test *test) |
704 | { |
705 | struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id); |
706 | enum br_mrp_hw_support support; |
707 | |
708 | if (!mrp) |
709 | return -EINVAL; |
710 | |
711 | /* Try to push it to the HW and if it fails then continue with SW |
712 | * implementation and if that also fails then return error. |
713 | */ |
714 | support = br_mrp_switchdev_send_ring_test(br, mrp, test->interval, |
715 | test->max_miss, test->period, |
716 | test->monitor); |
717 | if (support == BR_MRP_NONE) |
718 | return -EOPNOTSUPP; |
719 | |
720 | if (support == BR_MRP_HW) |
721 | return 0; |
722 | |
723 | mrp->test_interval = test->interval; |
724 | mrp->test_end = jiffies + usecs_to_jiffies(test->period); |
725 | mrp->test_max_miss = test->max_miss; |
726 | mrp->test_monitor = test->monitor; |
727 | mrp->test_count_miss = 0; |
728 | queue_delayed_work(system_wq, &mrp->test_work, |
729 | usecs_to_jiffies(test->interval)); |
730 | |
731 | return 0; |
732 | } |
733 | |
734 | /* Set in state, in state can be only Open or Closed |
735 | * note: already called with rtnl_lock |
736 | */ |
737 | int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state) |
738 | { |
739 | struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id); |
740 | |
741 | if (!mrp) |
742 | return -EINVAL; |
743 | |
744 | if (mrp->in_state != state->in_state) |
745 | mrp->in_transitions++; |
746 | |
747 | mrp->in_state = state->in_state; |
748 | |
749 | br_mrp_switchdev_set_in_state(br, mrp, state->in_state); |
750 | |
751 | return 0; |
752 | } |
753 | |
754 | /* Set in role, in role can be only MIM(Media Interconnection Manager) or |
755 | * MIC(Media Interconnection Client). |
756 | * note: already called with rtnl_lock |
757 | */ |
758 | int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role) |
759 | { |
760 | struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id); |
761 | enum br_mrp_hw_support support; |
762 | struct net_bridge_port *p; |
763 | |
764 | if (!mrp) |
765 | return -EINVAL; |
766 | |
767 | if (!br_mrp_get_port(br, role->i_ifindex)) |
768 | return -EINVAL; |
769 | |
770 | if (role->in_role == BR_MRP_IN_ROLE_DISABLED) { |
771 | u8 state; |
772 | |
773 | /* It is not allowed to disable a port that doesn't exist */ |
774 | p = rtnl_dereference(mrp->i_port); |
775 | if (!p) |
776 | return -EINVAL; |
777 | |
778 | /* Stop generating MRP_InTest frames */ |
779 | cancel_delayed_work_sync(&mrp->in_test_work); |
780 | br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); |
781 | |
782 | /* Remove the port */ |
783 | spin_lock_bh(&br->lock); |
784 | state = netif_running(br->dev) ? |
785 | BR_STATE_FORWARDING : BR_STATE_DISABLED; |
786 | p->state = state; |
787 | p->flags &= ~BR_MRP_AWARE; |
788 | spin_unlock_bh(&br->lock); |
789 | br_mrp_port_switchdev_set_state(p, state); |
790 | rcu_assign_pointer(mrp->i_port, NULL); |
791 | |
792 | mrp->in_role = role->in_role; |
793 | mrp->in_id = 0; |
794 | |
795 | return 0; |
796 | } |
797 | |
798 | /* It is not possible to have the same port part of multiple rings */ |
799 | if (!br_mrp_unique_ifindex(br, role->i_ifindex)) |
800 | return -EINVAL; |
801 | |
802 | /* It is not allowed to set a different interconnect port if the mrp |
803 | * instance already has one. First it needs to be disabled and only |
804 | * after that can the new port be set |
805 | */ |
806 | if (rcu_access_pointer(mrp->i_port)) |
807 | return -EINVAL; |
808 | |
809 | p = br_mrp_get_port(br, role->i_ifindex); |
810 | spin_lock_bh(&br->lock); |
811 | p->state = BR_STATE_FORWARDING; |
812 | p->flags |= BR_MRP_AWARE; |
813 | spin_unlock_bh(&br->lock); |
814 | rcu_assign_pointer(mrp->i_port, p); |
815 | |
816 | mrp->in_role = role->in_role; |
817 | mrp->in_id = role->in_id; |
818 | |
819 | /* If there is an error just bail out */ |
820 | support = br_mrp_switchdev_set_in_role(br, mrp, role->in_id, |
821 | role->ring_id, role->in_role); |
822 | if (support == BR_MRP_NONE) |
823 | return -EOPNOTSUPP; |
824 | |
825 | /* Now detect if the HW actually applied the role or not. If the HW |
826 | * applied the role it means that the SW doesn't need to do those operations |
827 | * anymore. For example if the role is MIM then the HW will notify the |
828 | * SW when the interconnect ring is open, but if the role is not pushed to the HW |
829 | * the SW will need to detect when the interconnect ring is open. |
830 | */ |
831 | mrp->in_role_offloaded = support == BR_MRP_SW ? 0 : 1; |
832 | |
833 | return 0; |
834 | } |
835 | |
836 | /* Start to generate MRP_InTest frames, the frames are generated by |
837 | * HW and if it fails, they are generated by the SW. |
838 | * note: already called with rtnl_lock |
839 | */ |
840 | int br_mrp_start_in_test(struct net_bridge *br, |
841 | struct br_mrp_start_in_test *in_test) |
842 | { |
843 | struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id); |
844 | enum br_mrp_hw_support support; |
845 | |
846 | if (!mrp) |
847 | return -EINVAL; |
848 | |
849 | if (mrp->in_role != BR_MRP_IN_ROLE_MIM) |
850 | return -EINVAL; |
851 | |
852 | /* Try to push it to the HW and if it fails then continue with SW |
853 | * implementation and if that also fails then return error. |
854 | */ |
855 | support = br_mrp_switchdev_send_in_test(br, mrp, in_test->interval, |
856 | in_test->max_miss, |
857 | in_test->period); |
858 | if (support == BR_MRP_NONE) |
859 | return -EOPNOTSUPP; |
860 | |
861 | if (support == BR_MRP_HW) |
862 | return 0; |
863 | |
864 | mrp->in_test_interval = in_test->interval; |
865 | mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period); |
866 | mrp->in_test_max_miss = in_test->max_miss; |
867 | mrp->in_test_count_miss = 0; |
868 | queue_delayed_work(system_wq, &mrp->in_test_work, |
869 | usecs_to_jiffies(in_test->interval)); |
870 | |
871 | return 0; |
872 | } |
873 | |
874 | /* Determine if the frame type is a ring frame */ |
875 | static bool br_mrp_ring_frame(struct sk_buff *skb) |
876 | { |
877 | const struct br_mrp_tlv_hdr *hdr; |
878 | struct br_mrp_tlv_hdr _hdr; |
879 | |
880 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); |
881 | if (!hdr) |
882 | return false; |
883 | |
884 | if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST || |
885 | hdr->type == BR_MRP_TLV_HEADER_RING_TOPO || |
886 | hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN || |
887 | hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP || |
888 | hdr->type == BR_MRP_TLV_HEADER_OPTION) |
889 | return true; |
890 | |
891 | return false; |
892 | } |
893 | |
894 | /* Determine if the frame type is an interconnect frame */ |
895 | static bool br_mrp_in_frame(struct sk_buff *skb) |
896 | { |
897 | const struct br_mrp_tlv_hdr *hdr; |
898 | struct br_mrp_tlv_hdr _hdr; |
899 | |
900 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); |
901 | if (!hdr) |
902 | return false; |
903 | |
904 | if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST || |
905 | hdr->type == BR_MRP_TLV_HEADER_IN_TOPO || |
906 | hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN || |
907 | hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP || |
908 | hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) |
909 | return true; |
910 | |
911 | return false; |
912 | } |
913 | |
914 | /* Process only MRP Test frame. All the other MRP frames are processed by |
915 | * userspace application |
916 | * note: already called with rcu_read_lock |
917 | */ |
918 | static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port, |
919 | struct sk_buff *skb) |
920 | { |
921 | const struct br_mrp_tlv_hdr *hdr; |
922 | struct br_mrp_tlv_hdr _hdr; |
923 | |
924 | /* Each MRP header starts with a version field which is 16 bits. |
925 | * Therefore skip the version and get directly the TLV header. |
926 | */ |
927 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); |
928 | if (!hdr) |
929 | return; |
930 | |
931 | if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST) |
932 | return; |
933 | |
934 | mrp->test_count_miss = 0; |
935 | |
936 | /* Notify the userspace that the ring is closed only when the ring is |
937 | * not closed |
938 | */ |
939 | if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED) |
940 | br_mrp_ring_port_open(port->dev, false); |
941 | } |
942 | |
943 | /* Determine if the test hdr has a better priority than the node */ |
944 | static bool br_mrp_test_better_than_own(struct br_mrp *mrp, |
945 | struct net_bridge *br, |
946 | const struct br_mrp_ring_test_hdr *hdr) |
947 | { |
948 | u16 prio = be16_to_cpu(hdr->prio); |
949 | |
950 | if (prio < mrp->prio || |
951 | (prio == mrp->prio && |
952 | ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr))) |
953 | return true; |
954 | |
955 | return false; |
956 | } |
957 | |
958 | /* Process only MRP Test frame. All the other MRP frames are processed by |
959 | * userspace application |
960 | * note: already called with rcu_read_lock |
961 | */ |
962 | static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br, |
963 | struct net_bridge_port *port, |
964 | struct sk_buff *skb) |
965 | { |
966 | const struct br_mrp_ring_test_hdr *test_hdr; |
967 | struct br_mrp_ring_test_hdr _test_hdr; |
968 | const struct br_mrp_tlv_hdr *hdr; |
969 | struct br_mrp_tlv_hdr _hdr; |
970 | |
971 | /* Each MRP header starts with a version field which is 16 bits. |
972 | * Therefore skip the version and get directly the TLV header. |
973 | */ |
974 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); |
975 | if (!hdr) |
976 | return; |
977 | |
978 | if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST) |
979 | return; |
980 | |
981 | test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr), |
982 | sizeof(_test_hdr), &_test_hdr); |
983 | if (!test_hdr) |
984 | return; |
985 | |
986 | /* Only frames that have a better priority than the node will |
987 | * clear the miss counter because otherwise the node will need to behave |
988 | * as MRM. |
989 | */ |
990 | if (br_mrp_test_better_than_own(mrp, br, test_hdr)) |
991 | mrp->test_count_miss = 0; |
992 | } |
993 | |
994 | /* Process only MRP InTest frame. All the other MRP frames are processed by |
995 | * userspace application |
996 | * note: already called with rcu_read_lock |
997 | */ |
998 | static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port, |
999 | struct sk_buff *skb) |
1000 | { |
1001 | const struct br_mrp_in_test_hdr *in_hdr; |
1002 | struct br_mrp_in_test_hdr _in_hdr; |
1003 | const struct br_mrp_tlv_hdr *hdr; |
1004 | struct br_mrp_tlv_hdr _hdr; |
1005 | |
1006 | /* Each MRP header starts with a version field which is 16 bits. |
1007 | * Therefore skip the version and get directly the TLV header. |
1008 | */ |
1009 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); |
1010 | if (!hdr) |
1011 | return false; |
1012 | |
1013 | /* The check for InTest frame type was already done */ |
1014 | in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr), |
1015 | sizeof(_in_hdr), &_in_hdr); |
1016 | if (!in_hdr) |
1017 | return false; |
1018 | |
1019 | /* It needs to process only its own InTest frames. */ |
1020 | if (mrp->in_id != ntohs(in_hdr->id)) |
1021 | return false; |
1022 | |
1023 | mrp->in_test_count_miss = 0; |
1024 | |
1025 | /* Notify the userspace that the ring is closed only when the ring is |
1026 | * not closed |
1027 | */ |
1028 | if (mrp->in_state != BR_MRP_IN_STATE_CLOSED) |
1029 | br_mrp_in_port_open(port->dev, false); |
1030 | |
1031 | return true; |
1032 | } |
1033 | |
1034 | /* Get the MRP frame type |
1035 | * note: already called with rcu_read_lock |
1036 | */ |
1037 | static u8 br_mrp_get_frame_type(struct sk_buff *skb) |
1038 | { |
1039 | const struct br_mrp_tlv_hdr *hdr; |
1040 | struct br_mrp_tlv_hdr _hdr; |
1041 | |
1042 | /* Each MRP header starts with a version field which is 16 bits. |
1043 | * Therefore skip the version and get directly the TLV header. |
1044 | */ |
1045 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); |
1046 | if (!hdr) |
1047 | return 0xff; |
1048 | |
1049 | return hdr->type; |
1050 | } |
1051 | |
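/* Return true if the node behaves as MRM: role MRM, or role MRA while not only monitoring MRP_Test frames */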
1052 | static bool br_mrp_mrm_behaviour(struct br_mrp *mrp) |
1053 | { |
1054 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRM || |
1055 | (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor)) |
1056 | return true; |
1057 | |
1058 | return false; |
1059 | } |
1060 | |
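/* Return true if the node behaves as MRC: role MRC, or role MRA while only monitoring MRP_Test frames */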
1061 | static bool br_mrp_mrc_behaviour(struct br_mrp *mrp) |
1062 | { |
1063 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRC || |
1064 | (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor)) |
1065 | return true; |
1066 | |
1067 | return false; |
1068 | } |
1069 | |
1070 | /* This will just forward the frame to the other mrp ring ports, depending on |
1071 | * the frame type, ring role and interconnect role |
1072 | * note: already called with rcu_read_lock |
1073 | */ |
1074 | static int br_mrp_rcv(struct net_bridge_port *p, |
1075 | struct sk_buff *skb, struct net_device *dev) |
1076 | { |
1077 | struct net_bridge_port *p_port, *s_port, *i_port = NULL; |
1078 | struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL; |
1079 | struct net_bridge *br; |
1080 | struct br_mrp *mrp; |
1081 | |
1082 | /* If port is disabled don't accept any frames */ |
1083 | if (p->state == BR_STATE_DISABLED) |
1084 | return 0; |
1085 | |
1086 | br = p->br; |
1087 | mrp = br_mrp_find_port(br, p); |
1088 | if (unlikely(!mrp)) |
1089 | return 0; |
1090 | |
1091 | p_port = rcu_dereference(mrp->p_port); |
1092 | if (!p_port) |
1093 | return 0; |
1094 | p_dst = p_port; |
1095 | |
1096 | s_port = rcu_dereference(mrp->s_port); |
1097 | if (!s_port) |
1098 | return 0; |
1099 | s_dst = s_port; |
1100 | |
1101 | /* If the frame is a ring frame then it is not required to check the |
1102 | * interconnect role and ports to process or forward the frame |
1103 | */ |
1104 | if (br_mrp_ring_frame(skb)) { |
1105 | /* If the role is MRM then don't forward the frames */ |
1106 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) { |
1107 | br_mrp_mrm_process(mrp, p, skb); |
1108 | goto no_forward; |
1109 | } |
1110 | |
1111 | /* If the role is MRA then don't forward the frames if it |
1112 | * behaves as MRM node |
1113 | */ |
1114 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { |
1115 | if (!mrp->test_monitor) { |
1116 | br_mrp_mrm_process(mrp, p, skb); |
1117 | goto no_forward; |
1118 | } |
1119 | |
1120 | br_mrp_mra_process(mrp, br, p, skb); |
1121 | } |
1122 | |
1123 | goto forward; |
1124 | } |
1125 | |
1126 | if (br_mrp_in_frame(skb)) { |
1127 | u8 in_type = br_mrp_get_frame_type(skb); |
1128 | |
1129 | i_port = rcu_dereference(mrp->i_port); |
1130 | i_dst = i_port; |
1131 | |
1132 | /* If the ring port is in block state it should not forward |
1133 | * In_Test frames |
1134 | */ |
1135 | if (br_mrp_is_ring_port(p_port, s_port, p) && |
1136 | p->state == BR_STATE_BLOCKING && |
1137 | in_type == BR_MRP_TLV_HEADER_IN_TEST) |
1138 | goto no_forward; |
1139 | |
1140 | /* Nodes that behave as MRM need to stop forwarding the |
1141 | * frames in case the ring is closed, otherwise there will be a loop. |
1142 | * In this case the frame is not forwarded between the ring ports. |
1143 | */ |
1144 | if (br_mrp_mrm_behaviour(mrp) && |
1145 | br_mrp_is_ring_port(p_port, s_port, p) && |
1146 | (s_port->state != BR_STATE_FORWARDING || |
1147 | p_port->state != BR_STATE_FORWARDING)) { |
1148 | p_dst = NULL; |
1149 | s_dst = NULL; |
1150 | } |
1151 | |
1152 | /* A node that behaves as MRC and doesn't have an interconnect |
1153 | * role should forward all frames between the ring ports |
1154 | * because it doesn't have an interconnect port |
1155 | */ |
1156 | if (br_mrp_mrc_behaviour(mrp) && |
1157 | mrp->in_role == BR_MRP_IN_ROLE_DISABLED) |
1158 | goto forward; |
1159 | |
1160 | if (mrp->in_role == BR_MRP_IN_ROLE_MIM) { |
1161 | if (in_type == BR_MRP_TLV_HEADER_IN_TEST) { |
1162 | /* MIM should not forward its own InTest |
1163 | * frames |
1164 | */ |
1165 | if (br_mrp_mim_process(mrp, p, skb)) { |
1166 | goto no_forward; |
1167 | } else { |
1168 | if (br_mrp_is_ring_port(p_port, s_port, |
1169 | p)) |
1170 | i_dst = NULL; |
1171 | |
1172 | if (br_mrp_is_in_port(i_port, p)) |
1173 | goto no_forward; |
1174 | } |
1175 | } else { |
1176 | /* MIM should forward IntLinkChange/Status and |
1177 | * IntTopoChange between ring ports but MIM |
1178 | * should not forward IntLinkChange/Status and |
1179 | * IntTopoChange if the frame was received at |
1180 | * the interconnect port |
1181 | */ |
1182 | if (br_mrp_is_ring_port(p_port, s_port, p)) |
1183 | i_dst = NULL; |
1184 | |
1185 | if (br_mrp_is_in_port(i_port, p)) |
1186 | goto no_forward; |
1187 | } |
1188 | } |
1189 | |
1190 | if (mrp->in_role == BR_MRP_IN_ROLE_MIC) { |
1191 | /* MIC should forward InTest frames on all ports |
1192 | * regardless of the received port |
1193 | */ |
1194 | if (in_type == BR_MRP_TLV_HEADER_IN_TEST) |
1195 | goto forward; |
1196 | |
1197 | /* MIC should forward IntLinkChange frames only if they |
1198 | * are received on ring ports to all the ports |
1199 | */ |
1200 | if (br_mrp_is_ring_port(p_port, s_port, p) && |
1201 | (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP || |
1202 | in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN)) |
1203 | goto forward; |
1204 | |
1205 | /* MIC should forward IntLinkStatus frames only to |
1206 | * interconnect port if it was received on a ring port. |
1207 | * If it is received on interconnect port then, it |
1208 | * should be forward on both ring ports |
1209 | */ |
1210 | if (br_mrp_is_ring_port(p_port, s_port, p) && |
1211 | in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) { |
1212 | p_dst = NULL; |
1213 | s_dst = NULL; |
1214 | } |
1215 | |
1216 | /* Should forward the InTopo frames only between the |
1217 | * ring ports |
1218 | */ |
1219 | if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) { |
1220 | i_dst = NULL; |
1221 | goto forward; |
1222 | } |
1223 | |
1224 | /* In all the other cases don't forward the frames */ |
1225 | goto no_forward; |
1226 | } |
1227 | } |
1228 | |
1229 | forward: |
1230 | if (p_dst) |
1231 | br_forward(p_dst, skb, true, false); |
1232 | if (s_dst) |
1233 | br_forward(s_dst, skb, true, false); |
1234 | if (i_dst) |
1235 | br_forward(i_dst, skb, true, false); |
1236 | |
1237 | no_forward: |
1238 | return 1; |
1239 | } |
1240 | |
1241 | /* Check if the frame was received on a port that is part of MRP ring |
1242 | * and if the frame has an MRP EtherType. In that case process the frame, otherwise do |
1243 | * normal forwarding. |
1244 | * note: already called with rcu_read_lock |
1245 | */ |
1246 | static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb) |
1247 | { |
1248 | /* If there is no MRP instance do normal forwarding */ |
1249 | if (likely(!(p->flags & BR_MRP_AWARE))) |
1250 | goto out; |
1251 | |
1252 | return br_mrp_rcv(p, skb, p->dev); |
1253 | out: |
1254 | return 0; |
1255 | } |
1256 | |
1257 | bool br_mrp_enabled(struct net_bridge *br) |
1258 | { |
1259 | return !hlist_empty(&br->mrp_list); |
1260 | } |
1261 | |