1 | /* SPDX-License-Identifier: GPL-1.0+ */ |
2 | /* |
3 | * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. |
4 | * |
5 | * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes |
6 | * NCM: Network and Communications Management, Inc. |
7 | * |
8 | * BUT, I'm the one who modified it for ethernet, so: |
9 | * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov |
10 | * |
11 | */ |
12 | |
13 | #ifndef _NET_BONDING_H |
14 | #define _NET_BONDING_H |
15 | |
16 | #include <linux/timer.h> |
17 | #include <linux/proc_fs.h> |
18 | #include <linux/if_bonding.h> |
19 | #include <linux/cpumask.h> |
20 | #include <linux/in6.h> |
21 | #include <linux/netpoll.h> |
22 | #include <linux/inetdevice.h> |
23 | #include <linux/etherdevice.h> |
24 | #include <linux/reciprocal_div.h> |
25 | #include <linux/if_link.h> |
26 | |
27 | #include <net/bond_3ad.h> |
28 | #include <net/bond_alb.h> |
29 | #include <net/bond_options.h> |
30 | #include <net/ipv6.h> |
31 | #include <net/addrconf.h> |
32 | |
33 | #define BOND_MAX_ARP_TARGETS 16 |
34 | #define BOND_MAX_NS_TARGETS BOND_MAX_ARP_TARGETS |
35 | |
36 | #define BOND_DEFAULT_MIIMON 100 |
37 | |
38 | #ifndef __long_aligned |
39 | #define __long_aligned __attribute__((aligned((sizeof(long))))) |
40 | #endif |
41 | |
42 | #define slave_info(bond_dev, slave_dev, fmt, ...) \ |
43 | netdev_info(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) |
44 | #define slave_warn(bond_dev, slave_dev, fmt, ...) \ |
45 | netdev_warn(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) |
46 | #define slave_dbg(bond_dev, slave_dev, fmt, ...) \ |
47 | netdev_dbg(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) |
48 | #define slave_err(bond_dev, slave_dev, fmt, ...) \ |
49 | netdev_err(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) |
50 | |
51 | #define BOND_MODE(bond) ((bond)->params.mode) |
52 | |
53 | /* slave list primitives */ |
54 | #define bond_slave_list(bond) (&(bond)->dev->adj_list.lower) |
55 | |
56 | #define bond_has_slaves(bond) !list_empty(bond_slave_list(bond)) |
57 | |
58 | /* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */ |
59 | #define bond_first_slave(bond) \ |
60 | (bond_has_slaves(bond) ? \ |
61 | netdev_adjacent_get_private(bond_slave_list(bond)->next) : \ |
62 | NULL) |
63 | #define bond_last_slave(bond) \ |
64 | (bond_has_slaves(bond) ? \ |
65 | netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \ |
66 | NULL) |
67 | |
68 | /* Caller must have rcu_read_lock */ |
69 | #define bond_first_slave_rcu(bond) \ |
70 | netdev_lower_get_first_private_rcu(bond->dev) |
71 | |
72 | #define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond)) |
73 | #define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond)) |
74 | |
75 | /** |
76 | * bond_for_each_slave - iterate over all slaves |
77 | * @bond: the bond holding this list |
78 | * @pos: current slave |
79 | * @iter: list_head * iterator |
80 | * |
81 | * Caller must hold RTNL |
82 | */ |
83 | #define bond_for_each_slave(bond, pos, iter) \ |
84 | netdev_for_each_lower_private((bond)->dev, pos, iter) |
85 | |
86 | /* Caller must have rcu_read_lock */ |
87 | #define bond_for_each_slave_rcu(bond, pos, iter) \ |
88 | netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) |
89 | |
90 | #define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ |
91 | NETIF_F_GSO_ESP) |
92 | |
93 | #ifdef CONFIG_NET_POLL_CONTROLLER |
94 | extern atomic_t netpoll_block_tx; |
95 | |
96 | static inline void block_netpoll_tx(void) |
97 | { |
98 | atomic_inc(v: &netpoll_block_tx); |
99 | } |
100 | |
101 | static inline void unblock_netpoll_tx(void) |
102 | { |
103 | atomic_dec(v: &netpoll_block_tx); |
104 | } |
105 | |
106 | static inline int is_netpoll_tx_blocked(struct net_device *dev) |
107 | { |
108 | if (unlikely(netpoll_tx_running(dev))) |
109 | return atomic_read(v: &netpoll_block_tx); |
110 | return 0; |
111 | } |
112 | #else |
113 | #define block_netpoll_tx() |
114 | #define unblock_netpoll_tx() |
115 | #define is_netpoll_tx_blocked(dev) (0) |
116 | #endif |
117 | |
/* User-configurable parameters for one bonding device; values come from
 * module options, sysfs, or netlink (see bond_options.c).
 */
struct bond_params {
	int mode;		/* bonding mode, one of BOND_MODE_* */
	int xmit_policy;	/* transmit hash policy for hashed modes */
	int miimon;		/* MII link-monitoring interval; 0 disables */
	u8 num_peer_notif;
	u8 missed_max;
	int arp_interval;	/* ARP link-monitoring interval; 0 disables */
	int arp_validate;
	int arp_all_targets;
	int use_carrier;
	int fail_over_mac;
	int updelay;
	int downdelay;
	int peer_notif_delay;
	int lacp_active;
	int lacp_fast;
	unsigned int min_links;
	int ad_select;
	char primary[IFNAMSIZ];	/* name of the preferred primary slave */
	int primary_reselect;	/* one of BOND_PRI_RESELECT_* */
	__be32 arp_targets[BOND_MAX_ARP_TARGETS];
	int tx_queues;
	int all_slaves_active;
	int resend_igmp;
	int lp_interval;
	int packets_per_slave;
	int tlb_dynamic_lb;
	struct reciprocal_value reciprocal_packets_per_slave;
	u16 ad_actor_sys_prio;
	u16 ad_user_port_key;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
#endif

	/* 2 bytes of padding : see ether_addr_equal_64bits() */
	u8 ad_actor_system[ETH_ALEN + 2];
};
155 | |
/* Per-slave state kept by the bonding master for each enslaved device. */
struct slave {
	struct net_device *dev; /* first - useful for panic debug */
	struct bonding *bond; /* our master */
	int delay;	/* countdown used by up/down delay handling */
	/* all 4 in jiffies */
	unsigned long last_link_up;
	unsigned long last_tx;
	unsigned long last_rx;
	unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
	s8 link;		/* one of BOND_LINK_XXXX */
	s8 link_new_state;	/* one of BOND_LINK_XXXX */
	u8 backup:1,   /* indicates backup slave. Value corresponds with
			  BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
	   inactive:1, /* indicates inactive slave */
	   should_notify:1, /* indicates whether the state changed */
	   should_notify_link:1; /* indicates whether the link changed */
	u8 duplex;
	u32 original_mtu;	/* MTU to restore when released from the bond */
	u32 link_failure_count;
	u32 speed;
	u16 queue_id;
	u8 perm_hwaddr[MAX_ADDR_LEN];	/* permanent HW address, for restore */
	int prio;
	struct ad_slave_info *ad_info;
	struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif
	struct delayed_work notify_work;
	struct kobject kobj;	/* sysfs object; see to_slave() */
	struct rtnl_link_stats64 slave_stats;
};
188 | |
/* Map a sysfs kobject embedded in a struct slave back to the slave. */
static inline struct slave *to_slave(struct kobject *kobj)
{
	return container_of(kobj, struct slave, kobj);
}
193 | |
/* RCU-managed snapshot array of slaves (see usable_slaves/all_slaves). */
struct bond_up_slave {
	unsigned int count;	/* number of entries in arr[] */
	struct rcu_head rcu;	/* deferred free after RCU grace period */
	struct slave *arr[];	/* flexible array of slave pointers */
};
199 | |
200 | /* |
201 | * Link pseudo-state only used internally by monitors |
202 | */ |
203 | #define BOND_LINK_NOCHANGE -1 |
204 | |
/* One offloaded xfrm state tracked on the bond's ipsec_list. */
struct bond_ipsec {
	struct list_head list;
	struct xfrm_state *xs;
};
209 | |
210 | /* |
211 | * Here are the locking policies for the two bonding locks: |
212 | * Get rcu_read_lock when reading or RTNL when writing slave list. |
213 | */ |
/* Private state of one bonding master device (netdev_priv of bond->dev). */
struct bonding {
	struct net_device *dev; /* first - useful for panic debug */
	struct slave __rcu *curr_active_slave;
	struct slave __rcu *current_arp_slave;
	struct slave __rcu *primary_slave;
	struct bond_up_slave __rcu *usable_slaves;
	struct bond_up_slave __rcu *all_slaves;
	bool force_primary;
	bool notifier_ctx;
	s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	/* mode_lock is used for mode-specific locking needs, currently used by:
	 * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and
	 *                bond_3ad_state_machine_handler() concurrently and also
	 *                the access to the state machine shared variables.
	 * TLB mode (5) - to sync the use and modifications of its hash table
	 * ALB mode (6) - to sync the use and modifications of its hash table
	 */
	spinlock_t mode_lock;
	spinlock_t stats_lock;	/* protects bond_stats aggregation */
	u32 send_peer_notif;
	u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_entry;
	char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
	struct list_head bond_list;	/* entry on bond_net->dev_list */
	u32 __percpu *rr_tx_counter;	/* round-robin TX slave selector */
	struct ad_bond_info ad_info;
	struct alb_bond_info alb_info;
	struct bond_params params;
	struct workqueue_struct *wq;
	struct delayed_work mii_work;
	struct delayed_work arp_work;
	struct delayed_work alb_work;
	struct delayed_work ad_work;
	struct delayed_work mcast_work;
	struct delayed_work slave_arr_work;
#ifdef CONFIG_DEBUG_FS
	/* debugging support via debugfs */
	struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
	struct rtnl_link_stats64 bond_stats;
#ifdef CONFIG_XFRM_OFFLOAD
	struct list_head ipsec_list;
	/* protecting ipsec_list */
	spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
	struct bpf_prog *xdp_prog;
};
265 | |
266 | #define bond_slave_get_rcu(dev) \ |
267 | ((struct slave *) rcu_dereference(dev->rx_handler_data)) |
268 | |
269 | #define bond_slave_get_rtnl(dev) \ |
270 | ((struct slave *) rtnl_dereference(dev->rx_handler_data)) |
271 | |
272 | void bond_queue_slave_event(struct slave *slave); |
273 | void bond_lower_state_changed(struct slave *slave); |
274 | |
/* One VLAN (proto, id) hop on a device path; see bond_verify_device_path(). */
struct bond_vlan_tag {
	__be16		vlan_proto;
	unsigned short	vlan_id;
};
279 | |
280 | /* |
281 | * Returns NULL if the net_device does not belong to any of the bond's slaves |
282 | * |
283 | * Caller must hold bond lock for read |
284 | */ |
285 | static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, |
286 | struct net_device *slave_dev) |
287 | { |
288 | return netdev_lower_dev_get_private(dev: bond->dev, lower_dev: slave_dev); |
289 | } |
290 | |
/* Return the bonding master that @slave belongs to. */
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
	return slave->bond;
}
295 | |
296 | static inline bool bond_should_override_tx_queue(struct bonding *bond) |
297 | { |
298 | return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || |
299 | BOND_MODE(bond) == BOND_MODE_ROUNDROBIN; |
300 | } |
301 | |
302 | static inline bool bond_is_lb(const struct bonding *bond) |
303 | { |
304 | return BOND_MODE(bond) == BOND_MODE_TLB || |
305 | BOND_MODE(bond) == BOND_MODE_ALB; |
306 | } |
307 | |
308 | static inline bool bond_needs_speed_duplex(const struct bonding *bond) |
309 | { |
310 | return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond); |
311 | } |
312 | |
313 | static inline bool bond_is_nondyn_tlb(const struct bonding *bond) |
314 | { |
315 | return (bond_is_lb(bond) && bond->params.tlb_dynamic_lb == 0); |
316 | } |
317 | |
318 | static inline bool bond_mode_can_use_xmit_hash(const struct bonding *bond) |
319 | { |
320 | return (BOND_MODE(bond) == BOND_MODE_8023AD || |
321 | BOND_MODE(bond) == BOND_MODE_XOR || |
322 | BOND_MODE(bond) == BOND_MODE_TLB || |
323 | BOND_MODE(bond) == BOND_MODE_ALB); |
324 | } |
325 | |
326 | static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond) |
327 | { |
328 | return (BOND_MODE(bond) == BOND_MODE_8023AD || |
329 | BOND_MODE(bond) == BOND_MODE_XOR || |
330 | bond_is_nondyn_tlb(bond)); |
331 | } |
332 | |
333 | static inline bool bond_mode_uses_arp(int mode) |
334 | { |
335 | return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB && |
336 | mode != BOND_MODE_ALB; |
337 | } |
338 | |
339 | static inline bool bond_mode_uses_primary(int mode) |
340 | { |
341 | return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB || |
342 | mode == BOND_MODE_ALB; |
343 | } |
344 | |
/* Convenience wrapper: does this bond's current mode support a primary? */
static inline bool bond_uses_primary(struct bonding *bond)
{
	return bond_mode_uses_primary(BOND_MODE(bond));
}
349 | |
/* Return the active slave's netdev for modes that use a primary slave,
 * or NULL otherwise. Caller must hold rcu_read_lock() or RTNL
 * (rcu_dereference_rtnl() accepts either).
 */
static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
{
	struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);

	return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}
356 | |
357 | static inline bool bond_slave_is_up(struct slave *slave) |
358 | { |
359 | return netif_running(dev: slave->dev) && netif_carrier_ok(dev: slave->dev); |
360 | } |
361 | |
/* Move @slave to the active state, notifying only on an actual change. */
static inline void bond_set_active_slave(struct slave *slave)
{
	if (slave->backup) {
		slave->backup = 0;
		bond_queue_slave_event(slave);
		bond_lower_state_changed(slave);
	}
}
370 | |
/* Move @slave to the backup state, notifying only on an actual change. */
static inline void bond_set_backup_slave(struct slave *slave)
{
	if (!slave->backup) {
		slave->backup = 1;
		bond_queue_slave_event(slave);
		bond_lower_state_changed(slave);
	}
}
379 | |
380 | static inline void bond_set_slave_state(struct slave *slave, |
381 | int slave_state, bool notify) |
382 | { |
383 | if (slave->backup == slave_state) |
384 | return; |
385 | |
386 | slave->backup = slave_state; |
387 | if (notify) { |
388 | bond_lower_state_changed(slave); |
389 | bond_queue_slave_event(slave); |
390 | slave->should_notify = 0; |
391 | } else { |
392 | if (slave->should_notify) |
393 | slave->should_notify = 0; |
394 | else |
395 | slave->should_notify = 1; |
396 | } |
397 | } |
398 | |
399 | static inline void bond_slave_state_change(struct bonding *bond) |
400 | { |
401 | struct list_head *iter; |
402 | struct slave *tmp; |
403 | |
404 | bond_for_each_slave(bond, tmp, iter) { |
405 | if (tmp->link == BOND_LINK_UP) |
406 | bond_set_active_slave(slave: tmp); |
407 | else if (tmp->link == BOND_LINK_DOWN) |
408 | bond_set_backup_slave(slave: tmp); |
409 | } |
410 | } |
411 | |
412 | static inline void bond_slave_state_notify(struct bonding *bond) |
413 | { |
414 | struct list_head *iter; |
415 | struct slave *tmp; |
416 | |
417 | bond_for_each_slave(bond, tmp, iter) { |
418 | if (tmp->should_notify) { |
419 | bond_lower_state_changed(slave: tmp); |
420 | tmp->should_notify = 0; |
421 | } |
422 | } |
423 | } |
424 | |
/* Current state bit: 0 = active (BOND_STATE_ACTIVE), 1 = backup. */
static inline int bond_slave_state(struct slave *slave)
{
	return slave->backup;
}
429 | |
/* True when @slave is in the active (non-backup) state. */
static inline bool bond_is_active_slave(struct slave *slave)
{
	return !bond_slave_state(slave);
}
434 | |
/* True when @slave may transmit: device up with carrier, link marked up,
 * and the slave is in the active state.
 */
static inline bool bond_slave_can_tx(struct slave *slave)
{
	return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
	       bond_is_active_slave(slave);
}
440 | |
/* True when @slave_dev is currently an active slave of some bond.
 * NOTE(review): assumes @slave_dev is actually enslaved — bond_slave_get_rcu()
 * returning NULL would be dereferenced here; callers must guarantee it.
 */
static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
{
	struct slave *slave;
	bool active;

	rcu_read_lock();
	slave = bond_slave_get_rcu(slave_dev);
	active = bond_is_active_slave(slave);
	rcu_read_unlock();

	return active;
}
453 | |
454 | static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) |
455 | { |
456 | if (len == ETH_ALEN) { |
457 | ether_addr_copy(dst, src); |
458 | return; |
459 | } |
460 | |
461 | memcpy(dst, src, len); |
462 | } |
463 | |
464 | #define BOND_PRI_RESELECT_ALWAYS 0 |
465 | #define BOND_PRI_RESELECT_BETTER 1 |
466 | #define BOND_PRI_RESELECT_FAILURE 2 |
467 | |
468 | #define BOND_FOM_NONE 0 |
469 | #define BOND_FOM_ACTIVE 1 |
470 | #define BOND_FOM_FOLLOW 2 |
471 | |
472 | #define BOND_ARP_TARGETS_ANY 0 |
473 | #define BOND_ARP_TARGETS_ALL 1 |
474 | |
475 | #define BOND_ARP_VALIDATE_NONE 0 |
476 | #define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) |
477 | #define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) |
478 | #define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ |
479 | BOND_ARP_VALIDATE_BACKUP) |
480 | #define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1) |
481 | #define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \ |
482 | BOND_ARP_FILTER) |
483 | #define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \ |
484 | BOND_ARP_FILTER) |
485 | |
486 | #define BOND_SLAVE_NOTIFY_NOW true |
487 | #define BOND_SLAVE_NOTIFY_LATER false |
488 | |
/* Non-zero when ARP validation is enabled for @slave's current
 * active/backup state (arp_validate is a bitmask over BOND_STATE_*).
 */
static inline int slave_do_arp_validate(struct bonding *bond,
					struct slave *slave)
{
	return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
494 | |
/* Non-zero when the "filter" bit of arp_validate is set, i.e. only
 * ARP traffic counts toward link liveness.
 */
static inline int slave_do_arp_validate_only(struct bonding *bond)
{
	return bond->params.arp_validate & BOND_ARP_FILTER;
}
499 | |
/* Valid ARP target: not limited-broadcast and not in the zero network. */
static inline int bond_is_ip_target_ok(__be32 addr)
{
	return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
}
504 | |
505 | #if IS_ENABLED(CONFIG_IPV6) |
/* Valid NS target: not the unspecified address, loopback, or multicast.
 * Fix: strip IDE-inserted named-argument artifacts ("a:") — not valid C.
 */
static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
{
	return !ipv6_addr_any(addr) &&
	       !ipv6_addr_loopback(addr) &&
	       !ipv6_addr_is_multicast(addr);
}
512 | #endif |
513 | |
514 | /* Get the oldest arp which we've received on this slave for bond's |
515 | * arp_targets. |
516 | */ |
517 | static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, |
518 | struct slave *slave) |
519 | { |
520 | int i = 1; |
521 | unsigned long ret = slave->target_last_arp_rx[0]; |
522 | |
523 | for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) |
524 | if (time_before(slave->target_last_arp_rx[i], ret)) |
525 | ret = slave->target_last_arp_rx[i]; |
526 | |
527 | return ret; |
528 | } |
529 | |
/* Last RX time (jiffies) used for link monitoring: with arp_all_targets=all,
 * every target must have answered, so use the oldest per-target timestamp;
 * otherwise any traffic counts.
 */
static inline unsigned long slave_last_rx(struct bonding *bond,
					  struct slave *slave)
{
	if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
		return slave_oldest_target_arp_rx(bond, slave);

	return slave->last_rx;
}
538 | |
/* Record "now" as the slave's last TX time; WRITE_ONCE pairs with the
 * lockless READ_ONCE in slave_last_tx().
 */
static inline void slave_update_last_tx(struct slave *slave)
{
	WRITE_ONCE(slave->last_tx, jiffies);
}
543 | |
/* Lockless read of the slave's last TX time (jiffies). */
static inline unsigned long slave_last_tx(struct slave *slave)
{
	return READ_ONCE(slave->last_tx);
}
548 | |
549 | #ifdef CONFIG_NET_POLL_CONTROLLER |
550 | static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave, |
551 | struct sk_buff *skb) |
552 | { |
553 | return netpoll_send_skb(np: slave->np, skb); |
554 | } |
555 | #else |
static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
						struct sk_buff *skb)
{
	/* Unreachable when netpoll support is compiled out. */
	BUG();
	return NETDEV_TX_OK;
}
562 | #endif |
563 | |
564 | static inline void bond_set_slave_inactive_flags(struct slave *slave, |
565 | bool notify) |
566 | { |
567 | if (!bond_is_lb(bond: slave->bond)) |
568 | bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); |
569 | if (!slave->bond->params.all_slaves_active) |
570 | slave->inactive = 1; |
571 | } |
572 | |
/* Mark @slave active: promote to the active state and clear the
 * inactive (RX-dropping) flag.
 */
static inline void bond_set_slave_active_flags(struct slave *slave,
					       bool notify)
{
	bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
	slave->inactive = 0;
}
579 | |
/* True when @slave is flagged inactive (its RX is suppressed). */
static inline bool bond_is_slave_inactive(struct slave *slave)
{
	return slave->inactive;
}
584 | |
/* Stage a new link state; takes effect in bond_commit_link_state(). */
static inline void bond_propose_link_state(struct slave *slave, int state)
{
	slave->link_new_state = state;
}
589 | |
590 | static inline void bond_commit_link_state(struct slave *slave, bool notify) |
591 | { |
592 | if (slave->link_new_state == BOND_LINK_NOCHANGE) |
593 | return; |
594 | |
595 | slave->link = slave->link_new_state; |
596 | if (notify) { |
597 | bond_queue_slave_event(slave); |
598 | bond_lower_state_changed(slave); |
599 | slave->should_notify_link = 0; |
600 | } else { |
601 | if (slave->should_notify_link) |
602 | slave->should_notify_link = 0; |
603 | else |
604 | slave->should_notify_link = 1; |
605 | } |
606 | } |
607 | |
/* Propose and immediately commit a link state change in one step. */
static inline void bond_set_slave_link_state(struct slave *slave, int state,
					     bool notify)
{
	bond_propose_link_state(slave, state);
	bond_commit_link_state(slave, notify);
}
614 | |
615 | static inline void bond_slave_link_notify(struct bonding *bond) |
616 | { |
617 | struct list_head *iter; |
618 | struct slave *tmp; |
619 | |
620 | bond_for_each_slave(bond, tmp, iter) { |
621 | if (tmp->should_notify_link) { |
622 | bond_queue_slave_event(slave: tmp); |
623 | bond_lower_state_changed(slave: tmp); |
624 | tmp->should_notify_link = 0; |
625 | } |
626 | } |
627 | } |
628 | |
629 | static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local) |
630 | { |
631 | struct in_device *in_dev; |
632 | __be32 addr = 0; |
633 | |
634 | rcu_read_lock(); |
635 | in_dev = __in_dev_get_rcu(dev); |
636 | |
637 | if (in_dev) |
638 | addr = inet_confirm_addr(net: dev_net(dev), in_dev, dst, local, |
639 | scope: RT_SCOPE_HOST); |
640 | rcu_read_unlock(); |
641 | return addr; |
642 | } |
643 | |
/* Per-network-namespace bonding state. */
struct bond_net {
	struct net *net;	/* Associated network namespace */
	struct list_head dev_list;	/* all bonds in this namespace */
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_dir;	/* /proc/net/bonding */
#endif
	struct class_attribute class_attr_bonding_masters;
};
652 | |
653 | int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); |
654 | netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); |
655 | int bond_create(struct net *net, const char *name); |
656 | int bond_create_sysfs(struct bond_net *net); |
657 | void bond_destroy_sysfs(struct bond_net *net); |
658 | void bond_prepare_sysfs_group(struct bonding *bond); |
659 | int bond_sysfs_slave_add(struct slave *slave); |
660 | void bond_sysfs_slave_del(struct slave *slave); |
661 | void bond_xdp_set_features(struct net_device *bond_dev); |
662 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, |
663 | struct netlink_ext_ack *extack); |
664 | int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); |
665 | u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb); |
666 | int bond_set_carrier(struct bonding *bond); |
667 | void bond_select_active_slave(struct bonding *bond); |
668 | void bond_change_active_slave(struct bonding *bond, struct slave *new_active); |
669 | void bond_create_debugfs(void); |
670 | void bond_destroy_debugfs(void); |
671 | void bond_debug_register(struct bonding *bond); |
672 | void bond_debug_unregister(struct bonding *bond); |
673 | void bond_debug_reregister(struct bonding *bond); |
674 | const char *bond_mode_name(int mode); |
675 | void bond_setup(struct net_device *bond_dev); |
676 | unsigned int bond_get_num_tx_queues(void); |
677 | int bond_netlink_init(void); |
678 | void bond_netlink_fini(void); |
679 | struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); |
680 | const char *bond_slave_link_status(s8 link); |
681 | struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, |
682 | struct net_device *end_dev, |
683 | int level); |
684 | int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave); |
685 | void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); |
686 | void bond_work_init_all(struct bonding *bond); |
687 | |
688 | #ifdef CONFIG_PROC_FS |
689 | void bond_create_proc_entry(struct bonding *bond); |
690 | void bond_remove_proc_entry(struct bonding *bond); |
691 | void bond_create_proc_dir(struct bond_net *bn); |
692 | void bond_destroy_proc_dir(struct bond_net *bn); |
693 | #else |
/* CONFIG_PROC_FS disabled: the procfs hooks compile away to no-ops. */
static inline void bond_create_proc_entry(struct bonding *bond)
{
}

static inline void bond_remove_proc_entry(struct bonding *bond)
{
}

static inline void bond_create_proc_dir(struct bond_net *bn)
{
}

static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
709 | #endif |
710 | |
711 | static inline struct slave *bond_slave_has_mac(struct bonding *bond, |
712 | const u8 *mac) |
713 | { |
714 | struct list_head *iter; |
715 | struct slave *tmp; |
716 | |
717 | bond_for_each_slave(bond, tmp, iter) |
718 | if (ether_addr_equal_64bits(addr1: mac, addr2: tmp->dev->dev_addr)) |
719 | return tmp; |
720 | |
721 | return NULL; |
722 | } |
723 | |
724 | /* Caller must hold rcu_read_lock() for read */ |
725 | static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) |
726 | { |
727 | struct list_head *iter; |
728 | struct slave *tmp; |
729 | |
730 | bond_for_each_slave_rcu(bond, tmp, iter) |
731 | if (ether_addr_equal_64bits(addr1: mac, addr2: tmp->dev->dev_addr)) |
732 | return true; |
733 | return false; |
734 | } |
735 | |
736 | /* Check if the ip is present in arp ip list, or first free slot if ip == 0 |
737 | * Returns -1 if not found, index if found |
738 | */ |
739 | static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) |
740 | { |
741 | int i; |
742 | |
743 | for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) |
744 | if (targets[i] == ip) |
745 | return i; |
746 | else if (targets[i] == 0) |
747 | break; |
748 | |
749 | return -1; |
750 | } |
751 | |
752 | #if IS_ENABLED(CONFIG_IPV6) |
753 | static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip) |
754 | { |
755 | struct in6_addr mcaddr; |
756 | int i; |
757 | |
758 | for (i = 0; i < BOND_MAX_NS_TARGETS; i++) { |
759 | addrconf_addr_solict_mult(addr: &targets[i], solicited: &mcaddr); |
760 | if ((ipv6_addr_equal(a1: &targets[i], a2: ip)) || |
761 | (ipv6_addr_equal(a1: &mcaddr, a2: ip))) |
762 | return i; |
763 | else if (ipv6_addr_any(a: &targets[i])) |
764 | break; |
765 | } |
766 | |
767 | return -1; |
768 | } |
769 | #endif |
770 | |
771 | /* exported from bond_main.c */ |
772 | extern unsigned int bond_net_id; |
773 | |
774 | /* exported from bond_netlink.c */ |
775 | extern struct rtnl_link_ops bond_link_ops; |
776 | |
777 | /* exported from bond_sysfs_slave.c */ |
778 | extern const struct sysfs_ops slave_sysfs_ops; |
779 | |
780 | /* exported from bond_3ad.c */ |
781 | extern const u8 lacpdu_mcast_addr[]; |
782 | |
/* Drop @skb on the bond's TX path: bump the device tx_dropped counter,
 * free the skb (safe from any context), and report NET_XMIT_DROP.
 */
static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
	dev_core_stats_tx_dropped_inc(dev);
	dev_kfree_skb_any(skb);
	return NET_XMIT_DROP;
}
789 | |
790 | #endif /* _NET_BONDING_H */ |
791 | |