1 | // SPDX-License-Identifier: GPL-1.0+ |
2 | /* |
3 | * originally based on the dummy device. |
4 | * |
5 | * Copyright 1999, Thomas Davis, tadavis@lbl.gov. |
6 | * Based on dummy.c, and eql.c devices. |
7 | * |
8 | * bonding.c: an Ethernet Bonding driver |
9 | * |
 * This is useful to talk to Cisco EtherChannel compatible equipment:
11 | * Cisco 5500 |
12 | * Sun Trunking (Solaris) |
13 | * Alteon AceDirector Trunks |
14 | * Linux Bonding |
15 | * and probably many L2 switches ... |
16 | * |
17 | * How it works: |
18 | * ifconfig bond0 ipaddress netmask up |
 *	will set up a network device, with an ip address. No mac address
20 | * will be assigned at this time. The hw mac address will come from |
21 | * the first slave bonded to the channel. All slaves will then use |
22 | * this hw mac address. |
23 | * |
24 | * ifconfig bond0 down |
25 | * will release all slaves, marking them as down. |
26 | * |
27 | * ifenslave bond0 eth0 |
 *	will attach eth0 to bond0 as a slave. eth0's hw mac address will either
 *	a: be used as bond0's initial mac address, or
 *	b: if bond0 already has a hw mac address, eth0's hw mac address
 *	   will be overwritten from bond0.
32 | * |
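 * A roughly equivalent modern setup (illustrative only; iproute2 instead of
 * the legacy ifconfig/ifenslave tools shown above):
 *	ip link add bond0 type bond mode active-backup
 *	ip link set eth0 down
 *	ip link set eth0 master bond0
 *	ip link set bond0 up
 *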
33 | */ |
34 | |
35 | #include <linux/kernel.h> |
36 | #include <linux/module.h> |
37 | #include <linux/types.h> |
38 | #include <linux/fcntl.h> |
39 | #include <linux/filter.h> |
40 | #include <linux/interrupt.h> |
41 | #include <linux/ptrace.h> |
42 | #include <linux/ioport.h> |
43 | #include <linux/in.h> |
44 | #include <net/ip.h> |
45 | #include <linux/ip.h> |
46 | #include <linux/icmp.h> |
47 | #include <linux/icmpv6.h> |
48 | #include <linux/tcp.h> |
49 | #include <linux/udp.h> |
50 | #include <linux/slab.h> |
51 | #include <linux/string.h> |
52 | #include <linux/init.h> |
53 | #include <linux/timer.h> |
54 | #include <linux/socket.h> |
55 | #include <linux/ctype.h> |
56 | #include <linux/inet.h> |
57 | #include <linux/bitops.h> |
58 | #include <linux/io.h> |
59 | #include <asm/dma.h> |
60 | #include <linux/uaccess.h> |
61 | #include <linux/errno.h> |
62 | #include <linux/netdevice.h> |
63 | #include <linux/inetdevice.h> |
64 | #include <linux/igmp.h> |
65 | #include <linux/etherdevice.h> |
66 | #include <linux/skbuff.h> |
67 | #include <net/sock.h> |
68 | #include <linux/rtnetlink.h> |
69 | #include <linux/smp.h> |
70 | #include <linux/if_ether.h> |
71 | #include <net/arp.h> |
72 | #include <linux/mii.h> |
73 | #include <linux/ethtool.h> |
74 | #include <linux/if_vlan.h> |
75 | #include <linux/if_bonding.h> |
76 | #include <linux/phy.h> |
77 | #include <linux/jiffies.h> |
78 | #include <linux/preempt.h> |
79 | #include <net/route.h> |
80 | #include <net/net_namespace.h> |
81 | #include <net/netns/generic.h> |
82 | #include <net/pkt_sched.h> |
83 | #include <linux/rculist.h> |
84 | #include <net/flow_dissector.h> |
85 | #include <net/xfrm.h> |
86 | #include <net/bonding.h> |
87 | #include <net/bond_3ad.h> |
88 | #include <net/bond_alb.h> |
89 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
90 | #include <net/tls.h> |
91 | #endif |
92 | #include <net/ip6_route.h> |
93 | #include <net/xdp.h> |
94 | |
95 | #include "bonding_priv.h" |
96 | |
97 | /*---------------------------- Module parameters ----------------------------*/ |
98 | |
99 | /* monitor all links that often (in milliseconds). <=0 disables monitoring */ |
100 | |
101 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; |
102 | static int tx_queues = BOND_DEFAULT_TX_QUEUES; |
103 | static int num_peer_notif = 1; |
104 | static int miimon; |
105 | static int updelay; |
106 | static int downdelay; |
107 | static int use_carrier = 1; |
108 | static char *mode; |
109 | static char *primary; |
110 | static char *primary_reselect; |
111 | static char *lacp_rate; |
112 | static int min_links; |
113 | static char *ad_select; |
114 | static char *xmit_hash_policy; |
115 | static int arp_interval; |
116 | static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; |
117 | static char *arp_validate; |
118 | static char *arp_all_targets; |
119 | static char *fail_over_mac; |
120 | static int all_slaves_active; |
121 | static struct bond_params bonding_defaults; |
122 | static int resend_igmp = BOND_DEFAULT_RESEND_IGMP; |
123 | static int packets_per_slave = 1; |
124 | static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL; |
125 | |
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");
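
/* Example (illustrative only): load the driver with a single 802.3ad bond,
 * MII link monitoring every 100 ms and the fast LACPDU rate:
 *
 *	modprobe bonding max_bonds=1 mode=4 miimon=100 lacp_rate=1
 */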
205 | |
206 | /*----------------------------- Global variables ----------------------------*/ |
207 | |
208 | #ifdef CONFIG_NET_POLL_CONTROLLER |
209 | atomic_t netpoll_block_tx = ATOMIC_INIT(0); |
210 | #endif |
211 | |
212 | unsigned int bond_net_id __read_mostly; |
213 | |
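/* Dissector keys used when hashing packets for the xmit_hash_policy modes;
 * they feed the flow_keys_bonding dissector declared below.
 */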
214 | static const struct flow_dissector_key flow_keys_bonding_keys[] = { |
215 | { |
216 | .key_id = FLOW_DISSECTOR_KEY_CONTROL, |
217 | .offset = offsetof(struct flow_keys, control), |
218 | }, |
219 | { |
220 | .key_id = FLOW_DISSECTOR_KEY_BASIC, |
221 | .offset = offsetof(struct flow_keys, basic), |
222 | }, |
223 | { |
224 | .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, |
225 | .offset = offsetof(struct flow_keys, addrs.v4addrs), |
226 | }, |
227 | { |
228 | .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, |
229 | .offset = offsetof(struct flow_keys, addrs.v6addrs), |
230 | }, |
231 | { |
232 | .key_id = FLOW_DISSECTOR_KEY_TIPC, |
233 | .offset = offsetof(struct flow_keys, addrs.tipckey), |
234 | }, |
235 | { |
236 | .key_id = FLOW_DISSECTOR_KEY_PORTS, |
237 | .offset = offsetof(struct flow_keys, ports), |
238 | }, |
239 | { |
240 | .key_id = FLOW_DISSECTOR_KEY_ICMP, |
241 | .offset = offsetof(struct flow_keys, icmp), |
242 | }, |
243 | { |
244 | .key_id = FLOW_DISSECTOR_KEY_VLAN, |
245 | .offset = offsetof(struct flow_keys, vlan), |
246 | }, |
247 | { |
248 | .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL, |
249 | .offset = offsetof(struct flow_keys, tags), |
250 | }, |
251 | { |
252 | .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID, |
253 | .offset = offsetof(struct flow_keys, keyid), |
254 | }, |
255 | }; |
256 | |
257 | static struct flow_dissector flow_keys_bonding __read_mostly; |
258 | |
259 | /*-------------------------- Forward declarations ---------------------------*/ |
260 | |
261 | static int bond_init(struct net_device *bond_dev); |
262 | static void bond_uninit(struct net_device *bond_dev); |
263 | static void bond_get_stats(struct net_device *bond_dev, |
264 | struct rtnl_link_stats64 *stats); |
265 | static void bond_slave_arr_handler(struct work_struct *work); |
266 | static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, |
267 | int mod); |
268 | static void bond_netdev_notify_work(struct work_struct *work); |
269 | |
270 | /*---------------------------- General routines -----------------------------*/ |
271 | |
272 | const char *bond_mode_name(int mode) |
273 | { |
274 | static const char *names[] = { |
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
282 | }; |
283 | |
284 | if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB) |
		return "unknown";
286 | |
287 | return names[mode]; |
288 | } |
289 | |
290 | /** |
291 | * bond_dev_queue_xmit - Prepare skb for xmit. |
292 | * |
293 | * @bond: bond device that got this skb for tx. |
294 | * @skb: hw accel VLAN tagged skb to transmit |
295 | * @slave_dev: slave that is supposed to xmit this skbuff |
296 | */ |
297 | netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, |
298 | struct net_device *slave_dev) |
299 | { |
300 | skb->dev = slave_dev; |
301 | |
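	/* Restore skb->queue_mapping from the value stashed in the qdisc cb;
	 * the BUILD_BUG_ON ensures both fields have the same size so the
	 * stored value cannot be truncated.
	 */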
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
310 | } |
311 | |
312 | static bool bond_sk_check(struct bonding *bond) |
313 | { |
314 | switch (BOND_MODE(bond)) { |
315 | case BOND_MODE_8023AD: |
316 | case BOND_MODE_XOR: |
317 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) |
318 | return true; |
319 | fallthrough; |
320 | default: |
321 | return false; |
322 | } |
323 | } |
324 | |
325 | static bool bond_xdp_check(struct bonding *bond) |
326 | { |
327 | switch (BOND_MODE(bond)) { |
328 | case BOND_MODE_ROUNDROBIN: |
329 | case BOND_MODE_ACTIVEBACKUP: |
330 | return true; |
331 | case BOND_MODE_8023AD: |
332 | case BOND_MODE_XOR: |
333 | /* vlan+srcmac is not supported with XDP as in most cases the 802.1q |
334 | * payload is not in the packet due to hardware offload. |
335 | */ |
336 | if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC) |
337 | return true; |
338 | fallthrough; |
339 | default: |
340 | return false; |
341 | } |
342 | } |
343 | |
344 | /*---------------------------------- VLAN -----------------------------------*/ |
345 | |
346 | /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, |
347 | * We don't protect the slave list iteration with a lock because: |
348 | * a. This operation is performed in IOCTL context, |
349 | * b. The operation is protected by the RTNL semaphore in the 8021q code, |
350 | * c. Holding a lock with BH disabled while directly calling a base driver |
351 | * entry point is generally a BAD idea. |
352 | * |
353 | * The design of synchronization/protection for this operation in the 8021q |
354 | * module is good for one or more VLAN devices over a single physical device |
355 | * and cannot be extended for a teaming solution like bonding, so there is a |
356 | * potential race condition here where a net device from the vlan group might |
357 | * be referenced (either by a base driver or the 8021q code) while it is being |
358 | * removed from the system. However, it turns out we're not making matters |
359 | * worse, and if it works for regular VLAN usage it will work here too. |
360 | */ |
361 | |
362 | /** |
363 | * bond_vlan_rx_add_vid - Propagates adding an id to slaves |
364 | * @bond_dev: bonding net device that got called |
365 | * @proto: network protocol ID |
366 | * @vid: vlan id being added |
367 | */ |
368 | static int bond_vlan_rx_add_vid(struct net_device *bond_dev, |
369 | __be16 proto, u16 vid) |
370 | { |
	struct bonding *bond = netdev_priv(bond_dev);
372 | struct slave *slave, *rollback_slave; |
373 | struct list_head *iter; |
374 | int res; |
375 | |
376 | bond_for_each_slave(bond, slave, iter) { |
		res = vlan_vid_add(slave->dev, proto, vid);
378 | if (res) |
379 | goto unwind; |
380 | } |
381 | |
382 | return 0; |
383 | |
384 | unwind: |
385 | /* unwind to the slave that failed */ |
386 | bond_for_each_slave(bond, rollback_slave, iter) { |
387 | if (rollback_slave == slave) |
388 | break; |
389 | |
		vlan_vid_del(rollback_slave->dev, proto, vid);
391 | } |
392 | |
393 | return res; |
394 | } |
395 | |
396 | /** |
397 | * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves |
398 | * @bond_dev: bonding net device that got called |
399 | * @proto: network protocol ID |
400 | * @vid: vlan id being removed |
401 | */ |
402 | static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, |
403 | __be16 proto, u16 vid) |
404 | { |
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);
414 | |
415 | return 0; |
416 | } |
417 | |
418 | /*---------------------------------- XFRM -----------------------------------*/ |
419 | |
420 | #ifdef CONFIG_XFRM_OFFLOAD |
421 | /** |
422 | * bond_ipsec_add_sa - program device with a security association |
423 | * @xs: pointer to transformer state struct |
424 | * @extack: extack point to fill failure reason |
425 | **/ |
426 | static int bond_ipsec_add_sa(struct xfrm_state *xs, |
427 | struct netlink_ext_ack *extack) |
428 | { |
429 | struct net_device *bond_dev = xs->xso.dev; |
430 | struct bond_ipsec *ipsec; |
431 | struct bonding *bond; |
432 | struct slave *slave; |
433 | int err; |
434 | |
435 | if (!bond_dev) |
436 | return -EINVAL; |
437 | |
438 | rcu_read_lock(); |
	bond = netdev_priv(bond_dev);
440 | slave = rcu_dereference(bond->curr_active_slave); |
441 | if (!slave) { |
442 | rcu_read_unlock(); |
443 | return -ENODEV; |
444 | } |
445 | |
446 | if (!slave->dev->xfrmdev_ops || |
447 | !slave->dev->xfrmdev_ops->xdo_dev_state_add || |
	    netif_is_bond_master(slave->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
450 | rcu_read_unlock(); |
451 | return -EINVAL; |
452 | } |
453 | |
	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
455 | if (!ipsec) { |
456 | rcu_read_unlock(); |
457 | return -ENOMEM; |
458 | } |
459 | xs->xso.real_dev = slave->dev; |
460 | |
461 | err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); |
462 | if (!err) { |
463 | ipsec->xs = xs; |
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
470 | } |
471 | rcu_read_unlock(); |
472 | return err; |
473 | } |
474 | |
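/**
 * bond_ipsec_add_sa_all - program all cached SAs into the active slave
 * @bond: pointer to the bond instance
 *
 * Re-offloads every SA on bond->ipsec_list to the current active slave;
 * called from bond_change_active_slave() after a failover.
 */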
475 | static void bond_ipsec_add_sa_all(struct bonding *bond) |
476 | { |
477 | struct net_device *bond_dev = bond->dev; |
478 | struct bond_ipsec *ipsec; |
479 | struct slave *slave; |
480 | |
481 | rcu_read_lock(); |
482 | slave = rcu_dereference(bond->curr_active_slave); |
483 | if (!slave) |
484 | goto out; |
485 | |
	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
507 | out: |
508 | rcu_read_unlock(); |
509 | } |
510 | |
511 | /** |
512 | * bond_ipsec_del_sa - clear out this specific SA |
513 | * @xs: pointer to transformer state struct |
514 | **/ |
515 | static void bond_ipsec_del_sa(struct xfrm_state *xs) |
516 | { |
517 | struct net_device *bond_dev = xs->xso.dev; |
518 | struct bond_ipsec *ipsec; |
519 | struct bonding *bond; |
520 | struct slave *slave; |
521 | |
522 | if (!bond_dev) |
523 | return; |
524 | |
525 | rcu_read_lock(); |
	bond = netdev_priv(bond_dev);
527 | slave = rcu_dereference(bond->curr_active_slave); |
528 | |
529 | if (!slave) |
530 | goto out; |
531 | |
532 | if (!xs->xso.real_dev) |
533 | goto out; |
534 | |
535 | WARN_ON(xs->xso.real_dev != slave->dev); |
536 | |
	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
555 | rcu_read_unlock(); |
556 | } |
557 | |
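/**
 * bond_ipsec_del_sa_all - remove all cached SAs from the active slave
 * @bond: pointer to the bond instance
 *
 * Undoes the hardware offload for every SA on bond->ipsec_list; called from
 * bond_change_active_slave() before the active slave is switched.
 */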
558 | static void bond_ipsec_del_sa_all(struct bonding *bond) |
559 | { |
560 | struct net_device *bond_dev = bond->dev; |
561 | struct bond_ipsec *ipsec; |
562 | struct slave *slave; |
563 | |
564 | rcu_read_lock(); |
565 | slave = rcu_dereference(bond->curr_active_slave); |
566 | if (!slave) { |
567 | rcu_read_unlock(); |
568 | return; |
569 | } |
570 | |
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
588 | rcu_read_unlock(); |
589 | } |
590 | |
591 | /** |
592 | * bond_ipsec_offload_ok - can this packet use the xfrm hw offload |
593 | * @skb: current data packet |
594 | * @xs: pointer to transformer state struct |
595 | **/ |
596 | static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) |
597 | { |
598 | struct net_device *bond_dev = xs->xso.dev; |
599 | struct net_device *real_dev; |
600 | struct slave *curr_active; |
601 | struct bonding *bond; |
602 | int err; |
603 | |
	bond = netdev_priv(bond_dev);
605 | rcu_read_lock(); |
606 | curr_active = rcu_dereference(bond->curr_active_slave); |
607 | real_dev = curr_active->dev; |
608 | |
609 | if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { |
610 | err = false; |
611 | goto out; |
612 | } |
613 | |
614 | if (!xs->xso.real_dev) { |
615 | err = false; |
616 | goto out; |
617 | } |
618 | |
619 | if (!real_dev->xfrmdev_ops || |
620 | !real_dev->xfrmdev_ops->xdo_dev_offload_ok || |
	    netif_is_bond_master(real_dev)) {
622 | err = false; |
623 | goto out; |
624 | } |
625 | |
626 | err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); |
627 | out: |
628 | rcu_read_unlock(); |
629 | return err; |
630 | } |
631 | |
632 | static const struct xfrmdev_ops bond_xfrmdev_ops = { |
633 | .xdo_dev_state_add = bond_ipsec_add_sa, |
634 | .xdo_dev_state_delete = bond_ipsec_del_sa, |
635 | .xdo_dev_offload_ok = bond_ipsec_offload_ok, |
636 | }; |
637 | #endif /* CONFIG_XFRM_OFFLOAD */ |
638 | |
639 | /*------------------------------- Link status -------------------------------*/ |
640 | |
641 | /* Set the carrier state for the master according to the state of its |
642 | * slaves. If any slaves are up, the master is up. In 802.3ad mode, |
643 | * do special 802.3ad magic. |
644 | * |
645 | * Returns zero if carrier state does not change, nonzero if it does. |
646 | */ |
647 | int bond_set_carrier(struct bonding *bond) |
648 | { |
649 | struct list_head *iter; |
650 | struct slave *slave; |
651 | |
652 | if (!bond_has_slaves(bond)) |
653 | goto down; |
654 | |
655 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
656 | return bond_3ad_set_carrier(bond); |
657 | |
658 | bond_for_each_slave(bond, slave, iter) { |
659 | if (slave->link == BOND_LINK_UP) { |
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
662 | return 1; |
663 | } |
664 | return 0; |
665 | } |
666 | } |
667 | |
668 | down: |
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
671 | return 1; |
672 | } |
673 | return 0; |
674 | } |
675 | |
676 | /* Get link speed and duplex from the slave's base driver |
677 | * using ethtool. If for some reason the call fails or the |
678 | * values are invalid, set speed and duplex to -1, |
679 | * and return. Return 1 if speed or duplex settings are |
680 | * UNKNOWN; 0 otherwise. |
681 | */ |
682 | static int bond_update_speed_duplex(struct slave *slave) |
683 | { |
684 | struct net_device *slave_dev = slave->dev; |
685 | struct ethtool_link_ksettings ecmd; |
686 | int res; |
687 | |
688 | slave->speed = SPEED_UNKNOWN; |
689 | slave->duplex = DUPLEX_UNKNOWN; |
690 | |
	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
692 | if (res < 0) |
693 | return 1; |
694 | if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) |
695 | return 1; |
696 | switch (ecmd.base.duplex) { |
697 | case DUPLEX_FULL: |
698 | case DUPLEX_HALF: |
699 | break; |
700 | default: |
701 | return 1; |
702 | } |
703 | |
704 | slave->speed = ecmd.base.speed; |
705 | slave->duplex = ecmd.base.duplex; |
706 | |
707 | return 0; |
708 | } |
709 | |
710 | const char *bond_slave_link_status(s8 link) |
711 | { |
712 | switch (link) { |
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
723 | } |
724 | } |
725 | |
726 | /* if <dev> supports MII link status reporting, check its link status. |
727 | * |
728 | * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(), |
729 | * depending upon the setting of the use_carrier parameter. |
730 | * |
731 | * Return either BMSR_LSTATUS, meaning that the link is up (or we |
732 | * can't tell and just pretend it is), or 0, meaning that the link is |
733 | * down. |
734 | * |
735 | * If reporting is non-zero, instead of faking link up, return -1 if |
736 | * both ETHTOOL and MII ioctls fail (meaning the device does not |
737 | * support them). If use_carrier is set, return whatever it says. |
738 | * It'd be nice if there was a good way to tell if a driver supports |
739 | * netif_carrier, but there really isn't. |
740 | */ |
741 | static int bond_check_dev_link(struct bonding *bond, |
742 | struct net_device *slave_dev, int reporting) |
743 | { |
744 | const struct net_device_ops *slave_ops = slave_dev->netdev_ops; |
745 | int (*ioctl)(struct net_device *, struct ifreq *, int); |
746 | struct ifreq ifr; |
747 | struct mii_ioctl_data *mii; |
748 | |
	if (!reporting && !netif_running(slave_dev))
750 | return 0; |
751 | |
752 | if (bond->params.use_carrier) |
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
754 | |
755 | /* Try to get link status using Ethtool first. */ |
756 | if (slave_dev->ethtool_ops->get_link) |
757 | return slave_dev->ethtool_ops->get_link(slave_dev) ? |
758 | BMSR_LSTATUS : 0; |
759 | |
760 | /* Ethtool can't be used, fallback to MII ioctls. */ |
761 | ioctl = slave_ops->ndo_eth_ioctl; |
762 | if (ioctl) { |
763 | /* TODO: set pointer to correct ioctl on a per team member |
764 | * bases to make this more efficient. that is, once |
765 | * we determine the correct ioctl, we will always |
766 | * call it and not the others for that team |
767 | * member. |
768 | */ |
769 | |
770 | /* We cannot assume that SIOCGMIIPHY will also read a |
771 | * register; not all network drivers (e.g., e100) |
772 | * support that. |
773 | */ |
774 | |
775 | /* Yes, the mii is overlaid on the ifreq.ifr_ifru */ |
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
778 | if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) { |
779 | mii->reg_num = MII_BMSR; |
780 | if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0) |
781 | return mii->val_out & BMSR_LSTATUS; |
782 | } |
783 | } |
784 | |
785 | /* If reporting, report that either there's no ndo_eth_ioctl, |
786 | * or both SIOCGMIIREG and get_link failed (meaning that we |
787 | * cannot report link status). If not reporting, pretend |
788 | * we're ok. |
789 | */ |
790 | return reporting ? -1 : BMSR_LSTATUS; |
791 | } |
792 | |
793 | /*----------------------------- Multicast list ------------------------------*/ |
794 | |
795 | /* Push the promiscuity flag down to appropriate slaves */ |
796 | static int bond_set_promiscuity(struct bonding *bond, int inc) |
797 | { |
798 | struct list_head *iter; |
799 | int err = 0; |
800 | |
801 | if (bond_uses_primary(bond)) { |
802 | struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); |
803 | |
804 | if (curr_active) |
			err = dev_set_promiscuity(curr_active->dev, inc);
806 | } else { |
807 | struct slave *slave; |
808 | |
809 | bond_for_each_slave(bond, slave, iter) { |
			err = dev_set_promiscuity(slave->dev, inc);
811 | if (err) |
812 | return err; |
813 | } |
814 | } |
815 | return err; |
816 | } |
817 | |
818 | /* Push the allmulti flag down to all slaves */ |
819 | static int bond_set_allmulti(struct bonding *bond, int inc) |
820 | { |
821 | struct list_head *iter; |
822 | int err = 0; |
823 | |
824 | if (bond_uses_primary(bond)) { |
825 | struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); |
826 | |
827 | if (curr_active) |
			err = dev_set_allmulti(curr_active->dev, inc);
829 | } else { |
830 | struct slave *slave; |
831 | |
832 | bond_for_each_slave(bond, slave, iter) { |
			err = dev_set_allmulti(slave->dev, inc);
834 | if (err) |
835 | return err; |
836 | } |
837 | } |
838 | return err; |
839 | } |
840 | |
841 | /* Retrieve the list of registered multicast addresses for the bonding |
842 | * device and retransmit an IGMP JOIN request to the current active |
843 | * slave. |
844 | */ |
845 | static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) |
846 | { |
847 | struct bonding *bond = container_of(work, struct bonding, |
848 | mcast_work.work); |
849 | |
	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
859 | } |
860 | rtnl_unlock(); |
861 | } |
862 | |
863 | /* Flush bond's hardware addresses from slave */ |
864 | static void bond_hw_addr_flush(struct net_device *bond_dev, |
865 | struct net_device *slave_dev) |
866 | { |
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		dev_mc_del(slave_dev, lacpdu_mcast_addr);
874 | } |
875 | |
876 | /*--------------------------- Active slave change ---------------------------*/ |
877 | |
878 | /* Update the hardware address list and promisc/allmulti for the new and |
879 | * old active slaves (if any). Modes that are not using primary keep all |
880 | * slaves up date at all times; only the modes that use primary need to call |
881 | * this function to swap these settings during a failover. |
882 | */ |
883 | static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, |
884 | struct slave *old_active) |
885 | { |
886 | if (old_active) { |
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		if (bond->dev->flags & IFF_UP)
			bond_hw_addr_flush(bond->dev, old_active->dev);
895 | } |
896 | |
897 | if (new_active) { |
898 | /* FIXME: Signal errors upstream. */ |
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		if (bond->dev->flags & IFF_UP) {
			netif_addr_lock_bh(bond->dev);
			dev_uc_sync(new_active->dev, bond->dev);
			dev_mc_sync(new_active->dev, bond->dev);
			netif_addr_unlock_bh(bond->dev);
910 | } |
911 | } |
912 | } |
913 | |
914 | /** |
915 | * bond_set_dev_addr - clone slave's address to bond |
916 | * @bond_dev: bond net device |
917 | * @slave_dev: slave net device |
918 | * |
919 | * Should be called with RTNL held. |
920 | */ |
921 | static int bond_set_dev_addr(struct net_device *bond_dev, |
922 | struct net_device *slave_dev) |
923 | { |
924 | int err; |
925 | |
	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
935 | return 0; |
936 | } |
937 | |
938 | static struct slave *bond_get_old_active(struct bonding *bond, |
939 | struct slave *new_active) |
940 | { |
941 | struct slave *slave; |
942 | struct list_head *iter; |
943 | |
944 | bond_for_each_slave(bond, slave, iter) { |
945 | if (slave == new_active) |
946 | continue; |
947 | |
		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
949 | return slave; |
950 | } |
951 | |
952 | return NULL; |
953 | } |
954 | |
955 | /* bond_do_fail_over_mac |
956 | * |
957 | * Perform special MAC address swapping for fail_over_mac settings |
958 | * |
959 | * Called with RTNL |
960 | */ |
961 | static void bond_do_fail_over_mac(struct bonding *bond, |
962 | struct slave *new_active, |
963 | struct slave *old_active) |
964 | { |
965 | u8 tmp_mac[MAX_ADDR_LEN]; |
966 | struct sockaddr_storage ss; |
967 | int rv; |
968 | |
969 | switch (bond->params.fail_over_mac) { |
970 | case BOND_FOM_ACTIVE: |
971 | if (new_active) { |
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
976 | } |
977 | break; |
978 | case BOND_FOM_FOLLOW: |
979 | /* if new_active && old_active, swap them |
980 | * if just old_active, do nothing (going to no active slave) |
981 | * if just new_active, set new_active to bond's MAC |
982 | */ |
983 | if (!new_active) |
984 | return; |
985 | |
986 | if (!old_active) |
987 | old_active = bond_get_old_active(bond, new_active); |
988 | |
		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
1000 | } |
1001 | |
		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
1007 | goto out; |
1008 | } |
1009 | |
1010 | if (!old_active) |
1011 | goto out; |
1012 | |
		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
1022 | out: |
1023 | break; |
1024 | default: |
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
1027 | break; |
1028 | } |
1029 | |
1030 | } |
1031 | |
1032 | /** |
1033 | * bond_choose_primary_or_current - select the primary or high priority slave |
1034 | * @bond: our bonding struct |
1035 | * |
1036 | * - Check if there is a primary link. If the primary link was set and is up, |
1037 | * go on and do link reselection. |
1038 | * |
1039 | * - If primary link is not set or down, find the highest priority link. |
1040 | * If the highest priority link is not current slave, set it as primary |
1041 | * link and do link reselection. |
1042 | */ |
1043 | static struct slave *bond_choose_primary_or_current(struct bonding *bond) |
1044 | { |
1045 | struct slave *prim = rtnl_dereference(bond->primary_slave); |
1046 | struct slave *curr = rtnl_dereference(bond->curr_active_slave); |
1047 | struct slave *slave, *hprio = NULL; |
1048 | struct list_head *iter; |
1049 | |
1050 | if (!prim || prim->link != BOND_LINK_UP) { |
1051 | bond_for_each_slave(bond, slave, iter) { |
1052 | if (slave->link == BOND_LINK_UP) { |
1053 | hprio = hprio ?: slave; |
1054 | if (slave->prio > hprio->prio) |
1055 | hprio = slave; |
1056 | } |
1057 | } |
1058 | |
1059 | if (hprio && hprio != curr) { |
1060 | prim = hprio; |
1061 | goto link_reselect; |
1062 | } |
1063 | |
1064 | if (!curr || curr->link != BOND_LINK_UP) |
1065 | return NULL; |
1066 | return curr; |
1067 | } |
1068 | |
1069 | if (bond->force_primary) { |
1070 | bond->force_primary = false; |
1071 | return prim; |
1072 | } |
1073 | |
1074 | link_reselect: |
1075 | if (!curr || curr->link != BOND_LINK_UP) |
1076 | return prim; |
1077 | |
1078 | /* At this point, prim and curr are both up */ |
1079 | switch (bond->params.primary_reselect) { |
1080 | case BOND_PRI_RESELECT_ALWAYS: |
1081 | return prim; |
1082 | case BOND_PRI_RESELECT_BETTER: |
1083 | if (prim->speed < curr->speed) |
1084 | return curr; |
1085 | if (prim->speed == curr->speed && prim->duplex <= curr->duplex) |
1086 | return curr; |
1087 | return prim; |
1088 | case BOND_PRI_RESELECT_FAILURE: |
1089 | return curr; |
1090 | default: |
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
1093 | return curr; |
1094 | } |
1095 | } |
1096 | |
1097 | /** |
1098 | * bond_find_best_slave - select the best available slave to be the active one |
1099 | * @bond: our bonding struct |
1100 | */ |
1101 | static struct slave *bond_find_best_slave(struct bonding *bond) |
1102 | { |
1103 | struct slave *slave, *bestslave = NULL; |
1104 | struct list_head *iter; |
1105 | int mintime = bond->params.updelay; |
1106 | |
1107 | slave = bond_choose_primary_or_current(bond); |
1108 | if (slave) |
1109 | return slave; |
1110 | |
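	/* No usable primary/current slave: pick the first slave that is fully
	 * up; otherwise remember the BOND_LINK_BACK slave whose updelay
	 * expires soonest as a fallback.
	 */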
1111 | bond_for_each_slave(bond, slave, iter) { |
1112 | if (slave->link == BOND_LINK_UP) |
1113 | return slave; |
1114 | if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) && |
1115 | slave->delay < mintime) { |
1116 | mintime = slave->delay; |
1117 | bestslave = slave; |
1118 | } |
1119 | } |
1120 | |
1121 | return bestslave; |
1122 | } |
1123 | |
1124 | static bool bond_should_notify_peers(struct bonding *bond) |
1125 | { |
1126 | struct slave *slave; |
1127 | |
1128 | rcu_read_lock(); |
1129 | slave = rcu_dereference(bond->curr_active_slave); |
1130 | rcu_read_unlock(); |
1131 | |
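	/* send_peer_notif is set to num_peer_notif * max(1, peer_notif_delay)
	 * and counts down; a notification is only emitted when the counter is
	 * a multiple of peer_notif_delay, i.e. once per delay interval.
	 */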
1132 | if (!slave || !bond->send_peer_notif || |
1133 | bond->send_peer_notif % |
1134 | max(1, bond->params.peer_notif_delay) != 0 || |
	    !netif_carrier_ok(bond->dev) ||
1136 | test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) |
1137 | return false; |
1138 | |
	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");
1141 | |
1142 | return true; |
1143 | } |
1144 | |
1145 | /** |
1146 | * bond_change_active_slave - change the active slave into the specified one |
1147 | * @bond: our bonding struct |
1148 | * @new_active: the new slave to make the active one |
1149 | * |
1150 | * Set the new slave to the bond's settings and unset them on the old |
1151 | * curr_active_slave. |
1152 | * Setting include flags, mc-list, promiscuity, allmulti, etc. |
1153 | * |
1154 | * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP, |
1155 | * because it is apparently the best available slave we have, even though its |
1156 | * updelay hasn't timed out yet. |
1157 | * |
1158 | * Caller must hold RTNL. |
1159 | */ |
1160 | void bond_change_active_slave(struct bonding *bond, struct slave *new_active) |
1161 | { |
1162 | struct slave *old_active; |
1163 | |
1164 | ASSERT_RTNL(); |
1165 | |
1166 | old_active = rtnl_dereference(bond->curr_active_slave); |
1167 | |
1168 | if (old_active == new_active) |
1169 | return; |
1170 | |
1171 | #ifdef CONFIG_XFRM_OFFLOAD |
1172 | bond_ipsec_del_sa_all(bond); |
1173 | #endif /* CONFIG_XFRM_OFFLOAD */ |
1174 | |
1175 | if (new_active) { |
1176 | new_active->last_link_up = jiffies; |
1177 | |
		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
1197 | } |
1198 | |
1199 | if (bond_uses_primary(bond)) |
1200 | bond_hw_addr_swap(bond, new_active, old_active); |
1201 | |
1202 | if (bond_is_lb(bond)) { |
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
1212 | } |
1213 | |
1214 | if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { |
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
1242 | } |
1243 | } |
1244 | } |
1245 | |
1246 | #ifdef CONFIG_XFRM_OFFLOAD |
1247 | bond_ipsec_add_sa_all(bond); |
1248 | #endif /* CONFIG_XFRM_OFFLOAD */ |
1249 | |
1250 | /* resend IGMP joins since active slave has changed or |
1251 | * all were sent on curr_active_slave. |
1252 | * resend only if bond is brought up with the affected |
1253 | * bonding modes and the retransmission is enabled |
1254 | */ |
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
1260 | } |
1261 | } |
1262 | |
1263 | /** |
1264 | * bond_select_active_slave - select a new active slave, if needed |
1265 | * @bond: our bonding struct |
1266 | * |
1267 | * This functions should be called when one of the following occurs: |
1268 | * - The old curr_active_slave has been released or lost its link. |
1269 | * - The primary_slave has got its link back. |
1270 | * - A slave has got its link back and there's no old curr_active_slave. |
1271 | * |
1272 | * Caller must hold RTNL. |
1273 | */ |
1274 | void bond_select_active_slave(struct bonding *bond) |
1275 | { |
1276 | struct slave *best_slave; |
1277 | int rv; |
1278 | |
1279 | ASSERT_RTNL(); |
1280 | |
1281 | best_slave = bond_find_best_slave(bond); |
1282 | if (best_slave != rtnl_dereference(bond->curr_active_slave)) { |
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
1292 | } |
1293 | } |
1294 | |
1295 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1296 | static inline int slave_enable_netpoll(struct slave *slave) |
1297 | { |
1298 | struct netpoll *np; |
1299 | int err = 0; |
1300 | |
	np = kzalloc(sizeof(*np), GFP_KERNEL);
1302 | err = -ENOMEM; |
1303 | if (!np) |
1304 | goto out; |
1305 | |
	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
1309 | goto out; |
1310 | } |
1311 | slave->np = np; |
1312 | out: |
1313 | return err; |
1314 | } |
1315 | static inline void slave_disable_netpoll(struct slave *slave) |
1316 | { |
1317 | struct netpoll *np = slave->np; |
1318 | |
1319 | if (!np) |
1320 | return; |
1321 | |
1322 | slave->np = NULL; |
1323 | |
1324 | __netpoll_free(np); |
1325 | } |
1326 | |
1327 | static void bond_poll_controller(struct net_device *bond_dev) |
1328 | { |
	struct bonding *bond = netdev_priv(bond_dev);
1330 | struct slave *slave = NULL; |
1331 | struct list_head *iter; |
1332 | struct ad_info ad_info; |
1333 | |
1334 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
1336 | return; |
1337 | |
1338 | bond_for_each_slave_rcu(bond, slave, iter) { |
1339 | if (!bond_slave_is_up(slave)) |
1340 | continue; |
1341 | |
1342 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
1343 | struct aggregator *agg = |
1344 | SLAVE_AD_INFO(slave)->port.aggregator; |
1345 | |
1346 | if (agg && |
1347 | agg->aggregator_identifier != ad_info.aggregator_id) |
1348 | continue; |
1349 | } |
1350 | |
		netpoll_poll_dev(slave->dev);
1352 | } |
1353 | } |
1354 | |
1355 | static void bond_netpoll_cleanup(struct net_device *bond_dev) |
1356 | { |
	struct bonding *bond = netdev_priv(bond_dev);
1358 | struct list_head *iter; |
1359 | struct slave *slave; |
1360 | |
1361 | bond_for_each_slave(bond, slave, iter) |
1362 | if (bond_slave_is_up(slave)) |
1363 | slave_disable_netpoll(slave); |
1364 | } |
1365 | |
1366 | static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) |
1367 | { |
1368 | struct bonding *bond = netdev_priv(dev); |
1369 | struct list_head *iter; |
1370 | struct slave *slave; |
1371 | int err = 0; |
1372 | |
1373 | bond_for_each_slave(bond, slave, iter) { |
1374 | err = slave_enable_netpoll(slave); |
1375 | if (err) { |
			bond_netpoll_cleanup(dev);
1377 | break; |
1378 | } |
1379 | } |
1380 | return err; |
1381 | } |
1382 | #else |
1383 | static inline int slave_enable_netpoll(struct slave *slave) |
1384 | { |
1385 | return 0; |
1386 | } |
1387 | static inline void slave_disable_netpoll(struct slave *slave) |
1388 | { |
1389 | } |
1390 | static void bond_netpoll_cleanup(struct net_device *bond_dev) |
1391 | { |
1392 | } |
1393 | #endif |
1394 | |
1395 | /*---------------------------------- IOCTL ----------------------------------*/ |
1396 | |
1397 | static netdev_features_t bond_fix_features(struct net_device *dev, |
1398 | netdev_features_t features) |
1399 | { |
1400 | struct bonding *bond = netdev_priv(dev); |
1401 | struct list_head *iter; |
1402 | netdev_features_t mask; |
1403 | struct slave *slave; |
1404 | |
1405 | mask = features; |
1406 | |
1407 | features &= ~NETIF_F_ONE_FOR_ALL; |
1408 | features |= NETIF_F_ALL_FOR_ALL; |
1409 | |
1410 | bond_for_each_slave(bond, slave, iter) { |
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
1414 | } |
1415 | features = netdev_add_tso_features(features, mask); |
1416 | |
1417 | return features; |
1418 | } |
1419 | |
1420 | #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ |
1421 | NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ |
1422 | NETIF_F_HIGHDMA | NETIF_F_LRO) |
1423 | |
1424 | #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ |
1425 | NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) |
1426 | |
1427 | #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ |
1428 | NETIF_F_GSO_SOFTWARE) |
1429 | |
1430 | |
1431 | static void bond_compute_features(struct bonding *bond) |
1432 | { |
1433 | unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | |
1434 | IFF_XMIT_DST_RELEASE_PERM; |
1435 | netdev_features_t vlan_features = BOND_VLAN_FEATURES; |
1436 | netdev_features_t enc_features = BOND_ENC_FEATURES; |
1437 | #ifdef CONFIG_XFRM_OFFLOAD |
1438 | netdev_features_t xfrm_features = BOND_XFRM_FEATURES; |
1439 | #endif /* CONFIG_XFRM_OFFLOAD */ |
1440 | netdev_features_t mpls_features = BOND_MPLS_FEATURES; |
1441 | struct net_device *bond_dev = bond->dev; |
1442 | struct list_head *iter; |
1443 | struct slave *slave; |
	unsigned short max_hard_header_len = ETH_HLEN;
1445 | unsigned int tso_max_size = TSO_MAX_SIZE; |
1446 | u16 tso_max_segs = TSO_MAX_SEGS; |
1447 | |
1448 | if (!bond_has_slaves(bond)) |
1449 | goto done; |
1450 | vlan_features &= NETIF_F_ALL_FOR_ALL; |
1451 | mpls_features &= NETIF_F_ALL_FOR_ALL; |
1452 | |
1453 | bond_for_each_slave(bond, slave, iter) { |
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);
1470 | |
1471 | dst_release_flag &= slave->dev->priv_flags; |
1472 | if (slave->dev->hard_header_len > max_hard_header_len) |
1473 | max_hard_header_len = slave->dev->hard_header_len; |
1474 | |
1475 | tso_max_size = min(tso_max_size, slave->dev->tso_max_size); |
1476 | tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs); |
1477 | } |
1478 | bond_dev->hard_header_len = max_hard_header_len; |
1479 | |
1480 | done: |
1481 | bond_dev->vlan_features = vlan_features; |
1482 | bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | |
1483 | NETIF_F_HW_VLAN_CTAG_TX | |
1484 | NETIF_F_HW_VLAN_STAG_TX; |
1485 | #ifdef CONFIG_XFRM_OFFLOAD |
1486 | bond_dev->hw_enc_features |= xfrm_features; |
1487 | #endif /* CONFIG_XFRM_OFFLOAD */ |
1488 | bond_dev->mpls_features = mpls_features; |
	netif_set_tso_max_segs(bond_dev, tso_max_segs);
	netif_set_tso_max_size(bond_dev, tso_max_size);
1491 | |
1492 | bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
1493 | if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) && |
1494 | dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) |
1495 | bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE; |
1496 | |
	netdev_change_features(bond_dev);
1498 | } |
1499 | |
1500 | static void bond_setup_by_slave(struct net_device *bond_dev, |
1501 | struct net_device *slave_dev) |
1502 | { |
1503 | bond_dev->header_ops = slave_dev->header_ops; |
1504 | |
1505 | bond_dev->type = slave_dev->type; |
1506 | bond_dev->hard_header_len = slave_dev->hard_header_len; |
1507 | bond_dev->needed_headroom = slave_dev->needed_headroom; |
1508 | bond_dev->addr_len = slave_dev->addr_len; |
1509 | |
1510 | memcpy(bond_dev->broadcast, slave_dev->broadcast, |
1511 | slave_dev->addr_len); |
1512 | |
1513 | if (slave_dev->flags & IFF_POINTOPOINT) { |
1514 | bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); |
1515 | bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); |
1516 | } |
1517 | } |
1518 | |
1519 | /* On bonding slaves other than the currently active slave, suppress |
1520 | * duplicates except for alb non-mcast/bcast. |
1521 | */ |
1522 | static bool bond_should_deliver_exact_match(struct sk_buff *skb, |
1523 | struct slave *slave, |
1524 | struct bonding *bond) |
1525 | { |
1526 | if (bond_is_slave_inactive(slave)) { |
1527 | if (BOND_MODE(bond) == BOND_MODE_ALB && |
1528 | skb->pkt_type != PACKET_BROADCAST && |
1529 | skb->pkt_type != PACKET_MULTICAST) |
1530 | return false; |
1531 | return true; |
1532 | } |
1533 | return false; |
1534 | } |
1535 | |
1536 | static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) |
1537 | { |
1538 | struct sk_buff *skb = *pskb; |
1539 | struct slave *slave; |
1540 | struct bonding *bond; |
1541 | int (*recv_probe)(const struct sk_buff *, struct bonding *, |
1542 | struct slave *); |
1543 | int ret = RX_HANDLER_ANOTHER; |
1544 | |
1545 | skb = skb_share_check(skb, GFP_ATOMIC); |
1546 | if (unlikely(!skb)) |
1547 | return RX_HANDLER_CONSUMED; |
1548 | |
1549 | *pskb = skb; |
1550 | |
1551 | slave = bond_slave_get_rcu(skb->dev); |
1552 | bond = slave->bond; |
1553 | |
1554 | recv_probe = READ_ONCE(bond->recv_probe); |
1555 | if (recv_probe) { |
1556 | ret = recv_probe(skb, bond, slave); |
1557 | if (ret == RX_HANDLER_CONSUMED) { |
1558 | consume_skb(skb); |
1559 | return ret; |
1560 | } |
1561 | } |
1562 | |
1563 | /* |
1564 | * For packets determined by bond_should_deliver_exact_match() call to |
1565 | * be suppressed we want to make an exception for link-local packets. |
1566 | * This is necessary for e.g. LLDP daemons to be able to monitor |
1567 | * inactive slave links without being forced to bind to them |
1568 | * explicitly. |
1569 | * |
1570 | * At the same time, packets that are passed to the bonding master |
1571 | * (including link-local ones) can have their originating interface |
1572 | * determined via PACKET_ORIGDEV socket option. |
1573 | */ |
1574 | if (bond_should_deliver_exact_match(skb, slave, bond)) { |
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1576 | return RX_HANDLER_PASS; |
1577 | return RX_HANDLER_EXACT; |
1578 | } |
1579 | |
1580 | skb->dev = bond->dev; |
1581 | |
1582 | if (BOND_MODE(bond) == BOND_MODE_ALB && |
	    netif_is_bridge_port(bond->dev) &&
1584 | skb->pkt_type == PACKET_HOST) { |
1585 | |
1586 | if (unlikely(skb_cow_head(skb, |
1587 | skb->data - skb_mac_header(skb)))) { |
1588 | kfree_skb(skb); |
1589 | return RX_HANDLER_CONSUMED; |
1590 | } |
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
1593 | } |
1594 | |
1595 | return ret; |
1596 | } |
1597 | |
1598 | static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) |
1599 | { |
1600 | switch (BOND_MODE(bond)) { |
1601 | case BOND_MODE_ROUNDROBIN: |
1602 | return NETDEV_LAG_TX_TYPE_ROUNDROBIN; |
1603 | case BOND_MODE_ACTIVEBACKUP: |
1604 | return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; |
1605 | case BOND_MODE_BROADCAST: |
1606 | return NETDEV_LAG_TX_TYPE_BROADCAST; |
1607 | case BOND_MODE_XOR: |
1608 | case BOND_MODE_8023AD: |
1609 | return NETDEV_LAG_TX_TYPE_HASH; |
1610 | default: |
1611 | return NETDEV_LAG_TX_TYPE_UNKNOWN; |
1612 | } |
1613 | } |
1614 | |
1615 | static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond, |
1616 | enum netdev_lag_tx_type type) |
1617 | { |
1618 | if (type != NETDEV_LAG_TX_TYPE_HASH) |
1619 | return NETDEV_LAG_HASH_NONE; |
1620 | |
1621 | switch (bond->params.xmit_policy) { |
1622 | case BOND_XMIT_POLICY_LAYER2: |
1623 | return NETDEV_LAG_HASH_L2; |
1624 | case BOND_XMIT_POLICY_LAYER34: |
1625 | return NETDEV_LAG_HASH_L34; |
1626 | case BOND_XMIT_POLICY_LAYER23: |
1627 | return NETDEV_LAG_HASH_L23; |
1628 | case BOND_XMIT_POLICY_ENCAP23: |
1629 | return NETDEV_LAG_HASH_E23; |
1630 | case BOND_XMIT_POLICY_ENCAP34: |
1631 | return NETDEV_LAG_HASH_E34; |
1632 | case BOND_XMIT_POLICY_VLAN_SRCMAC: |
1633 | return NETDEV_LAG_HASH_VLAN_SRCMAC; |
1634 | default: |
1635 | return NETDEV_LAG_HASH_UNKNOWN; |
1636 | } |
1637 | } |
1638 | |
1639 | static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, |
1640 | struct netlink_ext_ack *extack) |
1641 | { |
1642 | struct netdev_lag_upper_info lag_upper_info; |
1643 | enum netdev_lag_tx_type type; |
1644 | int err; |
1645 | |
1646 | type = bond_lag_tx_type(bond); |
1647 | lag_upper_info.tx_type = type; |
1648 | lag_upper_info.hash_type = bond_lag_hash_type(bond, type); |
1649 | |
1650 | err = netdev_master_upper_dev_link(dev: slave->dev, upper_dev: bond->dev, upper_priv: slave, |
1651 | upper_info: &lag_upper_info, extack); |
1652 | if (err) |
1653 | return err; |
1654 | |
1655 | slave->dev->flags |= IFF_SLAVE; |
1656 | return 0; |
1657 | } |
1658 | |
1659 | static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) |
1660 | { |
1661 | netdev_upper_dev_unlink(dev: slave->dev, upper_dev: bond->dev); |
1662 | slave->dev->flags &= ~IFF_SLAVE; |
1663 | } |
1664 | |
1665 | static void slave_kobj_release(struct kobject *kobj) |
1666 | { |
1667 | struct slave *slave = to_slave(kobj); |
1668 | struct bonding *bond = bond_get_bond_by_slave(slave); |
1669 | |
1670 | cancel_delayed_work_sync(dwork: &slave->notify_work); |
1671 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
1672 | kfree(SLAVE_AD_INFO(slave)); |
1673 | |
1674 | kfree(objp: slave); |
1675 | } |
1676 | |
1677 | static struct kobj_type slave_ktype = { |
1678 | .release = slave_kobj_release, |
1679 | #ifdef CONFIG_SYSFS |
1680 | .sysfs_ops = &slave_sysfs_ops, |
1681 | #endif |
1682 | }; |
1683 | |
1684 | static int bond_kobj_init(struct slave *slave) |
1685 | { |
1686 | int err; |
1687 | |
1688 | err = kobject_init_and_add(kobj: &slave->kobj, ktype: &slave_ktype, |
1689 | parent: &(slave->dev->dev.kobj), fmt: "bonding_slave" ); |
1690 | if (err) |
1691 | kobject_put(kobj: &slave->kobj); |
1692 | |
1693 | return err; |
1694 | } |
1695 | |
1696 | static struct slave *bond_alloc_slave(struct bonding *bond, |
1697 | struct net_device *slave_dev) |
1698 | { |
1699 | struct slave *slave = NULL; |
1700 | |
1701 | slave = kzalloc(size: sizeof(*slave), GFP_KERNEL); |
1702 | if (!slave) |
1703 | return NULL; |
1704 | |
1705 | slave->bond = bond; |
1706 | slave->dev = slave_dev; |
1707 | INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); |
1708 | |
1709 | if (bond_kobj_init(slave)) |
1710 | return NULL; |
1711 | |
1712 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
1713 | SLAVE_AD_INFO(slave) = kzalloc(size: sizeof(struct ad_slave_info), |
1714 | GFP_KERNEL); |
1715 | if (!SLAVE_AD_INFO(slave)) { |
1716 | kobject_put(kobj: &slave->kobj); |
1717 | return NULL; |
1718 | } |
1719 | } |
1720 | |
1721 | return slave; |
1722 | } |
1723 | |
1724 | static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) |
1725 | { |
1726 | info->bond_mode = BOND_MODE(bond); |
1727 | info->miimon = bond->params.miimon; |
1728 | info->num_slaves = bond->slave_cnt; |
1729 | } |
1730 | |
1731 | static void bond_fill_ifslave(struct slave *slave, struct ifslave *info) |
1732 | { |
1733 | strcpy(p: info->slave_name, q: slave->dev->name); |
1734 | info->link = slave->link; |
1735 | info->state = bond_slave_state(slave); |
1736 | info->link_failure_count = slave->link_failure_count; |
1737 | } |
1738 | |
1739 | static void bond_netdev_notify_work(struct work_struct *_work) |
1740 | { |
1741 | struct slave *slave = container_of(_work, struct slave, |
1742 | notify_work.work); |
1743 | |
1744 | if (rtnl_trylock()) { |
1745 | struct netdev_bonding_info binfo; |
1746 | |
1747 | bond_fill_ifslave(slave, info: &binfo.slave); |
1748 | bond_fill_ifbond(bond: slave->bond, info: &binfo.master); |
1749 | netdev_bonding_info_change(dev: slave->dev, bonding_info: &binfo); |
1750 | rtnl_unlock(); |
1751 | } else { |
1752 | queue_delayed_work(wq: slave->bond->wq, dwork: &slave->notify_work, delay: 1); |
1753 | } |
1754 | } |
1755 | |
1756 | void bond_queue_slave_event(struct slave *slave) |
1757 | { |
1758 | queue_delayed_work(wq: slave->bond->wq, dwork: &slave->notify_work, delay: 0); |
1759 | } |
1760 | |
1761 | void bond_lower_state_changed(struct slave *slave) |
1762 | { |
1763 | struct netdev_lag_lower_state_info info; |
1764 | |
1765 | info.link_up = slave->link == BOND_LINK_UP || |
1766 | slave->link == BOND_LINK_FAIL; |
1767 | info.tx_enabled = bond_is_active_slave(slave); |
1768 | netdev_lower_state_changed(lower_dev: slave->dev, lower_state_info: &info); |
1769 | } |
1770 | |
1771 | #define BOND_NL_ERR(bond_dev, extack, errmsg) do { \ |
1772 | if (extack) \ |
1773 | NL_SET_ERR_MSG(extack, errmsg); \ |
1774 | else \ |
1775 | netdev_err(bond_dev, "Error: %s\n", errmsg); \ |
1776 | } while (0) |
1777 | |
1778 | #define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do { \ |
1779 | if (extack) \ |
1780 | NL_SET_ERR_MSG(extack, errmsg); \ |
1781 | else \ |
1782 | slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \ |
1783 | } while (0) |
1784 | |
1785 | /* The bonding driver uses ether_setup() to convert a master bond device |
1786 | * to ARPHRD_ETHER, that resets the target netdevice's flags so we always |
1787 | * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP |
1788 | * if they were set |
1789 | */ |
1790 | static void bond_ether_setup(struct net_device *bond_dev) |
1791 | { |
1792 | unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP); |
1793 | |
1794 | ether_setup(dev: bond_dev); |
1795 | bond_dev->flags |= IFF_MASTER | flags; |
1796 | bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
1797 | } |
1798 | |
1799 | void bond_xdp_set_features(struct net_device *bond_dev) |
1800 | { |
1801 | struct bonding *bond = netdev_priv(dev: bond_dev); |
1802 | xdp_features_t val = NETDEV_XDP_ACT_MASK; |
1803 | struct list_head *iter; |
1804 | struct slave *slave; |
1805 | |
1806 | ASSERT_RTNL(); |
1807 | |
1808 | if (!bond_xdp_check(bond)) { |
1809 | xdp_clear_features_flag(dev: bond_dev); |
1810 | return; |
1811 | } |
1812 | |
1813 | bond_for_each_slave(bond, slave, iter) |
1814 | val &= slave->dev->xdp_features; |
1815 | |
1816 | xdp_set_features_flag(dev: bond_dev, val); |
1817 | } |
1818 | |
1819 | /* enslave device <slave> to bond device <master> */ |
1820 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, |
1821 | struct netlink_ext_ack *extack) |
1822 | { |
1823 | struct bonding *bond = netdev_priv(dev: bond_dev); |
1824 | const struct net_device_ops *slave_ops = slave_dev->netdev_ops; |
1825 | struct slave *new_slave = NULL, *prev_slave; |
1826 | struct sockaddr_storage ss; |
1827 | int link_reporting; |
1828 | int res = 0, i; |
1829 | |
1830 | if (slave_dev->flags & IFF_MASTER && |
1831 | !netif_is_bond_master(dev: slave_dev)) { |
1832 | BOND_NL_ERR(bond_dev, extack, |
1833 | "Device type (master device) cannot be enslaved" ); |
1834 | return -EPERM; |
1835 | } |
1836 | |
1837 | if (!bond->params.use_carrier && |
1838 | slave_dev->ethtool_ops->get_link == NULL && |
1839 | slave_ops->ndo_eth_ioctl == NULL) { |
1840 | slave_warn(bond_dev, slave_dev, "no link monitoring support\n" ); |
1841 | } |
1842 | |
1843 | /* already in-use? */ |
1844 | if (netdev_is_rx_handler_busy(dev: slave_dev)) { |
1845 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
1846 | "Device is in use and cannot be enslaved" ); |
1847 | return -EBUSY; |
1848 | } |
1849 | |
1850 | if (bond_dev == slave_dev) { |
1851 | BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself." ); |
1852 | return -EPERM; |
1853 | } |
1854 | |
1855 | /* vlan challenged mutual exclusion */ |
1856 | /* no need to lock since we're protected by rtnl_lock */ |
1857 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { |
1858 | slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n" ); |
1859 | if (vlan_uses_dev(dev: bond_dev)) { |
1860 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
1861 | "Can not enslave VLAN challenged device to VLAN enabled bond" ); |
1862 | return -EPERM; |
1863 | } else { |
1864 | slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n" ); |
1865 | } |
1866 | } else { |
1867 | slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n" ); |
1868 | } |
1869 | |
1870 | if (slave_dev->features & NETIF_F_HW_ESP) |
1871 | slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n" ); |
1872 | |
1873 | /* Old ifenslave binaries are no longer supported. These can |
1874 | * be identified with moderate accuracy by the state of the slave: |
1875 | * the current ifenslave will set the interface down prior to |
1876 | * enslaving it; the old ifenslave will not. |
1877 | */ |
1878 | if (slave_dev->flags & IFF_UP) { |
1879 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
1880 | "Device can not be enslaved while up" ); |
1881 | return -EPERM; |
1882 | } |
1883 | |
1884 | /* set bonding device ether type by slave - bonding netdevices are |
1885 | * created with ether_setup, so when the slave type is not ARPHRD_ETHER |
1886 | * there is a need to override some of the type dependent attribs/funcs. |
1887 | * |
1888 | * bond ether type mutual exclusion - don't allow slaves of dissimilar |
1889 | * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond |
1890 | */ |
1891 | if (!bond_has_slaves(bond)) { |
1892 | if (bond_dev->type != slave_dev->type) { |
1893 | slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n" , |
1894 | bond_dev->type, slave_dev->type); |
1895 | |
1896 | res = call_netdevice_notifiers(val: NETDEV_PRE_TYPE_CHANGE, |
1897 | dev: bond_dev); |
1898 | res = notifier_to_errno(ret: res); |
1899 | if (res) { |
1900 | slave_err(bond_dev, slave_dev, "refused to change device type\n" ); |
1901 | return -EBUSY; |
1902 | } |
1903 | |
1904 | /* Flush unicast and multicast addresses */ |
1905 | dev_uc_flush(dev: bond_dev); |
1906 | dev_mc_flush(dev: bond_dev); |
1907 | |
1908 | if (slave_dev->type != ARPHRD_ETHER) |
1909 | bond_setup_by_slave(bond_dev, slave_dev); |
1910 | else |
1911 | bond_ether_setup(bond_dev); |
1912 | |
1913 | call_netdevice_notifiers(val: NETDEV_POST_TYPE_CHANGE, |
1914 | dev: bond_dev); |
1915 | } |
1916 | } else if (bond_dev->type != slave_dev->type) { |
1917 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
1918 | "Device type is different from other slaves" ); |
1919 | return -EINVAL; |
1920 | } |
1921 | |
1922 | if (slave_dev->type == ARPHRD_INFINIBAND && |
1923 | BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { |
1924 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
1925 | "Only active-backup mode is supported for infiniband slaves" ); |
1926 | res = -EOPNOTSUPP; |
1927 | goto err_undo_flags; |
1928 | } |
1929 | |
1930 | if (!slave_ops->ndo_set_mac_address || |
1931 | slave_dev->type == ARPHRD_INFINIBAND) { |
1932 | slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n" ); |
1933 | if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && |
1934 | bond->params.fail_over_mac != BOND_FOM_ACTIVE) { |
1935 | if (!bond_has_slaves(bond)) { |
1936 | bond->params.fail_over_mac = BOND_FOM_ACTIVE; |
1937 | slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n" ); |
1938 | } else { |
1939 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
1940 | "Slave device does not support setting the MAC address, but fail_over_mac is not set to active" ); |
1941 | res = -EOPNOTSUPP; |
1942 | goto err_undo_flags; |
1943 | } |
1944 | } |
1945 | } |
1946 | |
1947 | call_netdevice_notifiers(val: NETDEV_JOIN, dev: slave_dev); |
1948 | |
1949 | /* If this is the first slave, then we need to set the master's hardware |
1950 | * address to be the same as the slave's. |
1951 | */ |
1952 | if (!bond_has_slaves(bond) && |
1953 | bond->dev->addr_assign_type == NET_ADDR_RANDOM) { |
1954 | res = bond_set_dev_addr(bond_dev: bond->dev, slave_dev); |
1955 | if (res) |
1956 | goto err_undo_flags; |
1957 | } |
1958 | |
1959 | new_slave = bond_alloc_slave(bond, slave_dev); |
1960 | if (!new_slave) { |
1961 | res = -ENOMEM; |
1962 | goto err_undo_flags; |
1963 | } |
1964 | |
1965 | /* Set the new_slave's queue_id to be zero. Queue ID mapping |
1966 | * is set via sysfs or module option if desired. |
1967 | */ |
1968 | new_slave->queue_id = 0; |
1969 | |
1970 | /* Save slave's original mtu and then set it to match the bond */ |
1971 | new_slave->original_mtu = slave_dev->mtu; |
1972 | res = dev_set_mtu(slave_dev, bond->dev->mtu); |
1973 | if (res) { |
1974 | slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n" , res); |
1975 | goto err_free; |
1976 | } |
1977 | |
1978 | /* Save slave's original ("permanent") mac address for modes |
1979 | * that need it, and for restoring it upon release, and then |
1980 | * set it to the master's address |
1981 | */ |
1982 | bond_hw_addr_copy(dst: new_slave->perm_hwaddr, src: slave_dev->dev_addr, |
1983 | len: slave_dev->addr_len); |
1984 | |
1985 | if (!bond->params.fail_over_mac || |
1986 | BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { |
1987 | /* Set slave to master's mac address. The application already |
1988 | * set the master's mac address to that of the first slave |
1989 | */ |
1990 | memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); |
1991 | ss.ss_family = slave_dev->type; |
1992 | res = dev_set_mac_address(dev: slave_dev, sa: (struct sockaddr *)&ss, |
1993 | extack); |
1994 | if (res) { |
1995 | slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n" , res); |
1996 | goto err_restore_mtu; |
1997 | } |
1998 | } |
1999 | |
2000 | /* set no_addrconf flag before open to prevent IPv6 addrconf */ |
2001 | slave_dev->priv_flags |= IFF_NO_ADDRCONF; |
2002 | |
2003 | /* open the slave since the application closed it */ |
2004 | res = dev_open(dev: slave_dev, extack); |
2005 | if (res) { |
2006 | slave_err(bond_dev, slave_dev, "Opening slave failed\n" ); |
2007 | goto err_restore_mac; |
2008 | } |
2009 | |
2010 | slave_dev->priv_flags |= IFF_BONDING; |
2011 | /* initialize slave stats */ |
2012 | dev_get_stats(dev: new_slave->dev, storage: &new_slave->slave_stats); |
2013 | |
2014 | if (bond_is_lb(bond)) { |
2015 | /* bond_alb_init_slave() must be called before all other stages since |
2016 | * it might fail and we do not want to have to undo everything |
2017 | */ |
2018 | res = bond_alb_init_slave(bond, slave: new_slave); |
2019 | if (res) |
2020 | goto err_close; |
2021 | } |
2022 | |
2023 | res = vlan_vids_add_by_dev(dev: slave_dev, by_dev: bond_dev); |
2024 | if (res) { |
2025 | slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n" ); |
2026 | goto err_close; |
2027 | } |
2028 | |
2029 | prev_slave = bond_last_slave(bond); |
2030 | |
2031 | new_slave->delay = 0; |
2032 | new_slave->link_failure_count = 0; |
2033 | |
2034 | if (bond_update_speed_duplex(slave: new_slave) && |
2035 | bond_needs_speed_duplex(bond)) |
2036 | new_slave->link = BOND_LINK_DOWN; |
2037 | |
2038 | new_slave->last_rx = jiffies - |
2039 | (msecs_to_jiffies(m: bond->params.arp_interval) + 1); |
2040 | for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) |
2041 | new_slave->target_last_arp_rx[i] = new_slave->last_rx; |
2042 | |
2043 | new_slave->last_tx = new_slave->last_rx; |
2044 | |
2045 | if (bond->params.miimon && !bond->params.use_carrier) { |
2046 | link_reporting = bond_check_dev_link(bond, slave_dev, reporting: 1); |
2047 | |
2048 | if ((link_reporting == -1) && !bond->params.arp_interval) { |
2049 | /* miimon is set but a bonded network driver |
2050 | * does not support ETHTOOL/MII and |
2051 | * arp_interval is not set. Note: if |
2052 | * use_carrier is enabled, we will never go |
2053 | * here (because netif_carrier is always |
2054 | * supported); thus, we don't need to change |
2055 | * the messages for netif_carrier. |
2056 | */ |
2057 | slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n" ); |
2058 | } else if (link_reporting == -1) { |
2059 | /* unable get link status using mii/ethtool */ |
2060 | slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n" ); |
2061 | } |
2062 | } |
2063 | |
2064 | /* check for initial state */ |
2065 | new_slave->link = BOND_LINK_NOCHANGE; |
2066 | if (bond->params.miimon) { |
2067 | if (bond_check_dev_link(bond, slave_dev, reporting: 0) == BMSR_LSTATUS) { |
2068 | if (bond->params.updelay) { |
2069 | bond_set_slave_link_state(slave: new_slave, |
2070 | BOND_LINK_BACK, |
2071 | BOND_SLAVE_NOTIFY_NOW); |
2072 | new_slave->delay = bond->params.updelay; |
2073 | } else { |
2074 | bond_set_slave_link_state(slave: new_slave, |
2075 | BOND_LINK_UP, |
2076 | BOND_SLAVE_NOTIFY_NOW); |
2077 | } |
2078 | } else { |
2079 | bond_set_slave_link_state(slave: new_slave, BOND_LINK_DOWN, |
2080 | BOND_SLAVE_NOTIFY_NOW); |
2081 | } |
2082 | } else if (bond->params.arp_interval) { |
2083 | bond_set_slave_link_state(slave: new_slave, |
2084 | state: (netif_carrier_ok(dev: slave_dev) ? |
2085 | BOND_LINK_UP : BOND_LINK_DOWN), |
2086 | BOND_SLAVE_NOTIFY_NOW); |
2087 | } else { |
2088 | bond_set_slave_link_state(slave: new_slave, BOND_LINK_UP, |
2089 | BOND_SLAVE_NOTIFY_NOW); |
2090 | } |
2091 | |
2092 | if (new_slave->link != BOND_LINK_DOWN) |
2093 | new_slave->last_link_up = jiffies; |
2094 | slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n" , |
2095 | new_slave->link == BOND_LINK_DOWN ? "DOWN" : |
2096 | (new_slave->link == BOND_LINK_UP ? "UP" : "BACK" )); |
2097 | |
2098 | if (bond_uses_primary(bond) && bond->params.primary[0]) { |
2099 | /* if there is a primary slave, remember it */ |
2100 | if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { |
2101 | rcu_assign_pointer(bond->primary_slave, new_slave); |
2102 | bond->force_primary = true; |
2103 | } |
2104 | } |
2105 | |
2106 | switch (BOND_MODE(bond)) { |
2107 | case BOND_MODE_ACTIVEBACKUP: |
2108 | bond_set_slave_inactive_flags(slave: new_slave, |
2109 | BOND_SLAVE_NOTIFY_NOW); |
2110 | break; |
2111 | case BOND_MODE_8023AD: |
2112 | /* in 802.3ad mode, the internal mechanism |
2113 | * will activate the slaves in the selected |
2114 | * aggregator |
2115 | */ |
2116 | bond_set_slave_inactive_flags(slave: new_slave, BOND_SLAVE_NOTIFY_NOW); |
2117 | /* if this is the first slave */ |
2118 | if (!prev_slave) { |
2119 | SLAVE_AD_INFO(new_slave)->id = 1; |
2120 | /* Initialize AD with the number of times that the AD timer is called in 1 second |
2121 | * can be called only after the mac address of the bond is set |
2122 | */ |
2123 | bond_3ad_initialize(bond); |
2124 | } else { |
2125 | SLAVE_AD_INFO(new_slave)->id = |
2126 | SLAVE_AD_INFO(prev_slave)->id + 1; |
2127 | } |
2128 | |
2129 | bond_3ad_bind_slave(slave: new_slave); |
2130 | break; |
2131 | case BOND_MODE_TLB: |
2132 | case BOND_MODE_ALB: |
2133 | bond_set_active_slave(slave: new_slave); |
2134 | bond_set_slave_inactive_flags(slave: new_slave, BOND_SLAVE_NOTIFY_NOW); |
2135 | break; |
2136 | default: |
2137 | slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n" ); |
2138 | |
2139 | /* always active in trunk mode */ |
2140 | bond_set_active_slave(slave: new_slave); |
2141 | |
2142 | /* In trunking mode there is little meaning to curr_active_slave |
2143 | * anyway (it holds no special properties of the bond device), |
2144 | * so we can change it without calling change_active_interface() |
2145 | */ |
2146 | if (!rcu_access_pointer(bond->curr_active_slave) && |
2147 | new_slave->link == BOND_LINK_UP) |
2148 | rcu_assign_pointer(bond->curr_active_slave, new_slave); |
2149 | |
2150 | break; |
2151 | } /* switch(bond_mode) */ |
2152 | |
2153 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2154 | if (bond->dev->npinfo) { |
2155 | if (slave_enable_netpoll(slave: new_slave)) { |
2156 | slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n" ); |
2157 | res = -EBUSY; |
2158 | goto err_detach; |
2159 | } |
2160 | } |
2161 | #endif |
2162 | |
2163 | if (!(bond_dev->features & NETIF_F_LRO)) |
2164 | dev_disable_lro(dev: slave_dev); |
2165 | |
2166 | res = netdev_rx_handler_register(dev: slave_dev, rx_handler: bond_handle_frame, |
2167 | rx_handler_data: new_slave); |
2168 | if (res) { |
2169 | slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n" , res); |
2170 | goto err_detach; |
2171 | } |
2172 | |
2173 | res = bond_master_upper_dev_link(bond, slave: new_slave, extack); |
2174 | if (res) { |
2175 | slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n" , res); |
2176 | goto err_unregister; |
2177 | } |
2178 | |
2179 | bond_lower_state_changed(slave: new_slave); |
2180 | |
2181 | res = bond_sysfs_slave_add(slave: new_slave); |
2182 | if (res) { |
2183 | slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n" , res); |
2184 | goto err_upper_unlink; |
2185 | } |
2186 | |
2187 | /* If the mode uses primary, then the following is handled by |
2188 | * bond_change_active_slave(). |
2189 | */ |
2190 | if (!bond_uses_primary(bond)) { |
2191 | /* set promiscuity level to new slave */ |
2192 | if (bond_dev->flags & IFF_PROMISC) { |
2193 | res = dev_set_promiscuity(dev: slave_dev, inc: 1); |
2194 | if (res) |
2195 | goto err_sysfs_del; |
2196 | } |
2197 | |
2198 | /* set allmulti level to new slave */ |
2199 | if (bond_dev->flags & IFF_ALLMULTI) { |
2200 | res = dev_set_allmulti(dev: slave_dev, inc: 1); |
2201 | if (res) { |
2202 | if (bond_dev->flags & IFF_PROMISC) |
2203 | dev_set_promiscuity(dev: slave_dev, inc: -1); |
2204 | goto err_sysfs_del; |
2205 | } |
2206 | } |
2207 | |
2208 | if (bond_dev->flags & IFF_UP) { |
2209 | netif_addr_lock_bh(dev: bond_dev); |
2210 | dev_mc_sync_multiple(to: slave_dev, from: bond_dev); |
2211 | dev_uc_sync_multiple(to: slave_dev, from: bond_dev); |
2212 | netif_addr_unlock_bh(dev: bond_dev); |
2213 | |
2214 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
2215 | dev_mc_add(dev: slave_dev, addr: lacpdu_mcast_addr); |
2216 | } |
2217 | } |
2218 | |
2219 | bond->slave_cnt++; |
2220 | bond_compute_features(bond); |
2221 | bond_set_carrier(bond); |
2222 | |
2223 | if (bond_uses_primary(bond)) { |
2224 | block_netpoll_tx(); |
2225 | bond_select_active_slave(bond); |
2226 | unblock_netpoll_tx(); |
2227 | } |
2228 | |
2229 | if (bond_mode_can_use_xmit_hash(bond)) |
2230 | bond_update_slave_arr(bond, NULL); |
2231 | |
2232 | |
2233 | if (!slave_dev->netdev_ops->ndo_bpf || |
2234 | !slave_dev->netdev_ops->ndo_xdp_xmit) { |
2235 | if (bond->xdp_prog) { |
2236 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
2237 | "Slave does not support XDP" ); |
2238 | res = -EOPNOTSUPP; |
2239 | goto err_sysfs_del; |
2240 | } |
2241 | } else if (bond->xdp_prog) { |
2242 | struct netdev_bpf xdp = { |
2243 | .command = XDP_SETUP_PROG, |
2244 | .flags = 0, |
2245 | .prog = bond->xdp_prog, |
2246 | .extack = extack, |
2247 | }; |
2248 | |
2249 | if (dev_xdp_prog_count(dev: slave_dev) > 0) { |
2250 | SLAVE_NL_ERR(bond_dev, slave_dev, extack, |
2251 | "Slave has XDP program loaded, please unload before enslaving" ); |
2252 | res = -EOPNOTSUPP; |
2253 | goto err_sysfs_del; |
2254 | } |
2255 | |
2256 | res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); |
2257 | if (res < 0) { |
2258 | /* ndo_bpf() sets extack error message */ |
2259 | slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n" , res); |
2260 | goto err_sysfs_del; |
2261 | } |
2262 | if (bond->xdp_prog) |
2263 | bpf_prog_inc(prog: bond->xdp_prog); |
2264 | } |
2265 | |
2266 | bond_xdp_set_features(bond_dev); |
2267 | |
2268 | slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n" , |
2269 | bond_is_active_slave(new_slave) ? "an active" : "a backup" , |
2270 | new_slave->link != BOND_LINK_DOWN ? "an up" : "a down" ); |
2271 | |
2272 | /* enslave is successful */ |
2273 | bond_queue_slave_event(slave: new_slave); |
2274 | return 0; |
2275 | |
2276 | /* Undo stages on error */ |
2277 | err_sysfs_del: |
2278 | bond_sysfs_slave_del(slave: new_slave); |
2279 | |
2280 | err_upper_unlink: |
2281 | bond_upper_dev_unlink(bond, slave: new_slave); |
2282 | |
2283 | err_unregister: |
2284 | netdev_rx_handler_unregister(dev: slave_dev); |
2285 | |
2286 | err_detach: |
2287 | vlan_vids_del_by_dev(dev: slave_dev, by_dev: bond_dev); |
2288 | if (rcu_access_pointer(bond->primary_slave) == new_slave) |
2289 | RCU_INIT_POINTER(bond->primary_slave, NULL); |
2290 | if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { |
2291 | block_netpoll_tx(); |
2292 | bond_change_active_slave(bond, NULL); |
2293 | bond_select_active_slave(bond); |
2294 | unblock_netpoll_tx(); |
2295 | } |
2296 | /* either primary_slave or curr_active_slave might've changed */ |
2297 | synchronize_rcu(); |
2298 | slave_disable_netpoll(slave: new_slave); |
2299 | |
2300 | err_close: |
2301 | if (!netif_is_bond_master(dev: slave_dev)) |
2302 | slave_dev->priv_flags &= ~IFF_BONDING; |
2303 | dev_close(dev: slave_dev); |
2304 | |
2305 | err_restore_mac: |
2306 | slave_dev->priv_flags &= ~IFF_NO_ADDRCONF; |
2307 | if (!bond->params.fail_over_mac || |
2308 | BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { |
2309 | /* XXX TODO - fom follow mode needs to change master's |
2310 | * MAC if this slave's MAC is in use by the bond, or at |
2311 | * least print a warning. |
2312 | */ |
2313 | bond_hw_addr_copy(dst: ss.__data, src: new_slave->perm_hwaddr, |
2314 | len: new_slave->dev->addr_len); |
2315 | ss.ss_family = slave_dev->type; |
2316 | dev_set_mac_address(dev: slave_dev, sa: (struct sockaddr *)&ss, NULL); |
2317 | } |
2318 | |
2319 | err_restore_mtu: |
2320 | dev_set_mtu(slave_dev, new_slave->original_mtu); |
2321 | |
2322 | err_free: |
2323 | kobject_put(kobj: &new_slave->kobj); |
2324 | |
2325 | err_undo_flags: |
2326 | /* Enslave of first slave has failed and we need to fix master's mac */ |
2327 | if (!bond_has_slaves(bond)) { |
2328 | if (ether_addr_equal_64bits(addr1: bond_dev->dev_addr, |
2329 | addr2: slave_dev->dev_addr)) |
2330 | eth_hw_addr_random(dev: bond_dev); |
2331 | if (bond_dev->type != ARPHRD_ETHER) { |
2332 | dev_close(dev: bond_dev); |
2333 | bond_ether_setup(bond_dev); |
2334 | } |
2335 | } |
2336 | |
2337 | return res; |
2338 | } |
2339 | |
2340 | /* Try to release the slave device <slave> from the bond device <master> |
2341 | * It is legal to access curr_active_slave without a lock because all the function |
2342 | * is RTNL-locked. If "all" is true it means that the function is being called |
2343 | * while destroying a bond interface and all slaves are being released. |
2344 | * |
2345 | * The rules for slave state should be: |
2346 | * for Active/Backup: |
2347 | * Active stays on all backups go down |
2348 | * for Bonded connections: |
2349 | * The first up interface should be left on and all others downed. |
2350 | */ |
2351 | static int __bond_release_one(struct net_device *bond_dev, |
2352 | struct net_device *slave_dev, |
2353 | bool all, bool unregister) |
2354 | { |
2355 | struct bonding *bond = netdev_priv(dev: bond_dev); |
2356 | struct slave *slave, *oldcurrent; |
2357 | struct sockaddr_storage ss; |
2358 | int old_flags = bond_dev->flags; |
2359 | netdev_features_t old_features = bond_dev->features; |
2360 | |
2361 | /* slave is not a slave or master is not master of this slave */ |
2362 | if (!(slave_dev->flags & IFF_SLAVE) || |
2363 | !netdev_has_upper_dev(dev: slave_dev, upper_dev: bond_dev)) { |
2364 | slave_dbg(bond_dev, slave_dev, "cannot release slave\n" ); |
2365 | return -EINVAL; |
2366 | } |
2367 | |
2368 | block_netpoll_tx(); |
2369 | |
2370 | slave = bond_get_slave_by_dev(bond, slave_dev); |
2371 | if (!slave) { |
2372 | /* not a slave of this bond */ |
2373 | slave_info(bond_dev, slave_dev, "interface not enslaved\n" ); |
2374 | unblock_netpoll_tx(); |
2375 | return -EINVAL; |
2376 | } |
2377 | |
2378 | bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); |
2379 | |
2380 | bond_sysfs_slave_del(slave); |
2381 | |
2382 | /* recompute stats just before removing the slave */ |
2383 | bond_get_stats(bond_dev: bond->dev, stats: &bond->bond_stats); |
2384 | |
2385 | if (bond->xdp_prog) { |
2386 | struct netdev_bpf xdp = { |
2387 | .command = XDP_SETUP_PROG, |
2388 | .flags = 0, |
2389 | .prog = NULL, |
2390 | .extack = NULL, |
2391 | }; |
2392 | if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp)) |
2393 | slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n" ); |
2394 | } |
2395 | |
2396 | /* unregister rx_handler early so bond_handle_frame wouldn't be called |
2397 | * for this slave anymore. |
2398 | */ |
2399 | netdev_rx_handler_unregister(dev: slave_dev); |
2400 | |
2401 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
2402 | bond_3ad_unbind_slave(slave); |
2403 | |
2404 | bond_upper_dev_unlink(bond, slave); |
2405 | |
2406 | if (bond_mode_can_use_xmit_hash(bond)) |
2407 | bond_update_slave_arr(bond, skipslave: slave); |
2408 | |
2409 | slave_info(bond_dev, slave_dev, "Releasing %s interface\n" , |
2410 | bond_is_active_slave(slave) ? "active" : "backup" ); |
2411 | |
2412 | oldcurrent = rcu_access_pointer(bond->curr_active_slave); |
2413 | |
2414 | RCU_INIT_POINTER(bond->current_arp_slave, NULL); |
2415 | |
2416 | if (!all && (!bond->params.fail_over_mac || |
2417 | BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { |
2418 | if (ether_addr_equal_64bits(addr1: bond_dev->dev_addr, addr2: slave->perm_hwaddr) && |
2419 | bond_has_slaves(bond)) |
2420 | slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n" , |
2421 | slave->perm_hwaddr); |
2422 | } |
2423 | |
2424 | if (rtnl_dereference(bond->primary_slave) == slave) |
2425 | RCU_INIT_POINTER(bond->primary_slave, NULL); |
2426 | |
2427 | if (oldcurrent == slave) |
2428 | bond_change_active_slave(bond, NULL); |
2429 | |
2430 | if (bond_is_lb(bond)) { |
2431 | /* Must be called only after the slave has been |
2432 | * detached from the list and the curr_active_slave |
2433 | * has been cleared (if our_slave == old_current), |
2434 | * but before a new active slave is selected. |
2435 | */ |
2436 | bond_alb_deinit_slave(bond, slave); |
2437 | } |
2438 | |
2439 | if (all) { |
2440 | RCU_INIT_POINTER(bond->curr_active_slave, NULL); |
2441 | } else if (oldcurrent == slave) { |
2442 | /* Note that we hold RTNL over this sequence, so there |
2443 | * is no concern that another slave add/remove event |
2444 | * will interfere. |
2445 | */ |
2446 | bond_select_active_slave(bond); |
2447 | } |
2448 | |
2449 | bond_set_carrier(bond); |
2450 | if (!bond_has_slaves(bond)) |
2451 | eth_hw_addr_random(dev: bond_dev); |
2452 | |
2453 | unblock_netpoll_tx(); |
2454 | synchronize_rcu(); |
2455 | bond->slave_cnt--; |
2456 | |
2457 | if (!bond_has_slaves(bond)) { |
2458 | call_netdevice_notifiers(val: NETDEV_CHANGEADDR, dev: bond->dev); |
2459 | call_netdevice_notifiers(val: NETDEV_RELEASE, dev: bond->dev); |
2460 | } |
2461 | |
2462 | bond_compute_features(bond); |
2463 | if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && |
2464 | (old_features & NETIF_F_VLAN_CHALLENGED)) |
2465 | slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n" ); |
2466 | |
2467 | vlan_vids_del_by_dev(dev: slave_dev, by_dev: bond_dev); |
2468 | |
2469 | /* If the mode uses primary, then this case was handled above by |
2470 | * bond_change_active_slave(..., NULL) |
2471 | */ |
2472 | if (!bond_uses_primary(bond)) { |
2473 | /* unset promiscuity level from slave |
2474 | * NOTE: The NETDEV_CHANGEADDR call above may change the value |
2475 | * of the IFF_PROMISC flag in the bond_dev, but we need the |
2476 | * value of that flag before that change, as that was the value |
2477 | * when this slave was attached, so we cache at the start of the |
2478 | * function and use it here. Same goes for ALLMULTI below |
2479 | */ |
2480 | if (old_flags & IFF_PROMISC) |
2481 | dev_set_promiscuity(dev: slave_dev, inc: -1); |
2482 | |
2483 | /* unset allmulti level from slave */ |
2484 | if (old_flags & IFF_ALLMULTI) |
2485 | dev_set_allmulti(dev: slave_dev, inc: -1); |
2486 | |
2487 | if (old_flags & IFF_UP) |
2488 | bond_hw_addr_flush(bond_dev, slave_dev); |
2489 | } |
2490 | |
2491 | slave_disable_netpoll(slave); |
2492 | |
2493 | /* close slave before restoring its mac address */ |
2494 | dev_close(dev: slave_dev); |
2495 | |
2496 | slave_dev->priv_flags &= ~IFF_NO_ADDRCONF; |
2497 | |
2498 | if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || |
2499 | BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { |
2500 | /* restore original ("permanent") mac address */ |
2501 | bond_hw_addr_copy(dst: ss.__data, src: slave->perm_hwaddr, |
2502 | len: slave->dev->addr_len); |
2503 | ss.ss_family = slave_dev->type; |
2504 | dev_set_mac_address(dev: slave_dev, sa: (struct sockaddr *)&ss, NULL); |
2505 | } |
2506 | |
2507 | if (unregister) |
2508 | __dev_set_mtu(slave_dev, slave->original_mtu); |
2509 | else |
2510 | dev_set_mtu(slave_dev, slave->original_mtu); |
2511 | |
2512 | if (!netif_is_bond_master(dev: slave_dev)) |
2513 | slave_dev->priv_flags &= ~IFF_BONDING; |
2514 | |
2515 | bond_xdp_set_features(bond_dev); |
2516 | kobject_put(kobj: &slave->kobj); |
2517 | |
2518 | return 0; |
2519 | } |
2520 | |
2521 | /* A wrapper used because of ndo_del_link */ |
2522 | int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) |
2523 | { |
2524 | return __bond_release_one(bond_dev, slave_dev, all: false, unregister: false); |
2525 | } |
2526 | |
2527 | /* First release a slave and then destroy the bond if no more slaves are left. |
2528 | * Must be under rtnl_lock when this function is called. |
2529 | */ |
2530 | static int bond_release_and_destroy(struct net_device *bond_dev, |
2531 | struct net_device *slave_dev) |
2532 | { |
2533 | struct bonding *bond = netdev_priv(dev: bond_dev); |
2534 | int ret; |
2535 | |
2536 | ret = __bond_release_one(bond_dev, slave_dev, all: false, unregister: true); |
2537 | if (ret == 0 && !bond_has_slaves(bond) && |
2538 | bond_dev->reg_state != NETREG_UNREGISTERING) { |
2539 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; |
2540 | netdev_info(dev: bond_dev, format: "Destroying bond\n" ); |
2541 | bond_remove_proc_entry(bond); |
2542 | unregister_netdevice(dev: bond_dev); |
2543 | } |
2544 | return ret; |
2545 | } |
2546 | |
2547 | static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) |
2548 | { |
2549 | struct bonding *bond = netdev_priv(dev: bond_dev); |
2550 | |
2551 | bond_fill_ifbond(bond, info); |
2552 | } |
2553 | |
2554 | static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) |
2555 | { |
2556 | struct bonding *bond = netdev_priv(dev: bond_dev); |
2557 | struct list_head *iter; |
2558 | int i = 0, res = -ENODEV; |
2559 | struct slave *slave; |
2560 | |
2561 | bond_for_each_slave(bond, slave, iter) { |
2562 | if (i++ == (int)info->slave_id) { |
2563 | res = 0; |
2564 | bond_fill_ifslave(slave, info); |
2565 | break; |
2566 | } |
2567 | } |
2568 | |
2569 | return res; |
2570 | } |
2571 | |
2572 | /*-------------------------------- Monitoring -------------------------------*/ |
2573 | |
2574 | /* called with rcu_read_lock() */ |
2575 | static int bond_miimon_inspect(struct bonding *bond) |
2576 | { |
2577 | bool ignore_updelay = false; |
2578 | int link_state, commit = 0; |
2579 | struct list_head *iter; |
2580 | struct slave *slave; |
2581 | |
2582 | if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { |
2583 | ignore_updelay = !rcu_dereference(bond->curr_active_slave); |
2584 | } else { |
2585 | struct bond_up_slave *usable_slaves; |
2586 | |
2587 | usable_slaves = rcu_dereference(bond->usable_slaves); |
2588 | |
2589 | if (usable_slaves && usable_slaves->count == 0) |
2590 | ignore_updelay = true; |
2591 | } |
2592 | |
2593 | bond_for_each_slave_rcu(bond, slave, iter) { |
2594 | bond_propose_link_state(slave, BOND_LINK_NOCHANGE); |
2595 | |
2596 | link_state = bond_check_dev_link(bond, slave_dev: slave->dev, reporting: 0); |
2597 | |
2598 | switch (slave->link) { |
2599 | case BOND_LINK_UP: |
2600 | if (link_state) |
2601 | continue; |
2602 | |
2603 | bond_propose_link_state(slave, BOND_LINK_FAIL); |
2604 | commit++; |
2605 | slave->delay = bond->params.downdelay; |
2606 | if (slave->delay) { |
2607 | slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n" , |
2608 | (BOND_MODE(bond) == |
2609 | BOND_MODE_ACTIVEBACKUP) ? |
2610 | (bond_is_active_slave(slave) ? |
2611 | "active " : "backup " ) : "" , |
2612 | bond->params.downdelay * bond->params.miimon); |
2613 | } |
2614 | fallthrough; |
2615 | case BOND_LINK_FAIL: |
2616 | if (link_state) { |
2617 | /* recovered before downdelay expired */ |
2618 | bond_propose_link_state(slave, BOND_LINK_UP); |
2619 | slave->last_link_up = jiffies; |
2620 | slave_info(bond->dev, slave->dev, "link status up again after %d ms\n" , |
2621 | (bond->params.downdelay - slave->delay) * |
2622 | bond->params.miimon); |
2623 | commit++; |
2624 | continue; |
2625 | } |
2626 | |
2627 | if (slave->delay <= 0) { |
2628 | bond_propose_link_state(slave, BOND_LINK_DOWN); |
2629 | commit++; |
2630 | continue; |
2631 | } |
2632 | |
2633 | slave->delay--; |
2634 | break; |
2635 | |
2636 | case BOND_LINK_DOWN: |
2637 | if (!link_state) |
2638 | continue; |
2639 | |
2640 | bond_propose_link_state(slave, BOND_LINK_BACK); |
2641 | commit++; |
2642 | slave->delay = bond->params.updelay; |
2643 | |
2644 | if (slave->delay) { |
2645 | slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n" , |
2646 | ignore_updelay ? 0 : |
2647 | bond->params.updelay * |
2648 | bond->params.miimon); |
2649 | } |
2650 | fallthrough; |
2651 | case BOND_LINK_BACK: |
2652 | if (!link_state) { |
2653 | bond_propose_link_state(slave, BOND_LINK_DOWN); |
2654 | slave_info(bond->dev, slave->dev, "link status down again after %d ms\n" , |
2655 | (bond->params.updelay - slave->delay) * |
2656 | bond->params.miimon); |
2657 | commit++; |
2658 | continue; |
2659 | } |
2660 | |
2661 | if (ignore_updelay) |
2662 | slave->delay = 0; |
2663 | |
2664 | if (slave->delay <= 0) { |
2665 | bond_propose_link_state(slave, BOND_LINK_UP); |
2666 | commit++; |
2667 | ignore_updelay = false; |
2668 | continue; |
2669 | } |
2670 | |
2671 | slave->delay--; |
2672 | break; |
2673 | } |
2674 | } |
2675 | |
2676 | return commit; |
2677 | } |
2678 | |
2679 | static void bond_miimon_link_change(struct bonding *bond, |
2680 | struct slave *slave, |
2681 | char link) |
2682 | { |
2683 | switch (BOND_MODE(bond)) { |
2684 | case BOND_MODE_8023AD: |
2685 | bond_3ad_handle_link_change(slave, link); |
2686 | break; |
2687 | case BOND_MODE_TLB: |
2688 | case BOND_MODE_ALB: |
2689 | bond_alb_handle_link_change(bond, slave, link); |
2690 | break; |
2691 | case BOND_MODE_XOR: |
2692 | bond_update_slave_arr(bond, NULL); |
2693 | break; |
2694 | } |
2695 | } |
2696 | |
2697 | static void bond_miimon_commit(struct bonding *bond) |
2698 | { |
2699 | struct slave *slave, *primary, *active; |
2700 | bool do_failover = false; |
2701 | struct list_head *iter; |
2702 | |
2703 | ASSERT_RTNL(); |
2704 | |
2705 | bond_for_each_slave(bond, slave, iter) { |
2706 | switch (slave->link_new_state) { |
2707 | case BOND_LINK_NOCHANGE: |
2708 | /* For 802.3ad mode, check current slave speed and |
2709 | * duplex again in case its port was disabled after |
2710 | * invalid speed/duplex reporting but recovered before |
2711 | * link monitoring could make a decision on the actual |
2712 | * link status |
2713 | */ |
2714 | if (BOND_MODE(bond) == BOND_MODE_8023AD && |
2715 | slave->link == BOND_LINK_UP) |
2716 | bond_3ad_adapter_speed_duplex_changed(slave); |
2717 | continue; |
2718 | |
2719 | case BOND_LINK_UP: |
2720 | if (bond_update_speed_duplex(slave) && |
2721 | bond_needs_speed_duplex(bond)) { |
2722 | slave->link = BOND_LINK_DOWN; |
2723 | if (net_ratelimit()) |
2724 | slave_warn(bond->dev, slave->dev, |
2725 | "failed to get link speed/duplex\n" ); |
2726 | continue; |
2727 | } |
2728 | bond_set_slave_link_state(slave, BOND_LINK_UP, |
2729 | BOND_SLAVE_NOTIFY_NOW); |
2730 | slave->last_link_up = jiffies; |
2731 | |
2732 | primary = rtnl_dereference(bond->primary_slave); |
2733 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
2734 | /* prevent it from being the active one */ |
2735 | bond_set_backup_slave(slave); |
2736 | } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { |
2737 | /* make it immediately active */ |
2738 | bond_set_active_slave(slave); |
2739 | } |
2740 | |
2741 | slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n" , |
2742 | slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, |
2743 | slave->duplex ? "full" : "half" ); |
2744 | |
2745 | bond_miimon_link_change(bond, slave, BOND_LINK_UP); |
2746 | |
2747 | active = rtnl_dereference(bond->curr_active_slave); |
2748 | if (!active || slave == primary || slave->prio > active->prio) |
2749 | do_failover = true; |
2750 | |
2751 | continue; |
2752 | |
2753 | case BOND_LINK_DOWN: |
2754 | if (slave->link_failure_count < UINT_MAX) |
2755 | slave->link_failure_count++; |
2756 | |
2757 | bond_set_slave_link_state(slave, BOND_LINK_DOWN, |
2758 | BOND_SLAVE_NOTIFY_NOW); |
2759 | |
2760 | if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || |
2761 | BOND_MODE(bond) == BOND_MODE_8023AD) |
2762 | bond_set_slave_inactive_flags(slave, |
2763 | BOND_SLAVE_NOTIFY_NOW); |
2764 | |
2765 | slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n" ); |
2766 | |
2767 | bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); |
2768 | |
2769 | if (slave == rcu_access_pointer(bond->curr_active_slave)) |
2770 | do_failover = true; |
2771 | |
2772 | continue; |
2773 | |
2774 | default: |
2775 | slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n" , |
2776 | slave->link_new_state); |
2777 | bond_propose_link_state(slave, BOND_LINK_NOCHANGE); |
2778 | |
2779 | continue; |
2780 | } |
2781 | } |
2782 | |
2783 | if (do_failover) { |
2784 | block_netpoll_tx(); |
2785 | bond_select_active_slave(bond); |
2786 | unblock_netpoll_tx(); |
2787 | } |
2788 | |
2789 | bond_set_carrier(bond); |
2790 | } |
2791 | |
2792 | /* bond_mii_monitor |
2793 | * |
2794 | * Really a wrapper that splits the mii monitor into two phases: an |
2795 | * inspection, then (if inspection indicates something needs to be done) |
2796 | * an acquisition of appropriate locks followed by a commit phase to |
2797 | * implement whatever link state changes are indicated. |
2798 | */ |
2799 | static void bond_mii_monitor(struct work_struct *work) |
2800 | { |
2801 | struct bonding *bond = container_of(work, struct bonding, |
2802 | mii_work.work); |
2803 | bool should_notify_peers = false; |
2804 | bool commit; |
2805 | unsigned long delay; |
2806 | struct slave *slave; |
2807 | struct list_head *iter; |
2808 | |
2809 | delay = msecs_to_jiffies(m: bond->params.miimon); |
2810 | |
2811 | if (!bond_has_slaves(bond)) |
2812 | goto re_arm; |
2813 | |
2814 | rcu_read_lock(); |
2815 | should_notify_peers = bond_should_notify_peers(bond); |
2816 | commit = !!bond_miimon_inspect(bond); |
2817 | if (bond->send_peer_notif) { |
2818 | rcu_read_unlock(); |
2819 | if (rtnl_trylock()) { |
2820 | bond->send_peer_notif--; |
2821 | rtnl_unlock(); |
2822 | } |
2823 | } else { |
2824 | rcu_read_unlock(); |
2825 | } |
2826 | |
2827 | if (commit) { |
2828 | /* Race avoidance with bond_close cancel of workqueue */ |
2829 | if (!rtnl_trylock()) { |
2830 | delay = 1; |
2831 | should_notify_peers = false; |
2832 | goto re_arm; |
2833 | } |
2834 | |
2835 | bond_for_each_slave(bond, slave, iter) { |
2836 | bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); |
2837 | } |
2838 | bond_miimon_commit(bond); |
2839 | |
2840 | rtnl_unlock(); /* might sleep, hold no other locks */ |
2841 | } |
2842 | |
2843 | re_arm: |
2844 | if (bond->params.miimon) |
2845 | queue_delayed_work(wq: bond->wq, dwork: &bond->mii_work, delay); |
2846 | |
2847 | if (should_notify_peers) { |
2848 | if (!rtnl_trylock()) |
2849 | return; |
2850 | call_netdevice_notifiers(val: NETDEV_NOTIFY_PEERS, dev: bond->dev); |
2851 | rtnl_unlock(); |
2852 | } |
2853 | } |
2854 | |
2855 | static int bond_upper_dev_walk(struct net_device *upper, |
2856 | struct netdev_nested_priv *priv) |
2857 | { |
2858 | __be32 ip = *(__be32 *)priv->data; |
2859 | |
2860 | return ip == bond_confirm_addr(dev: upper, dst: 0, local: ip); |
2861 | } |
2862 | |
2863 | static bool bond_has_this_ip(struct bonding *bond, __be32 ip) |
2864 | { |
2865 | struct netdev_nested_priv priv = { |
2866 | .data = (void *)&ip, |
2867 | }; |
2868 | bool ret = false; |
2869 | |
2870 | if (ip == bond_confirm_addr(dev: bond->dev, dst: 0, local: ip)) |
2871 | return true; |
2872 | |
2873 | rcu_read_lock(); |
2874 | if (netdev_walk_all_upper_dev_rcu(dev: bond->dev, fn: bond_upper_dev_walk, priv: &priv)) |
2875 | ret = true; |
2876 | rcu_read_unlock(); |
2877 | |
2878 | return ret; |
2879 | } |
2880 | |
2881 | #define BOND_VLAN_PROTO_NONE cpu_to_be16(0xffff) |
2882 | |
2883 | static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags, |
2884 | struct sk_buff *skb) |
2885 | { |
2886 | struct net_device *bond_dev = slave->bond->dev; |
2887 | struct net_device *slave_dev = slave->dev; |
2888 | struct bond_vlan_tag *outer_tag = tags; |
2889 | |
2890 | if (!tags || tags->vlan_proto == BOND_VLAN_PROTO_NONE) |
2891 | return true; |
2892 | |
2893 | tags++; |
2894 | |
2895 | /* Go through all the tags backwards and add them to the packet */ |
2896 | while (tags->vlan_proto != BOND_VLAN_PROTO_NONE) { |
2897 | if (!tags->vlan_id) { |
2898 | tags++; |
2899 | continue; |
2900 | } |
2901 | |
2902 | slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n" , |
2903 | ntohs(outer_tag->vlan_proto), tags->vlan_id); |
2904 | skb = vlan_insert_tag_set_proto(skb, vlan_proto: tags->vlan_proto, |
2905 | vlan_tci: tags->vlan_id); |
2906 | if (!skb) { |
2907 | net_err_ratelimited("failed to insert inner VLAN tag\n" ); |
2908 | return false; |
2909 | } |
2910 | |
2911 | tags++; |
2912 | } |
2913 | /* Set the outer tag */ |
2914 | if (outer_tag->vlan_id) { |
2915 | slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n" , |
2916 | ntohs(outer_tag->vlan_proto), outer_tag->vlan_id); |
2917 | __vlan_hwaccel_put_tag(skb, vlan_proto: outer_tag->vlan_proto, |
2918 | vlan_tci: outer_tag->vlan_id); |
2919 | } |
2920 | |
2921 | return true; |
2922 | } |
2923 | |
2924 | /* We go to the (large) trouble of VLAN tagging ARP frames because |
2925 | * switches in VLAN mode (especially if ports are configured as |
2926 | * "native" to a VLAN) might not pass non-tagged frames. |
2927 | */ |
2928 | static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, |
2929 | __be32 src_ip, struct bond_vlan_tag *tags) |
2930 | { |
2931 | struct net_device *bond_dev = slave->bond->dev; |
2932 | struct net_device *slave_dev = slave->dev; |
2933 | struct sk_buff *skb; |
2934 | |
2935 | slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n" , |
2936 | arp_op, &dest_ip, &src_ip); |
2937 | |
2938 | skb = arp_create(type: arp_op, ETH_P_ARP, dest_ip, dev: slave_dev, src_ip, |
2939 | NULL, src_hw: slave_dev->dev_addr, NULL); |
2940 | |
2941 | if (!skb) { |
2942 | net_err_ratelimited("ARP packet allocation failed\n" ); |
2943 | return; |
2944 | } |
2945 | |
2946 | if (bond_handle_vlan(slave, tags, skb)) { |
2947 | slave_update_last_tx(slave); |
2948 | arp_xmit(skb); |
2949 | } |
2950 | |
2951 | return; |
2952 | } |
2953 | |
2954 | /* Validate the device path between the @start_dev and the @end_dev. |
2955 | * The path is valid if the @end_dev is reachable through device |
2956 | * stacking. |
2957 | * When the path is validated, collect any vlan information in the |
2958 | * path. |
2959 | */ |
2960 | struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, |
2961 | struct net_device *end_dev, |
2962 | int level) |
2963 | { |
2964 | struct bond_vlan_tag *tags; |
2965 | struct net_device *upper; |
2966 | struct list_head *iter; |
2967 | |
2968 | if (start_dev == end_dev) { |
2969 | tags = kcalloc(n: level + 1, size: sizeof(*tags), GFP_ATOMIC); |
2970 | if (!tags) |
2971 | return ERR_PTR(error: -ENOMEM); |
2972 | tags[level].vlan_proto = BOND_VLAN_PROTO_NONE; |
2973 | return tags; |
2974 | } |
2975 | |
2976 | netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { |
2977 | tags = bond_verify_device_path(start_dev: upper, end_dev, level: level + 1); |
2978 | if (IS_ERR_OR_NULL(ptr: tags)) { |
2979 | if (IS_ERR(ptr: tags)) |
2980 | return tags; |
2981 | continue; |
2982 | } |
2983 | if (is_vlan_dev(dev: upper)) { |
2984 | tags[level].vlan_proto = vlan_dev_vlan_proto(dev: upper); |
2985 | tags[level].vlan_id = vlan_dev_vlan_id(dev: upper); |
2986 | } |
2987 | |
2988 | return tags; |
2989 | } |
2990 | |
2991 | return NULL; |
2992 | } |
2993 | |
2994 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) |
2995 | { |
2996 | struct rtable *rt; |
2997 | struct bond_vlan_tag *tags; |
2998 | __be32 *targets = bond->params.arp_targets, addr; |
2999 | int i; |
3000 | |
3001 | for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { |
3002 | slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n" , |
3003 | __func__, &targets[i]); |
3004 | tags = NULL; |
3005 | |
3006 | /* Find out through which dev should the packet go */ |
3007 | rt = ip_route_output(net: dev_net(dev: bond->dev), daddr: targets[i], saddr: 0, |
3008 | RTO_ONLINK, oif: 0); |
3009 | if (IS_ERR(ptr: rt)) { |
3010 | /* there's no route to target - try to send arp |
3011 | * probe to generate any traffic (arp_validate=0) |
3012 | */ |
3013 | if (bond->params.arp_validate) |
3014 | pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n" , |
3015 | bond->dev->name, |
3016 | &targets[i]); |
3017 | bond_arp_send(slave, ARPOP_REQUEST, dest_ip: targets[i], |
3018 | src_ip: 0, tags); |
3019 | continue; |
3020 | } |
3021 | |
3022 | /* bond device itself */ |
3023 | if (rt->dst.dev == bond->dev) |
3024 | goto found; |
3025 | |
3026 | rcu_read_lock(); |
3027 | tags = bond_verify_device_path(start_dev: bond->dev, end_dev: rt->dst.dev, level: 0); |
3028 | rcu_read_unlock(); |
3029 | |
3030 | if (!IS_ERR_OR_NULL(ptr: tags)) |
3031 | goto found; |
3032 | |
3033 | /* Not our device - skip */ |
3034 | slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n" , |
3035 | &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL" ); |
3036 | |
3037 | ip_rt_put(rt); |
3038 | continue; |
3039 | |
3040 | found: |
3041 | addr = bond_confirm_addr(dev: rt->dst.dev, dst: targets[i], local: 0); |
3042 | ip_rt_put(rt); |
3043 | bond_arp_send(slave, ARPOP_REQUEST, dest_ip: targets[i], src_ip: addr, tags); |
3044 | kfree(objp: tags); |
3045 | } |
3046 | } |
3047 | |
3048 | static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) |
3049 | { |
3050 | int i; |
3051 | |
3052 | if (!sip || !bond_has_this_ip(bond, ip: tip)) { |
3053 | slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n" , |
3054 | __func__, &sip, &tip); |
3055 | return; |
3056 | } |
3057 | |
3058 | i = bond_get_targets_ip(targets: bond->params.arp_targets, ip: sip); |
3059 | if (i == -1) { |
3060 | slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n" , |
3061 | __func__, &sip); |
3062 | return; |
3063 | } |
3064 | slave->last_rx = jiffies; |
3065 | slave->target_last_arp_rx[i] = jiffies; |
3066 | } |
3067 | |
3068 | static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, |
3069 | struct slave *slave) |
3070 | { |
3071 | struct arphdr *arp = (struct arphdr *)skb->data; |
3072 | struct slave *curr_active_slave, *curr_arp_slave; |
3073 | unsigned char *arp_ptr; |
3074 | __be32 sip, tip; |
3075 | unsigned int alen; |
3076 | |
3077 | alen = arp_hdr_len(dev: bond->dev); |
3078 | |
3079 | if (alen > skb_headlen(skb)) { |
3080 | arp = kmalloc(size: alen, GFP_ATOMIC); |
3081 | if (!arp) |
3082 | goto out_unlock; |
3083 | if (skb_copy_bits(skb, offset: 0, to: arp, len: alen) < 0) |
3084 | goto out_unlock; |
3085 | } |
3086 | |
3087 | if (arp->ar_hln != bond->dev->addr_len || |
3088 | skb->pkt_type == PACKET_OTHERHOST || |
3089 | skb->pkt_type == PACKET_LOOPBACK || |
3090 | arp->ar_hrd != htons(ARPHRD_ETHER) || |
3091 | arp->ar_pro != htons(ETH_P_IP) || |
3092 | arp->ar_pln != 4) |
3093 | goto out_unlock; |
3094 | |
3095 | arp_ptr = (unsigned char *)(arp + 1); |
3096 | arp_ptr += bond->dev->addr_len; |
3097 | memcpy(&sip, arp_ptr, 4); |
3098 | arp_ptr += 4 + bond->dev->addr_len; |
3099 | memcpy(&tip, arp_ptr, 4); |
3100 | |
3101 | slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n" , |
3102 | __func__, slave->dev->name, bond_slave_state(slave), |
3103 | bond->params.arp_validate, slave_do_arp_validate(bond, slave), |
3104 | &sip, &tip); |
3105 | |
3106 | curr_active_slave = rcu_dereference(bond->curr_active_slave); |
3107 | curr_arp_slave = rcu_dereference(bond->current_arp_slave); |
3108 | |
3109 | /* We 'trust' the received ARP enough to validate it if: |
3110 | * |
3111 | * (a) the slave receiving the ARP is active (which includes the |
3112 | * current ARP slave, if any), or |
3113 | * |
3114 | * (b) the receiving slave isn't active, but there is a currently |
3115 | * active slave and it received valid arp reply(s) after it became |
3116 | * the currently active slave, or |
3117 | * |
3118 | * (c) there is an ARP slave that sent an ARP during the prior ARP |
3119 | * interval, and we receive an ARP reply on any slave. We accept |
3120 | * these because switch FDB update delays may deliver the ARP |
3121 | * reply to a slave other than the sender of the ARP request. |
3122 | * |
3123 | * Note: for (b), backup slaves are receiving the broadcast ARP |
3124 | * request, not a reply. This request passes from the sending |
3125 | * slave through the L2 switch(es) to the receiving slave. Since |
3126 | * this is checking the request, sip/tip are swapped for |
3127 | * validation. |
3128 | * |
3129 | * This is done to avoid endless looping when we can't reach the |
3130 | * arp_ip_target and fool ourselves with our own arp requests. |
3131 | */ |
3132 | if (bond_is_active_slave(slave)) |
3133 | bond_validate_arp(bond, slave, sip, tip); |
3134 | else if (curr_active_slave && |
3135 | time_after(slave_last_rx(bond, curr_active_slave), |
3136 | curr_active_slave->last_link_up)) |
3137 | bond_validate_arp(bond, slave, sip: tip, tip: sip); |
3138 | else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) && |
3139 | bond_time_in_interval(bond, last_act: slave_last_tx(slave: curr_arp_slave), mod: 1)) |
3140 | bond_validate_arp(bond, slave, sip, tip); |
3141 | |
3142 | out_unlock: |
3143 | if (arp != (struct arphdr *)skb->data) |
3144 | kfree(objp: arp); |
3145 | return RX_HANDLER_ANOTHER; |
3146 | } |
3147 | |
3148 | #if IS_ENABLED(CONFIG_IPV6) |
3149 | static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr, |
3150 | const struct in6_addr *saddr, struct bond_vlan_tag *tags) |
3151 | { |
3152 | struct net_device *bond_dev = slave->bond->dev; |
3153 | struct net_device *slave_dev = slave->dev; |
3154 | struct in6_addr mcaddr; |
3155 | struct sk_buff *skb; |
3156 | |
3157 | slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n" , |
3158 | daddr, saddr); |
3159 | |
3160 | skb = ndisc_ns_create(dev: slave_dev, solicit: daddr, saddr, nonce: 0); |
3161 | if (!skb) { |
3162 | net_err_ratelimited("NS packet allocation failed\n" ); |
3163 | return; |
3164 | } |
3165 | |
3166 | addrconf_addr_solict_mult(addr: daddr, solicited: &mcaddr); |
3167 | if (bond_handle_vlan(slave, tags, skb)) { |
3168 | slave_update_last_tx(slave); |
3169 | ndisc_send_skb(skb, daddr: &mcaddr, saddr); |
3170 | } |
3171 | } |
3172 | |
3173 | static void bond_ns_send_all(struct bonding *bond, struct slave *slave) |
3174 | { |
3175 | struct in6_addr *targets = bond->params.ns_targets; |
3176 | struct bond_vlan_tag *tags; |
3177 | struct dst_entry *dst; |
3178 | struct in6_addr saddr; |
3179 | struct flowi6 fl6; |
3180 | int i; |
3181 | |
	for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
		slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
3184 | __func__, &targets[i]); |
3185 | tags = NULL; |
3186 | |
3187 | /* Find out through which dev should the packet go */ |
3188 | memset(&fl6, 0, sizeof(struct flowi6)); |
3189 | fl6.daddr = targets[i]; |
3190 | fl6.flowi6_oif = bond->dev->ifindex; |
3191 | |
		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
3193 | if (dst->error) { |
3194 | dst_release(dst); |
3195 | /* there's no route to target - try to send arp |
3196 | * probe to generate any traffic (arp_validate=0) |
3197 | */ |
3198 | if (bond->params.arp_validate) |
3199 | pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n" , |
3200 | bond->dev->name, |
3201 | &targets[i]); |
3202 | bond_ns_send(slave, daddr: &targets[i], saddr: &in6addr_any, tags); |
3203 | continue; |
3204 | } |
3205 | |
3206 | /* bond device itself */ |
3207 | if (dst->dev == bond->dev) |
3208 | goto found; |
3209 | |
3210 | rcu_read_lock(); |
		tags = bond_verify_device_path(bond->dev, dst->dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
			  &targets[i], dst->dev ? dst->dev->name : "NULL");
3220 | |
3221 | dst_release(dst); |
3222 | continue; |
3223 | |
3224 | found: |
		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
			bond_ns_send(slave, &targets[i], &saddr, tags);
		else
			bond_ns_send(slave, &targets[i], &in6addr_any, tags);

		dst_release(dst);
		kfree(tags);
3232 | } |
3233 | } |
3234 | |
3235 | static int bond_confirm_addr6(struct net_device *dev, |
3236 | struct netdev_nested_priv *priv) |
3237 | { |
3238 | struct in6_addr *addr = (struct in6_addr *)priv->data; |
3239 | |
	return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
3241 | } |
3242 | |
3243 | static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr) |
3244 | { |
3245 | struct netdev_nested_priv priv = { |
3246 | .data = addr, |
3247 | }; |
3248 | int ret = false; |
3249 | |
	if (bond_confirm_addr6(bond->dev, &priv))
3251 | return true; |
3252 | |
3253 | rcu_read_lock(); |
	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
3255 | ret = true; |
3256 | rcu_read_unlock(); |
3257 | |
3258 | return ret; |
3259 | } |
3260 | |
3261 | static void bond_validate_na(struct bonding *bond, struct slave *slave, |
3262 | struct in6_addr *saddr, struct in6_addr *daddr) |
3263 | { |
3264 | int i; |
3265 | |
3266 | /* Ignore NAs that: |
3267 | * 1. Source address is unspecified address. |
	 * 2. Dest address is neither the all-nodes multicast address nor
	 *    an address that exists on the bond interface.
3270 | */ |
	if (ipv6_addr_any(saddr) ||
	    (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
	     !bond_has_this_ip6(bond, daddr))) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
3275 | __func__, saddr, daddr); |
3276 | return; |
3277 | } |
3278 | |
	i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
	if (i == -1) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
3282 | __func__, saddr); |
3283 | return; |
3284 | } |
3285 | slave->last_rx = jiffies; |
3286 | slave->target_last_arp_rx[i] = jiffies; |
3287 | } |
3288 | |
3289 | static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, |
3290 | struct slave *slave) |
3291 | { |
3292 | struct slave *curr_active_slave, *curr_arp_slave; |
3293 | struct in6_addr *saddr, *daddr; |
3294 | struct { |
3295 | struct ipv6hdr ip6; |
3296 | struct icmp6hdr icmp6; |
3297 | } *combined, _combined; |
3298 | |
3299 | if (skb->pkt_type == PACKET_OTHERHOST || |
3300 | skb->pkt_type == PACKET_LOOPBACK) |
3301 | goto out; |
3302 | |
	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
3304 | if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP || |
3305 | (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION && |
3306 | combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)) |
3307 | goto out; |
3308 | |
3309 | saddr = &combined->ip6.saddr; |
3310 | daddr = &combined->ip6.daddr; |
3311 | |
3312 | slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n" , |
3313 | __func__, slave->dev->name, bond_slave_state(slave), |
3314 | bond->params.arp_validate, slave_do_arp_validate(bond, slave), |
3315 | saddr, daddr); |
3316 | |
3317 | curr_active_slave = rcu_dereference(bond->curr_active_slave); |
3318 | curr_arp_slave = rcu_dereference(bond->current_arp_slave); |
3319 | |
3320 | /* We 'trust' the received ARP enough to validate it if: |
3321 | * see bond_arp_rcv(). |
3322 | */ |
3323 | if (bond_is_active_slave(slave)) |
3324 | bond_validate_na(bond, slave, saddr, daddr); |
3325 | else if (curr_active_slave && |
3326 | time_after(slave_last_rx(bond, curr_active_slave), |
3327 | curr_active_slave->last_link_up)) |
		bond_validate_na(bond, slave, daddr, saddr);
	else if (curr_arp_slave &&
		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3331 | bond_validate_na(bond, slave, saddr, daddr); |
3332 | |
3333 | out: |
3334 | return RX_HANDLER_ANOTHER; |
3335 | } |
3336 | #endif |
3337 | |
3338 | int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, |
3339 | struct slave *slave) |
3340 | { |
3341 | #if IS_ENABLED(CONFIG_IPV6) |
3342 | bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6); |
3343 | #endif |
3344 | bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); |
3345 | |
3346 | slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n" , |
3347 | __func__, skb->dev->name); |
3348 | |
3349 | /* Use arp validate logic for both ARP and NS */ |
3350 | if (!slave_do_arp_validate(bond, slave)) { |
3351 | if ((slave_do_arp_validate_only(bond) && is_arp) || |
3352 | #if IS_ENABLED(CONFIG_IPV6) |
3353 | (slave_do_arp_validate_only(bond) && is_ipv6) || |
3354 | #endif |
3355 | !slave_do_arp_validate_only(bond)) |
3356 | slave->last_rx = jiffies; |
3357 | return RX_HANDLER_ANOTHER; |
3358 | } else if (is_arp) { |
3359 | return bond_arp_rcv(skb, bond, slave); |
3360 | #if IS_ENABLED(CONFIG_IPV6) |
3361 | } else if (is_ipv6) { |
3362 | return bond_na_rcv(skb, bond, slave); |
3363 | #endif |
3364 | } else { |
3365 | return RX_HANDLER_ANOTHER; |
3366 | } |
3367 | } |
3368 | |
3369 | static void bond_send_validate(struct bonding *bond, struct slave *slave) |
3370 | { |
3371 | bond_arp_send_all(bond, slave); |
3372 | #if IS_ENABLED(CONFIG_IPV6) |
3373 | bond_ns_send_all(bond, slave); |
3374 | #endif |
3375 | } |
3376 | |
3377 | /* function to verify if we're in the arp_interval timeslice, returns true if |
3378 | * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval + |
3379 | * arp_interval/2) . the arp_interval/2 is needed for really fast networks. |
3380 | */ |
3381 | static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, |
3382 | int mod) |
3383 | { |
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3385 | |
3386 | return time_in_range(jiffies, |
3387 | last_act - delta_in_ticks, |
3388 | last_act + mod * delta_in_ticks + delta_in_ticks/2); |
3389 | } |
3390 | |
3391 | /* This function is called regularly to monitor each slave's link |
3392 | * ensuring that traffic is being sent and received when arp monitoring |
3393 | * is used in load-balancing mode. if the adapter has been dormant, then an |
3394 | * arp is transmitted to generate traffic. see activebackup_arp_monitor for |
3395 | * arp monitoring in active backup mode. |
3396 | */ |
3397 | static void bond_loadbalance_arp_mon(struct bonding *bond) |
3398 | { |
3399 | struct slave *slave, *oldcurrent; |
3400 | struct list_head *iter; |
3401 | int do_failover = 0, slave_state_changed = 0; |
3402 | |
3403 | if (!bond_has_slaves(bond)) |
3404 | goto re_arm; |
3405 | |
3406 | rcu_read_lock(); |
3407 | |
3408 | oldcurrent = rcu_dereference(bond->curr_active_slave); |
3409 | /* see if any of the previous devices are up now (i.e. they have |
3410 | * xmt and rcv traffic). the curr_active_slave does not come into |
3411 | * the picture unless it is null. also, slave->last_link_up is not |
3412 | * needed here because we send an arp on each slave and give a slave |
3413 | * as long as it needs to get the tx/rx within the delta. |
3414 | * TODO: what about up/down delay in arp mode? it wasn't here before |
3415 | * so it can wait |
3416 | */ |
3417 | bond_for_each_slave_rcu(bond, slave, iter) { |
3418 | unsigned long last_tx = slave_last_tx(slave); |
3419 | |
3420 | bond_propose_link_state(slave, BOND_LINK_NOCHANGE); |
3421 | |
3422 | if (slave->link != BOND_LINK_UP) { |
			if (bond_time_in_interval(bond, last_tx, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {
3425 | |
3426 | bond_propose_link_state(slave, BOND_LINK_UP); |
3427 | slave_state_changed = 1; |
3428 | |
3429 | /* primary_slave has no meaning in round-robin |
3430 | * mode. the window of a slave being up and |
3431 | * curr_active_slave being null after enslaving |
3432 | * is closed. |
3433 | */ |
3434 | if (!oldcurrent) { |
3435 | slave_info(bond->dev, slave->dev, "link status definitely up\n" ); |
3436 | do_failover = 1; |
3437 | } else { |
3438 | slave_info(bond->dev, slave->dev, "interface is now up\n" ); |
3439 | } |
3440 | } |
3441 | } else { |
3442 | /* slave->link == BOND_LINK_UP */ |
3443 | |
3444 | /* not all switches will respond to an arp request |
3445 | * when the source ip is 0, so don't take the link down |
3446 | * if we don't know our ip yet |
3447 | */ |
			if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
			    !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3450 | |
3451 | bond_propose_link_state(slave, BOND_LINK_DOWN); |
3452 | slave_state_changed = 1; |
3453 | |
3454 | if (slave->link_failure_count < UINT_MAX) |
3455 | slave->link_failure_count++; |
3456 | |
3457 | slave_info(bond->dev, slave->dev, "interface is now down\n" ); |
3458 | |
3459 | if (slave == oldcurrent) |
3460 | do_failover = 1; |
3461 | } |
3462 | } |
3463 | |
3464 | /* note: if switch is in round-robin mode, all links |
3465 | * must tx arp to ensure all links rx an arp - otherwise |
3466 | * links may oscillate or not come up at all; if switch is |
3467 | * in something like xor mode, there is nothing we can |
3468 | * do - all replies will be rx'ed on same link causing slaves |
3469 | * to be unstable during low/no traffic periods |
3470 | */ |
3471 | if (bond_slave_is_up(slave)) |
3472 | bond_send_validate(bond, slave); |
3473 | } |
3474 | |
3475 | rcu_read_unlock(); |
3476 | |
3477 | if (do_failover || slave_state_changed) { |
3478 | if (!rtnl_trylock()) |
3479 | goto re_arm; |
3480 | |
3481 | bond_for_each_slave(bond, slave, iter) { |
3482 | if (slave->link_new_state != BOND_LINK_NOCHANGE) |
3483 | slave->link = slave->link_new_state; |
3484 | } |
3485 | |
3486 | if (slave_state_changed) { |
3487 | bond_slave_state_change(bond); |
3488 | if (BOND_MODE(bond) == BOND_MODE_XOR) |
3489 | bond_update_slave_arr(bond, NULL); |
3490 | } |
3491 | if (do_failover) { |
3492 | block_netpoll_tx(); |
3493 | bond_select_active_slave(bond); |
3494 | unblock_netpoll_tx(); |
3495 | } |
3496 | rtnl_unlock(); |
3497 | } |
3498 | |
3499 | re_arm: |
3500 | if (bond->params.arp_interval) |
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
3503 | } |
3504 | |
3505 | /* Called to inspect slaves for active-backup mode ARP monitor link state |
3506 | * changes. Sets proposed link state in slaves to specify what action |
3507 | * should take place for the slave. Returns 0 if no changes are found, >0 |
3508 | * if changes to link states must be committed. |
3509 | * |
3510 | * Called with rcu_read_lock held. |
3511 | */ |
3512 | static int bond_ab_arp_inspect(struct bonding *bond) |
3513 | { |
3514 | unsigned long last_tx, last_rx; |
3515 | struct list_head *iter; |
3516 | struct slave *slave; |
3517 | int commit = 0; |
3518 | |
3519 | bond_for_each_slave_rcu(bond, slave, iter) { |
3520 | bond_propose_link_state(slave, BOND_LINK_NOCHANGE); |
3521 | last_rx = slave_last_rx(bond, slave); |
3522 | |
3523 | if (slave->link != BOND_LINK_UP) { |
			if (bond_time_in_interval(bond, last_rx, 1)) {
3525 | bond_propose_link_state(slave, BOND_LINK_UP); |
3526 | commit++; |
3527 | } else if (slave->link == BOND_LINK_BACK) { |
3528 | bond_propose_link_state(slave, BOND_LINK_FAIL); |
3529 | commit++; |
3530 | } |
3531 | continue; |
3532 | } |
3533 | |
3534 | /* Give slaves 2*delta after being enslaved or made |
3535 | * active. This avoids bouncing, as the last receive |
3536 | * times need a full ARP monitor cycle to be updated. |
3537 | */ |
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
3539 | continue; |
3540 | |
3541 | /* Backup slave is down if: |
3542 | * - No current_arp_slave AND |
3543 | * - more than (missed_max+1)*delta since last receive AND |
3544 | * - the bond has an IP address |
3545 | * |
3546 | * Note: a non-null current_arp_slave indicates |
3547 | * the curr_active_slave went down and we are |
3548 | * searching for a new one; under this condition |
3549 | * we only take the curr_active_slave down - this |
3550 | * gives each slave a chance to tx/rx traffic |
3551 | * before being taken out |
3552 | */ |
3553 | if (!bond_is_active_slave(slave) && |
3554 | !rcu_access_pointer(bond->current_arp_slave) && |
		    !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3556 | bond_propose_link_state(slave, BOND_LINK_DOWN); |
3557 | commit++; |
3558 | } |
3559 | |
3560 | /* Active slave is down if: |
3561 | * - more than missed_max*delta since transmitting OR |
3562 | * - (more than missed_max*delta since receive AND |
3563 | * the bond has an IP address) |
3564 | */ |
3565 | last_tx = slave_last_tx(slave); |
3566 | if (bond_is_active_slave(slave) && |
		    (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
		     !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3569 | bond_propose_link_state(slave, BOND_LINK_DOWN); |
3570 | commit++; |
3571 | } |
3572 | } |
3573 | |
3574 | return commit; |
3575 | } |
3576 | |
3577 | /* Called to commit link state changes noted by inspection step of |
3578 | * active-backup mode ARP monitor. |
3579 | * |
 * Called with RTNL held.
3581 | */ |
3582 | static void bond_ab_arp_commit(struct bonding *bond) |
3583 | { |
3584 | bool do_failover = false; |
3585 | struct list_head *iter; |
3586 | unsigned long last_tx; |
3587 | struct slave *slave; |
3588 | |
3589 | bond_for_each_slave(bond, slave, iter) { |
3590 | switch (slave->link_new_state) { |
3591 | case BOND_LINK_NOCHANGE: |
3592 | continue; |
3593 | |
3594 | case BOND_LINK_UP: |
3595 | last_tx = slave_last_tx(slave); |
3596 | if (rtnl_dereference(bond->curr_active_slave) != slave || |
3597 | (!rtnl_dereference(bond->curr_active_slave) && |
			     bond_time_in_interval(bond, last_tx, 1))) {
3599 | struct slave *current_arp_slave; |
3600 | |
3601 | current_arp_slave = rtnl_dereference(bond->current_arp_slave); |
3602 | bond_set_slave_link_state(slave, BOND_LINK_UP, |
3603 | BOND_SLAVE_NOTIFY_NOW); |
3604 | if (current_arp_slave) { |
3605 | bond_set_slave_inactive_flags( |
						current_arp_slave,
3607 | BOND_SLAVE_NOTIFY_NOW); |
3608 | RCU_INIT_POINTER(bond->current_arp_slave, NULL); |
3609 | } |
3610 | |
3611 | slave_info(bond->dev, slave->dev, "link status definitely up\n" ); |
3612 | |
3613 | if (!rtnl_dereference(bond->curr_active_slave) || |
3614 | slave == rtnl_dereference(bond->primary_slave) || |
3615 | slave->prio > rtnl_dereference(bond->curr_active_slave)->prio) |
3616 | do_failover = true; |
3617 | |
3618 | } |
3619 | |
3620 | continue; |
3621 | |
3622 | case BOND_LINK_DOWN: |
3623 | if (slave->link_failure_count < UINT_MAX) |
3624 | slave->link_failure_count++; |
3625 | |
3626 | bond_set_slave_link_state(slave, BOND_LINK_DOWN, |
3627 | BOND_SLAVE_NOTIFY_NOW); |
3628 | bond_set_slave_inactive_flags(slave, |
3629 | BOND_SLAVE_NOTIFY_NOW); |
3630 | |
3631 | slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n" ); |
3632 | |
3633 | if (slave == rtnl_dereference(bond->curr_active_slave)) { |
3634 | RCU_INIT_POINTER(bond->current_arp_slave, NULL); |
3635 | do_failover = true; |
3636 | } |
3637 | |
3638 | continue; |
3639 | |
3640 | case BOND_LINK_FAIL: |
3641 | bond_set_slave_link_state(slave, BOND_LINK_FAIL, |
3642 | BOND_SLAVE_NOTIFY_NOW); |
3643 | bond_set_slave_inactive_flags(slave, |
3644 | BOND_SLAVE_NOTIFY_NOW); |
3645 | |
3646 | /* A slave has just been enslaved and has become |
3647 | * the current active slave. |
3648 | */ |
3649 | if (rtnl_dereference(bond->curr_active_slave)) |
3650 | RCU_INIT_POINTER(bond->current_arp_slave, NULL); |
3651 | continue; |
3652 | |
3653 | default: |
3654 | slave_err(bond->dev, slave->dev, |
3655 | "impossible: link_new_state %d on slave\n" , |
3656 | slave->link_new_state); |
3657 | continue; |
3658 | } |
3659 | } |
3660 | |
3661 | if (do_failover) { |
3662 | block_netpoll_tx(); |
3663 | bond_select_active_slave(bond); |
3664 | unblock_netpoll_tx(); |
3665 | } |
3666 | |
3667 | bond_set_carrier(bond); |
3668 | } |
3669 | |
3670 | /* Send ARP probes for active-backup mode ARP monitor. |
3671 | * |
3672 | * Called with rcu_read_lock held. |
3673 | */ |
3674 | static bool bond_ab_arp_probe(struct bonding *bond) |
3675 | { |
3676 | struct slave *slave, *before = NULL, *new_slave = NULL, |
3677 | *curr_arp_slave = rcu_dereference(bond->current_arp_slave), |
3678 | *curr_active_slave = rcu_dereference(bond->curr_active_slave); |
3679 | struct list_head *iter; |
3680 | bool found = false; |
3681 | bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; |
3682 | |
3683 | if (curr_arp_slave && curr_active_slave) |
3684 | netdev_info(dev: bond->dev, format: "PROBE: c_arp %s && cas %s BAD\n" , |
3685 | curr_arp_slave->dev->name, |
3686 | curr_active_slave->dev->name); |
3687 | |
3688 | if (curr_active_slave) { |
		bond_send_validate(bond, curr_active_slave);
3690 | return should_notify_rtnl; |
3691 | } |
3692 | |
3693 | /* if we don't have a curr_active_slave, search for the next available |
3694 | * backup slave from the current_arp_slave and make it the candidate |
3695 | * for becoming the curr_active_slave |
3696 | */ |
3697 | |
3698 | if (!curr_arp_slave) { |
3699 | curr_arp_slave = bond_first_slave_rcu(bond); |
3700 | if (!curr_arp_slave) |
3701 | return should_notify_rtnl; |
3702 | } |
3703 | |
3704 | bond_for_each_slave_rcu(bond, slave, iter) { |
3705 | if (!found && !before && bond_slave_is_up(slave)) |
3706 | before = slave; |
3707 | |
3708 | if (found && !new_slave && bond_slave_is_up(slave)) |
3709 | new_slave = slave; |
3710 | /* if the link state is up at this point, we |
3711 | * mark it down - this can happen if we have |
3712 | * simultaneous link failures and |
3713 | * reselect_active_interface doesn't make this |
3714 | * one the current slave so it is still marked |
3715 | * up when it is actually down |
3716 | */ |
3717 | if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { |
3718 | bond_set_slave_link_state(slave, BOND_LINK_DOWN, |
3719 | BOND_SLAVE_NOTIFY_LATER); |
3720 | if (slave->link_failure_count < UINT_MAX) |
3721 | slave->link_failure_count++; |
3722 | |
3723 | bond_set_slave_inactive_flags(slave, |
3724 | BOND_SLAVE_NOTIFY_LATER); |
3725 | |
3726 | slave_info(bond->dev, slave->dev, "backup interface is now down\n" ); |
3727 | } |
3728 | if (slave == curr_arp_slave) |
3729 | found = true; |
3730 | } |
3731 | |
3732 | if (!new_slave && before) |
3733 | new_slave = before; |
3734 | |
3735 | if (!new_slave) |
3736 | goto check_state; |
3737 | |
	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_send_validate(bond, new_slave);
3742 | new_slave->last_link_up = jiffies; |
3743 | rcu_assign_pointer(bond->current_arp_slave, new_slave); |
3744 | |
3745 | check_state: |
3746 | bond_for_each_slave_rcu(bond, slave, iter) { |
3747 | if (slave->should_notify || slave->should_notify_link) { |
3748 | should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; |
3749 | break; |
3750 | } |
3751 | } |
3752 | return should_notify_rtnl; |
3753 | } |
3754 | |
3755 | static void bond_activebackup_arp_mon(struct bonding *bond) |
3756 | { |
3757 | bool should_notify_peers = false; |
3758 | bool should_notify_rtnl = false; |
3759 | int delta_in_ticks; |
3760 | |
	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3762 | |
3763 | if (!bond_has_slaves(bond)) |
3764 | goto re_arm; |
3765 | |
3766 | rcu_read_lock(); |
3767 | |
3768 | should_notify_peers = bond_should_notify_peers(bond); |
3769 | |
3770 | if (bond_ab_arp_inspect(bond)) { |
3771 | rcu_read_unlock(); |
3772 | |
3773 | /* Race avoidance with bond_close flush of workqueue */ |
3774 | if (!rtnl_trylock()) { |
3775 | delta_in_ticks = 1; |
3776 | should_notify_peers = false; |
3777 | goto re_arm; |
3778 | } |
3779 | |
3780 | bond_ab_arp_commit(bond); |
3781 | |
3782 | rtnl_unlock(); |
3783 | rcu_read_lock(); |
3784 | } |
3785 | |
3786 | should_notify_rtnl = bond_ab_arp_probe(bond); |
3787 | rcu_read_unlock(); |
3788 | |
3789 | re_arm: |
3790 | if (bond->params.arp_interval) |
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3792 | |
3793 | if (should_notify_peers || should_notify_rtnl) { |
3794 | if (!rtnl_trylock()) |
3795 | return; |
3796 | |
3797 | if (should_notify_peers) { |
3798 | bond->send_peer_notif--; |
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
3801 | } |
3802 | if (should_notify_rtnl) { |
3803 | bond_slave_state_notify(bond); |
3804 | bond_slave_link_notify(bond); |
3805 | } |
3806 | |
3807 | rtnl_unlock(); |
3808 | } |
3809 | } |
3810 | |
3811 | static void bond_arp_monitor(struct work_struct *work) |
3812 | { |
3813 | struct bonding *bond = container_of(work, struct bonding, |
3814 | arp_work.work); |
3815 | |
3816 | if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) |
3817 | bond_activebackup_arp_mon(bond); |
3818 | else |
3819 | bond_loadbalance_arp_mon(bond); |
3820 | } |
3821 | |
3822 | /*-------------------------- netdev event handling --------------------------*/ |
3823 | |
3824 | /* Change device name */ |
3825 | static int bond_event_changename(struct bonding *bond) |
3826 | { |
3827 | bond_remove_proc_entry(bond); |
3828 | bond_create_proc_entry(bond); |
3829 | |
3830 | bond_debug_reregister(bond); |
3831 | |
3832 | return NOTIFY_DONE; |
3833 | } |
3834 | |
3835 | static int bond_master_netdev_event(unsigned long event, |
3836 | struct net_device *bond_dev) |
3837 | { |
	struct bonding *event_bond = netdev_priv(bond_dev);

	netdev_dbg(bond_dev, "%s called\n", __func__);
3841 | |
3842 | switch (event) { |
3843 | case NETDEV_CHANGENAME: |
		return bond_event_changename(event_bond);
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3849 | #endif /* CONFIG_XFRM_OFFLOAD */ |
3850 | break; |
3851 | case NETDEV_REGISTER: |
		bond_create_proc_entry(event_bond);
3853 | break; |
3854 | default: |
3855 | break; |
3856 | } |
3857 | |
3858 | return NOTIFY_DONE; |
3859 | } |
3860 | |
3861 | static int bond_slave_netdev_event(unsigned long event, |
3862 | struct net_device *slave_dev) |
3863 | { |
3864 | struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary; |
3865 | struct bonding *bond; |
3866 | struct net_device *bond_dev; |
3867 | |
3868 | /* A netdev event can be generated while enslaving a device |
3869 | * before netdev_rx_handler_register is called in which case |
3870 | * slave will be NULL |
3871 | */ |
3872 | if (!slave) { |
3873 | netdev_dbg(slave_dev, "%s called on NULL slave\n" , __func__); |
3874 | return NOTIFY_DONE; |
3875 | } |
3876 | |
3877 | bond_dev = slave->bond->dev; |
3878 | bond = slave->bond; |
3879 | primary = rtnl_dereference(bond->primary_slave); |
3880 | |
3881 | slave_dbg(bond_dev, slave_dev, "%s called\n" , __func__); |
3882 | |
3883 | switch (event) { |
3884 | case NETDEV_UNREGISTER: |
3885 | if (bond_dev->type != ARPHRD_ETHER) |
3886 | bond_release_and_destroy(bond_dev, slave_dev); |
3887 | else |
			__bond_release_one(bond_dev, slave_dev, false, true);
3889 | break; |
3890 | case NETDEV_UP: |
3891 | case NETDEV_CHANGE: |
3892 | /* For 802.3ad mode only: |
3893 | * Getting invalid Speed/Duplex values here will put slave |
3894 | * in weird state. Mark it as link-fail if the link was |
3895 | * previously up or link-down if it hasn't yet come up, and |
3896 | * let link-monitoring (miimon) set it right when correct |
3897 | * speeds/duplex are available. |
3898 | */ |
3899 | if (bond_update_speed_duplex(slave) && |
3900 | BOND_MODE(bond) == BOND_MODE_8023AD) { |
3901 | if (slave->last_link_up) |
3902 | slave->link = BOND_LINK_FAIL; |
3903 | else |
3904 | slave->link = BOND_LINK_DOWN; |
3905 | } |
3906 | |
3907 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
3908 | bond_3ad_adapter_speed_duplex_changed(slave); |
3909 | fallthrough; |
3910 | case NETDEV_DOWN: |
3911 | /* Refresh slave-array if applicable! |
3912 | * If the setup does not use miimon or arpmon (mode-specific!), |
3913 | * then these events will not cause the slave-array to be |
3914 | * refreshed. This will cause xmit to use a slave that is not |
		 * usable. Avoid such a situation by refreshing the array at these
3916 | * events. If these (miimon/arpmon) parameters are configured |
3917 | * then array gets refreshed twice and that should be fine! |
3918 | */ |
3919 | if (bond_mode_can_use_xmit_hash(bond)) |
3920 | bond_update_slave_arr(bond, NULL); |
3921 | break; |
3922 | case NETDEV_CHANGEMTU: |
3923 | /* TODO: Should slaves be allowed to |
3924 | * independently alter their MTU? For |
3925 | * an active-backup bond, slaves need |
3926 | * not be the same type of device, so |
3927 | * MTUs may vary. For other modes, |
3928 | * slaves arguably should have the |
3929 | * same MTUs. To do this, we'd need to |
3930 | * take over the slave's change_mtu |
3931 | * function for the duration of their |
3932 | * servitude. |
3933 | */ |
3934 | break; |
3935 | case NETDEV_CHANGENAME: |
3936 | /* we don't care if we don't have primary set */ |
3937 | if (!bond_uses_primary(bond) || |
3938 | !bond->params.primary[0]) |
3939 | break; |
3940 | |
3941 | if (slave == primary) { |
3942 | /* slave's name changed - he's no longer primary */ |
3943 | RCU_INIT_POINTER(bond->primary_slave, NULL); |
3944 | } else if (!strcmp(slave_dev->name, bond->params.primary)) { |
3945 | /* we have a new primary slave */ |
3946 | rcu_assign_pointer(bond->primary_slave, slave); |
3947 | } else { /* we didn't change primary - exit */ |
3948 | break; |
3949 | } |
3950 | |
3951 | netdev_info(dev: bond->dev, format: "Primary slave changed to %s, reselecting active slave\n" , |
3952 | primary ? slave_dev->name : "none" ); |
3953 | |
3954 | block_netpoll_tx(); |
3955 | bond_select_active_slave(bond); |
3956 | unblock_netpoll_tx(); |
3957 | break; |
3958 | case NETDEV_FEAT_CHANGE: |
3959 | if (!bond->notifier_ctx) { |
3960 | bond->notifier_ctx = true; |
3961 | bond_compute_features(bond); |
3962 | bond->notifier_ctx = false; |
3963 | } |
3964 | break; |
3965 | case NETDEV_RESEND_IGMP: |
3966 | /* Propagate to master device */ |
		call_netdevice_notifiers(event, slave->bond->dev);
3968 | break; |
3969 | case NETDEV_XDP_FEAT_CHANGE: |
3970 | bond_xdp_set_features(bond_dev); |
3971 | break; |
3972 | default: |
3973 | break; |
3974 | } |
3975 | |
3976 | return NOTIFY_DONE; |
3977 | } |
3978 | |
3979 | /* bond_netdev_event: handle netdev notifier chain events. |
3980 | * |
3981 | * This function receives events for the netdev chain. The caller (an |
3982 | * ioctl handler calling blocking_notifier_call_chain) holds the necessary |
3983 | * locks for us to safely manipulate the slave devices (RTNL lock, |
3984 | * dev_probe_lock). |
3985 | */ |
3986 | static int bond_netdev_event(struct notifier_block *this, |
3987 | unsigned long event, void *ptr) |
3988 | { |
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(event_dev, "%s received %s\n",
3992 | __func__, netdev_cmd_to_name(event)); |
3993 | |
3994 | if (!(event_dev->priv_flags & IFF_BONDING)) |
3995 | return NOTIFY_DONE; |
3996 | |
3997 | if (event_dev->flags & IFF_MASTER) { |
3998 | int ret; |
3999 | |
		ret = bond_master_netdev_event(event, event_dev);
4001 | if (ret != NOTIFY_DONE) |
4002 | return ret; |
4003 | } |
4004 | |
4005 | if (event_dev->flags & IFF_SLAVE) |
		return bond_slave_netdev_event(event, event_dev);
4007 | |
4008 | return NOTIFY_DONE; |
4009 | } |
4010 | |
4011 | static struct notifier_block bond_netdev_notifier = { |
4012 | .notifier_call = bond_netdev_event, |
4013 | }; |
4014 | |
4015 | /*---------------------------- Hashing Policies -----------------------------*/ |
4016 | |
4017 | /* Helper to access data in a packet, with or without a backing skb. |
4018 | * If skb is given the data is linearized if necessary via pskb_may_pull. |
4019 | */ |
4020 | static inline const void *bond_pull_data(struct sk_buff *skb, |
4021 | const void *data, int hlen, int n) |
4022 | { |
4023 | if (likely(n <= hlen)) |
4024 | return data; |
4025 | else if (skb && likely(pskb_may_pull(skb, n))) |
4026 | return skb->data; |
4027 | |
4028 | return NULL; |
4029 | } |
4030 | |
4031 | /* L2 hash helper */ |
4032 | static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) |
4033 | { |
4034 | struct ethhdr *ep; |
4035 | |
	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4037 | if (!data) |
4038 | return 0; |
4039 | |
4040 | ep = (struct ethhdr *)(data + mhoff); |
4041 | return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto); |
4042 | } |
4043 | |
4044 | static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data, |
4045 | int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34) |
4046 | { |
4047 | const struct ipv6hdr *iph6; |
4048 | const struct iphdr *iph; |
4049 | |
4050 | if (l2_proto == htons(ETH_P_IP)) { |
		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
4052 | if (!data) |
4053 | return false; |
4054 | |
4055 | iph = (const struct iphdr *)(data + *nhoff); |
		iph_to_flow_copy_v4addrs(fk, iph);
4057 | *nhoff += iph->ihl << 2; |
4058 | if (!ip_is_fragment(iph)) |
4059 | *ip_proto = iph->protocol; |
4060 | } else if (l2_proto == htons(ETH_P_IPV6)) { |
		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
4062 | if (!data) |
4063 | return false; |
4064 | |
4065 | iph6 = (const struct ipv6hdr *)(data + *nhoff); |
		iph_to_flow_copy_v6addrs(fk, iph6);
4067 | *nhoff += sizeof(*iph6); |
4068 | *ip_proto = iph6->nexthdr; |
4069 | } else { |
4070 | return false; |
4071 | } |
4072 | |
4073 | if (l34 && *ip_proto >= 0) |
		fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
4075 | |
4076 | return true; |
4077 | } |
4078 | |
4079 | static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) |
4080 | { |
4081 | u32 srcmac_vendor = 0, srcmac_dev = 0; |
4082 | struct ethhdr *mac_hdr; |
4083 | u16 vlan = 0; |
4084 | int i; |
4085 | |
	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4087 | if (!data) |
4088 | return 0; |
4089 | mac_hdr = (struct ethhdr *)(data + mhoff); |
4090 | |
4091 | for (i = 0; i < 3; i++) |
4092 | srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i]; |
4093 | |
4094 | for (i = 3; i < ETH_ALEN; i++) |
4095 | srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i]; |
4096 | |
4097 | if (skb && skb_vlan_tag_present(skb)) |
4098 | vlan = skb_vlan_tag_get(skb); |
4099 | |
4100 | return vlan ^ srcmac_vendor ^ srcmac_dev; |
4101 | } |
4102 | |
4103 | /* Extract the appropriate headers based on bond's xmit policy */ |
4104 | static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, |
4105 | __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk) |
4106 | { |
4107 | bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; |
4108 | int ip_proto = -1; |
4109 | |
4110 | switch (bond->params.xmit_policy) { |
4111 | case BOND_XMIT_POLICY_ENCAP23: |
4112 | case BOND_XMIT_POLICY_ENCAP34: |
4113 | memset(fk, 0, sizeof(*fk)); |
		return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
					  fk, data, l2_proto, nhoff, hlen, 0);
4116 | default: |
4117 | break; |
4118 | } |
4119 | |
4120 | fk->ports.ports = 0; |
4121 | memset(&fk->icmp, 0, sizeof(fk->icmp)); |
	if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
4123 | return false; |
4124 | |
4125 | /* ICMP error packets contains at least 8 bytes of the header |
4126 | * of the packet which generated the error. Use this information |
4127 | * to correlate ICMP error packets within the same flow which |
4128 | * generated the error. |
4129 | */ |
4130 | if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) { |
		skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
		if (ip_proto == IPPROTO_ICMP) {
			if (!icmp_is_err(fk->icmp.type))
4134 | return true; |
4135 | |
4136 | nhoff += sizeof(struct icmphdr); |
4137 | } else if (ip_proto == IPPROTO_ICMPV6) { |
			if (!icmpv6_is_err(fk->icmp.type))
4139 | return true; |
4140 | |
4141 | nhoff += sizeof(struct icmp6hdr); |
4142 | } |
		return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
4144 | } |
4145 | |
4146 | return true; |
4147 | } |
4148 | |
4149 | static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy) |
4150 | { |
4151 | hash ^= (__force u32)flow_get_u32_dst(flow) ^ |
4152 | (__force u32)flow_get_u32_src(flow); |
4153 | hash ^= (hash >> 16); |
4154 | hash ^= (hash >> 8); |
4155 | |
4156 | /* discard lowest hash bit to deal with the common even ports pattern */ |
4157 | if (xmit_policy == BOND_XMIT_POLICY_LAYER34 || |
4158 | xmit_policy == BOND_XMIT_POLICY_ENCAP34) |
4159 | return hash >> 1; |
4160 | |
4161 | return hash; |
4162 | } |
4163 | |
4164 | /* Generate hash based on xmit policy. If @skb is given it is used to linearize |
4165 | * the data as required, but this function can be used without it if the data is |
4166 | * known to be linear (e.g. with xdp_buff). |
4167 | */ |
4168 | static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, |
4169 | __be16 l2_proto, int mhoff, int nhoff, int hlen) |
4170 | { |
4171 | struct flow_keys flow; |
4172 | u32 hash; |
4173 | |
4174 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) |
4175 | return bond_vlan_srcmac_hash(skb, data, mhoff, hlen); |
4176 | |
4177 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || |
	    !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
4179 | return bond_eth_hash(skb, data, mhoff, hlen); |
4180 | |
4181 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || |
4182 | bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { |
4183 | hash = bond_eth_hash(skb, data, mhoff, hlen); |
4184 | } else { |
4185 | if (flow.icmp.id) |
4186 | memcpy(&hash, &flow.icmp, sizeof(hash)); |
4187 | else |
4188 | memcpy(&hash, &flow.ports.ports, sizeof(hash)); |
4189 | } |
4190 | |
	return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
4192 | } |
4193 | |
4194 | /** |
4195 | * bond_xmit_hash - generate a hash value based on the xmit policy |
4196 | * @bond: bonding device |
4197 | * @skb: buffer to use for headers |
4198 | * |
4199 | * This function will extract the necessary headers from the skb buffer and use |
4200 | * them to generate a hash based on the xmit_policy set in the bonding device |
4201 | */ |
4202 | u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) |
4203 | { |
4204 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && |
4205 | skb->l4_hash) |
4206 | return skb->hash; |
4207 | |
	return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
				0, skb_network_offset(skb),
				skb_headlen(skb));
4211 | } |
4212 | |
4213 | /** |
4214 | * bond_xmit_hash_xdp - generate a hash value based on the xmit policy |
4215 | * @bond: bonding device |
4216 | * @xdp: buffer to use for headers |
4217 | * |
4218 | * The XDP variant of bond_xmit_hash. |
4219 | */ |
4220 | static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) |
4221 | { |
4222 | struct ethhdr *eth; |
4223 | |
4224 | if (xdp->data + sizeof(struct ethhdr) > xdp->data_end) |
4225 | return 0; |
4226 | |
4227 | eth = (struct ethhdr *)xdp->data; |
4228 | |
	return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
				sizeof(struct ethhdr), xdp->data_end - xdp->data);
4231 | } |
4232 | |
4233 | /*-------------------------- Device entry points ----------------------------*/ |
4234 | |
4235 | void bond_work_init_all(struct bonding *bond) |
4236 | { |
4237 | INIT_DELAYED_WORK(&bond->mcast_work, |
4238 | bond_resend_igmp_join_requests_delayed); |
4239 | INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); |
4240 | INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); |
4241 | INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); |
4242 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); |
4243 | INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); |
4244 | } |
4245 | |
4246 | static void bond_work_cancel_all(struct bonding *bond) |
4247 | { |
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
	cancel_delayed_work_sync(&bond->slave_arr_work);
4254 | } |
4255 | |
4256 | static int bond_open(struct net_device *bond_dev) |
4257 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4259 | struct list_head *iter; |
4260 | struct slave *slave; |
4261 | |
4262 | if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { |
4263 | bond->rr_tx_counter = alloc_percpu(u32); |
4264 | if (!bond->rr_tx_counter) |
4265 | return -ENOMEM; |
4266 | } |
4267 | |
4268 | /* reset slave->backup and slave->inactive */ |
4269 | if (bond_has_slaves(bond)) { |
4270 | bond_for_each_slave(bond, slave, iter) { |
4271 | if (bond_uses_primary(bond) && |
4272 | slave != rcu_access_pointer(bond->curr_active_slave)) { |
4273 | bond_set_slave_inactive_flags(slave, |
4274 | BOND_SLAVE_NOTIFY_NOW); |
4275 | } else if (BOND_MODE(bond) != BOND_MODE_8023AD) { |
4276 | bond_set_slave_active_flags(slave, |
4277 | BOND_SLAVE_NOTIFY_NOW); |
4278 | } |
4279 | } |
4280 | } |
4281 | |
4282 | if (bond_is_lb(bond)) { |
4283 | /* bond_alb_initialize must be called before the timer |
4284 | * is started. |
4285 | */ |
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
			return -ENOMEM;
		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
4290 | } |
4291 | |
4292 | if (bond->params.miimon) /* link check interval, in milliseconds. */ |
		queue_delayed_work(bond->wq, &bond->mii_work, 0);
4294 | |
4295 | if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ |
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
4297 | bond->recv_probe = bond_rcv_validate; |
4298 | } |
4299 | |
4300 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
4302 | /* register to receive LACPDUs */ |
4303 | bond->recv_probe = bond_3ad_lacpdu_recv; |
		bond_3ad_initiate_agg_selection(bond, 1);
4305 | |
4306 | bond_for_each_slave(bond, slave, iter) |
			dev_mc_add(slave->dev, lacpdu_mcast_addr);
4308 | } |
4309 | |
4310 | if (bond_mode_can_use_xmit_hash(bond)) |
4311 | bond_update_slave_arr(bond, NULL); |
4312 | |
4313 | return 0; |
4314 | } |
4315 | |
4316 | static int bond_close(struct net_device *bond_dev) |
4317 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4319 | struct slave *slave; |
4320 | |
4321 | bond_work_cancel_all(bond); |
4322 | bond->send_peer_notif = 0; |
4323 | if (bond_is_lb(bond)) |
4324 | bond_alb_deinitialize(bond); |
4325 | bond->recv_probe = NULL; |
4326 | |
4327 | if (bond_uses_primary(bond)) { |
4328 | rcu_read_lock(); |
4329 | slave = rcu_dereference(bond->curr_active_slave); |
4330 | if (slave) |
			bond_hw_addr_flush(bond_dev, slave->dev);
4332 | rcu_read_unlock(); |
4333 | } else { |
4334 | struct list_head *iter; |
4335 | |
4336 | bond_for_each_slave(bond, slave, iter) |
			bond_hw_addr_flush(bond_dev, slave->dev);
4338 | } |
4339 | |
4340 | return 0; |
4341 | } |
4342 | |
4343 | /* fold stats, assuming all rtnl_link_stats64 fields are u64, but |
4344 | * that some drivers can provide 32bit values only. |
4345 | */ |
4346 | static void bond_fold_stats(struct rtnl_link_stats64 *_res, |
4347 | const struct rtnl_link_stats64 *_new, |
4348 | const struct rtnl_link_stats64 *_old) |
4349 | { |
4350 | const u64 *new = (const u64 *)_new; |
4351 | const u64 *old = (const u64 *)_old; |
4352 | u64 *res = (u64 *)_res; |
4353 | int i; |
4354 | |
4355 | for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { |
4356 | u64 nv = new[i]; |
4357 | u64 ov = old[i]; |
4358 | s64 delta = nv - ov; |
4359 | |
4360 | /* detects if this particular field is 32bit only */ |
4361 | if (((nv | ov) >> 32) == 0) |
4362 | delta = (s64)(s32)((u32)nv - (u32)ov); |
4363 | |
4364 | /* filter anomalies, some drivers reset their stats |
4365 | * at down/up events. |
4366 | */ |
4367 | if (delta > 0) |
4368 | res[i] += delta; |
4369 | } |
4370 | } |
4371 | |
4372 | #ifdef CONFIG_LOCKDEP |
4373 | static int bond_get_lowest_level_rcu(struct net_device *dev) |
4374 | { |
4375 | struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; |
4376 | struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; |
4377 | int cur = 0, max = 0; |
4378 | |
4379 | now = dev; |
4380 | iter = &dev->adj_list.lower; |
4381 | |
4382 | while (1) { |
4383 | next = NULL; |
4384 | while (1) { |
			ldev = netdev_next_lower_dev_rcu(now, &iter);
4386 | if (!ldev) |
4387 | break; |
4388 | |
4389 | next = ldev; |
4390 | niter = &ldev->adj_list.lower; |
4391 | dev_stack[cur] = now; |
4392 | iter_stack[cur++] = iter; |
4393 | if (max <= cur) |
4394 | max = cur; |
4395 | break; |
4396 | } |
4397 | |
4398 | if (!next) { |
4399 | if (!cur) |
4400 | return max; |
4401 | next = dev_stack[--cur]; |
4402 | niter = iter_stack[cur]; |
4403 | } |
4404 | |
4405 | now = next; |
4406 | iter = niter; |
4407 | } |
4408 | |
4409 | return max; |
4410 | } |
4411 | #endif |
4412 | |
4413 | static void bond_get_stats(struct net_device *bond_dev, |
4414 | struct rtnl_link_stats64 *stats) |
4415 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4417 | struct rtnl_link_stats64 temp; |
4418 | struct list_head *iter; |
4419 | struct slave *slave; |
4420 | int nest_level = 0; |
4421 | |
4422 | |
4423 | rcu_read_lock(); |
4424 | #ifdef CONFIG_LOCKDEP |
	nest_level = bond_get_lowest_level_rcu(bond_dev);
4426 | #endif |
4427 | |
4428 | spin_lock_nested(&bond->stats_lock, nest_level); |
4429 | memcpy(stats, &bond->bond_stats, sizeof(*stats)); |
4430 | |
4431 | bond_for_each_slave_rcu(bond, slave, iter) { |
4432 | const struct rtnl_link_stats64 *new = |
			dev_get_stats(slave->dev, &temp);

		bond_fold_stats(stats, new, &slave->slave_stats);
4436 | |
4437 | /* save off the slave stats for the next run */ |
4438 | memcpy(&slave->slave_stats, new, sizeof(*new)); |
4439 | } |
4440 | |
4441 | memcpy(&bond->bond_stats, stats, sizeof(*stats)); |
	spin_unlock(&bond->stats_lock);
4443 | rcu_read_unlock(); |
4444 | } |
4445 | |
4446 | static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) |
4447 | { |
	struct bonding *bond = netdev_priv(bond_dev);
	struct mii_ioctl_data *mii = NULL;

	netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
4456 | if (!mii) |
4457 | return -EINVAL; |
4458 | |
4459 | mii->phy_id = 0; |
4460 | fallthrough; |
4461 | case SIOCGMIIREG: |
4462 | /* We do this again just in case we were called by SIOCGMIIREG |
4463 | * instead of SIOCGMIIPHY. |
4464 | */ |
		mii = if_mii(ifr);
		if (!mii)
			return -EINVAL;

		if (mii->reg_num == 1) {
			mii->val_out = 0;
			if (netif_carrier_ok(bond->dev))
4472 | mii->val_out = BMSR_LSTATUS; |
4473 | } |
4474 | |
4475 | break; |
4476 | default: |
4477 | return -EOPNOTSUPP; |
4478 | } |
4479 | |
4480 | return 0; |
4481 | } |
4482 | |
4483 | static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) |
4484 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4486 | struct net_device *slave_dev = NULL; |
4487 | struct ifbond k_binfo; |
4488 | struct ifbond __user *u_binfo = NULL; |
4489 | struct ifslave k_sinfo; |
4490 | struct ifslave __user *u_sinfo = NULL; |
4491 | struct bond_opt_value newval; |
4492 | struct net *net; |
4493 | int res = 0; |
4494 | |
4495 | netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n" , cmd); |
4496 | |
4497 | switch (cmd) { |
4498 | case SIOCBONDINFOQUERY: |
4499 | u_binfo = (struct ifbond __user *)ifr->ifr_data; |
4500 | |
		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
			return -EFAULT;

		bond_info_query(bond_dev, &k_binfo);
		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4506 | return -EFAULT; |
4507 | |
4508 | return 0; |
4509 | case SIOCBONDSLAVEINFOQUERY: |
4510 | u_sinfo = (struct ifslave __user *)ifr->ifr_data; |
4511 | |
		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4518 | return -EFAULT; |
4519 | |
4520 | return res; |
4521 | default: |
4522 | break; |
4523 | } |
4524 | |
	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);

	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4533 | |
4534 | if (!slave_dev) |
4535 | return -ENODEV; |
4536 | |
4537 | switch (cmd) { |
4538 | case SIOCBONDENSLAVE: |
4539 | res = bond_enslave(bond_dev, slave_dev, NULL); |
4540 | break; |
4541 | case SIOCBONDRELEASE: |
4542 | res = bond_release(bond_dev, slave_dev); |
4543 | break; |
4544 | case SIOCBONDSETHWADDR: |
4545 | res = bond_set_dev_addr(bond_dev, slave_dev); |
4546 | break; |
4547 | case SIOCBONDCHANGEACTIVE: |
4548 | bond_opt_initstr(&newval, slave_dev->name); |
		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
					    &newval);
4551 | break; |
4552 | default: |
4553 | res = -EOPNOTSUPP; |
4554 | } |
4555 | |
4556 | return res; |
4557 | } |
4558 | |
4559 | static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr, |
4560 | void __user *data, int cmd) |
4561 | { |
4562 | struct ifreq ifrdata = { .ifr_data = data }; |
4563 | |
4564 | switch (cmd) { |
4565 | case BOND_INFO_QUERY_OLD: |
		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
	case BOND_SLAVE_INFO_QUERY_OLD:
		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4569 | case BOND_ENSLAVE_OLD: |
4570 | return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE); |
4571 | case BOND_RELEASE_OLD: |
4572 | return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE); |
4573 | case BOND_SETHWADDR_OLD: |
4574 | return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR); |
4575 | case BOND_CHANGE_ACTIVE_OLD: |
4576 | return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE); |
4577 | } |
4578 | |
4579 | return -EOPNOTSUPP; |
4580 | } |
4581 | |
4582 | static void bond_change_rx_flags(struct net_device *bond_dev, int change) |
4583 | { |
	struct bonding *bond = netdev_priv(bond_dev);

	if (change & IFF_PROMISC)
		bond_set_promiscuity(bond,
				     bond_dev->flags & IFF_PROMISC ? 1 : -1);

	if (change & IFF_ALLMULTI)
		bond_set_allmulti(bond,
				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4593 | } |
4594 | |
4595 | static void bond_set_rx_mode(struct net_device *bond_dev) |
4596 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4598 | struct list_head *iter; |
4599 | struct slave *slave; |
4600 | |
4601 | rcu_read_lock(); |
4602 | if (bond_uses_primary(bond)) { |
4603 | slave = rcu_dereference(bond->curr_active_slave); |
4604 | if (slave) { |
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
4607 | } |
4608 | } else { |
4609 | bond_for_each_slave_rcu(bond, slave, iter) { |
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
4612 | } |
4613 | } |
4614 | rcu_read_unlock(); |
4615 | } |
4616 | |
4617 | static int bond_neigh_init(struct neighbour *n) |
4618 | { |
	struct bonding *bond = netdev_priv(n->dev);
4620 | const struct net_device_ops *slave_ops; |
4621 | struct neigh_parms parms; |
4622 | struct slave *slave; |
4623 | int ret = 0; |
4624 | |
4625 | rcu_read_lock(); |
4626 | slave = bond_first_slave_rcu(bond); |
4627 | if (!slave) |
4628 | goto out; |
4629 | slave_ops = slave->dev->netdev_ops; |
4630 | if (!slave_ops->ndo_neigh_setup) |
4631 | goto out; |
4632 | |
4633 | /* TODO: find another way [1] to implement this. |
4634 | * Passing a zeroed structure is fragile, |
4635 | * but at least we do not pass garbage. |
4636 | * |
4637 | * [1] One way would be that ndo_neigh_setup() never touch |
4638 | * struct neigh_parms, but propagate the new neigh_setup() |
4639 | * back to ___neigh_create() / neigh_parms_alloc() |
4640 | */ |
4641 | memset(&parms, 0, sizeof(parms)); |
4642 | ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); |
4643 | |
4644 | if (ret) |
4645 | goto out; |
4646 | |
4647 | if (parms.neigh_setup) |
4648 | ret = parms.neigh_setup(n); |
4649 | out: |
4650 | rcu_read_unlock(); |
4651 | return ret; |
4652 | } |
4653 | |
/* The bonding ndo_neigh_setup is called at init time before any
 * slave exists. So we must declare a proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
4657 | * |
4658 | * It's also called by master devices (such as vlans) to setup their |
4659 | * underlying devices. In that case - do nothing, we're already set up from |
4660 | * our init. |
4661 | */ |
4662 | static int bond_neigh_setup(struct net_device *dev, |
4663 | struct neigh_parms *parms) |
4664 | { |
4665 | /* modify only our neigh_parms */ |
4666 | if (parms->dev == dev) |
4667 | parms->neigh_setup = bond_neigh_init; |
4668 | |
4669 | return 0; |
4670 | } |
4671 | |
4672 | /* Change the MTU of all of a master's slaves to match the master */ |
4673 | static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) |
4674 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4676 | struct slave *slave, *rollback_slave; |
4677 | struct list_head *iter; |
4678 | int res = 0; |
4679 | |
4680 | netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n" , bond, new_mtu); |
4681 | |
4682 | bond_for_each_slave(bond, slave, iter) { |
4683 | slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n" , |
4684 | slave, slave->dev->netdev_ops->ndo_change_mtu); |
4685 | |
4686 | res = dev_set_mtu(slave->dev, new_mtu); |
4687 | |
4688 | if (res) { |
4689 | /* If we failed to set the slave's mtu to the new value |
4690 | * we must abort the operation even in ACTIVE_BACKUP |
4691 | * mode, because if we allow the backup slaves to have |
4692 | * different mtu values than the active slave we'll |
4693 | * need to change their mtu when doing a failover. That |
4694 | * means changing their mtu from timer context, which |
4695 | * is probably not a good idea. |
4696 | */ |
4697 | slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n" , |
4698 | res, new_mtu); |
4699 | goto unwind; |
4700 | } |
4701 | } |
4702 | |
4703 | bond_dev->mtu = new_mtu; |
4704 | |
4705 | return 0; |
4706 | |
4707 | unwind: |
4708 | /* unwind from head to the slave that failed */ |
4709 | bond_for_each_slave(bond, rollback_slave, iter) { |
4710 | int tmp_res; |
4711 | |
4712 | if (rollback_slave == slave) |
4713 | break; |
4714 | |
4715 | tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu); |
4716 | if (tmp_res) |
4717 | slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n" , |
4718 | tmp_res); |
4719 | } |
4720 | |
4721 | return res; |
4722 | } |
4723 | |
4724 | /* Change HW address |
4725 | * |
4726 | * Note that many devices must be down to change the HW address, and |
4727 | * downing the master releases all slaves. We can make bonds full of |
4728 | * bonding devices to test this, however. |
4729 | */ |
4730 | static int bond_set_mac_address(struct net_device *bond_dev, void *addr) |
4731 | { |
	struct bonding *bond = netdev_priv(bond_dev);
4733 | struct slave *slave, *rollback_slave; |
4734 | struct sockaddr_storage *ss = addr, tmp_ss; |
4735 | struct list_head *iter; |
4736 | int res = 0; |
4737 | |
4738 | if (BOND_MODE(bond) == BOND_MODE_ALB) |
4739 | return bond_alb_set_mac_address(bond_dev, addr); |
4740 | |
4741 | |
4742 | netdev_dbg(bond_dev, "%s: bond=%p\n" , __func__, bond); |
4743 | |
4744 | /* If fail_over_mac is enabled, do nothing and return success. |
4745 | * Returning an error causes ifenslave to fail. |
4746 | */ |
4747 | if (bond->params.fail_over_mac && |
4748 | BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) |
4749 | return 0; |
4750 | |
	if (!is_valid_ether_addr(ss->__data))
4752 | return -EADDRNOTAVAIL; |
4753 | |
4754 | bond_for_each_slave(bond, slave, iter) { |
4755 | slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n" , |
4756 | __func__, slave); |
4757 | res = dev_set_mac_address(dev: slave->dev, sa: addr, NULL); |
4758 | if (res) { |
4759 | /* TODO: consider downing the slave |
4760 | * and retry ? |
4761 | * User should expect communications |
4762 | * breakage anyway until ARP finish |
4763 | * updating, so... |
4764 | */ |
4765 | slave_dbg(bond_dev, slave->dev, "%s: err %d\n" , |
4766 | __func__, res); |
4767 | goto unwind; |
4768 | } |
4769 | } |
4770 | |
4771 | /* success */ |
	dev_addr_set(bond_dev, ss->__data);
4773 | return 0; |
4774 | |
4775 | unwind: |
4776 | memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len); |
4777 | tmp_ss.ss_family = bond_dev->type; |
4778 | |
4779 | /* unwind from head to the slave that failed */ |
4780 | bond_for_each_slave(bond, rollback_slave, iter) { |
4781 | int tmp_res; |
4782 | |
4783 | if (rollback_slave == slave) |
4784 | break; |
4785 | |
tmp_res = dev_set_mac_address(rollback_slave->dev,
			      (struct sockaddr *)&tmp_ss, NULL);
4788 | if (tmp_res) { |
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4790 | __func__, tmp_res); |
4791 | } |
4792 | } |
4793 | |
4794 | return res; |
4795 | } |
4796 | |
4797 | /** |
4798 | * bond_get_slave_by_id - get xmit slave with slave_id |
4799 | * @bond: bonding device that is transmitting |
4800 | * @slave_id: slave id up to slave_cnt-1 through which to transmit |
4801 | * |
* This function tries to get the slave with the given slave_id, but in
* case it fails, it tries to find the first available slave for transmission.
4804 | */ |
4805 | static struct slave *bond_get_slave_by_id(struct bonding *bond, |
4806 | int slave_id) |
4807 | { |
4808 | struct list_head *iter; |
4809 | struct slave *slave; |
4810 | int i = slave_id; |
4811 | |
4812 | /* Here we start from the slave with slave_id */ |
4813 | bond_for_each_slave_rcu(bond, slave, iter) { |
4814 | if (--i < 0) { |
4815 | if (bond_slave_can_tx(slave)) |
4816 | return slave; |
4817 | } |
4818 | } |
4819 | |
4820 | /* Here we start from the first slave up to slave_id */ |
4821 | i = slave_id; |
4822 | bond_for_each_slave_rcu(bond, slave, iter) { |
4823 | if (--i < 0) |
4824 | break; |
4825 | if (bond_slave_can_tx(slave)) |
4826 | return slave; |
4827 | } |
4828 | /* no slave that can tx has been found */ |
4829 | return NULL; |
4830 | } |
4831 | |
4832 | /** |
4833 | * bond_rr_gen_slave_id - generate slave id based on packets_per_slave |
4834 | * @bond: bonding device to use |
4835 | * |
4836 | * Based on the value of the bonding device's packets_per_slave parameter |
4837 | * this function generates a slave id, which is usually used as the next |
4838 | * slave to transmit through. |
4839 | */ |
4840 | static u32 bond_rr_gen_slave_id(struct bonding *bond) |
4841 | { |
4842 | u32 slave_id; |
4843 | struct reciprocal_value reciprocal_packets_per_slave; |
4844 | int packets_per_slave = bond->params.packets_per_slave; |
4845 | |
4846 | switch (packets_per_slave) { |
4847 | case 0: |
4848 | slave_id = get_random_u32(); |
4849 | break; |
4850 | case 1: |
4851 | slave_id = this_cpu_inc_return(*bond->rr_tx_counter); |
4852 | break; |
4853 | default: |
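		/* Divide the running per-CPU counter by packets_per_slave via
		 * reciprocal_divide(); e.g. with packets_per_slave == 3,
		 * counter values 0-2 map to slave id 0, 3-5 to id 1, and so on.
		 */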
4854 | reciprocal_packets_per_slave = |
4855 | bond->params.reciprocal_packets_per_slave; |
4856 | slave_id = this_cpu_inc_return(*bond->rr_tx_counter); |
slave_id = reciprocal_divide(slave_id,
			     reciprocal_packets_per_slave);
4859 | break; |
4860 | } |
4861 | |
4862 | return slave_id; |
4863 | } |
4864 | |
4865 | static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond, |
4866 | struct sk_buff *skb) |
4867 | { |
4868 | struct slave *slave; |
4869 | int slave_cnt; |
4870 | u32 slave_id; |
4871 | |
4872 | /* Start with the curr_active_slave that joined the bond as the |
4873 | * default for sending IGMP traffic. For failover purposes one |
4874 | * needs to maintain some consistency for the interface that will |
4875 | * send the join/membership reports. The curr_active_slave found |
4876 | * will send all of this type of traffic. |
4877 | */ |
4878 | if (skb->protocol == htons(ETH_P_IP)) { |
4879 | int noff = skb_network_offset(skb); |
4880 | struct iphdr *iph; |
4881 | |
4882 | if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) |
4883 | goto non_igmp; |
4884 | |
4885 | iph = ip_hdr(skb); |
4886 | if (iph->protocol == IPPROTO_IGMP) { |
4887 | slave = rcu_dereference(bond->curr_active_slave); |
4888 | if (slave) |
4889 | return slave; |
return bond_get_slave_by_id(bond, 0);
4891 | } |
4892 | } |
4893 | |
4894 | non_igmp: |
4895 | slave_cnt = READ_ONCE(bond->slave_cnt); |
4896 | if (likely(slave_cnt)) { |
4897 | slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; |
4898 | return bond_get_slave_by_id(bond, slave_id); |
4899 | } |
4900 | return NULL; |
4901 | } |
4902 | |
4903 | static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, |
4904 | struct xdp_buff *xdp) |
4905 | { |
4906 | struct slave *slave; |
4907 | int slave_cnt; |
4908 | u32 slave_id; |
4909 | const struct ethhdr *eth; |
4910 | void *data = xdp->data; |
4911 | |
4912 | if (data + sizeof(struct ethhdr) > xdp->data_end) |
4913 | goto non_igmp; |
4914 | |
4915 | eth = (struct ethhdr *)data; |
4916 | data += sizeof(struct ethhdr); |
4917 | |
4918 | /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */ |
4919 | if (eth->h_proto == htons(ETH_P_IP)) { |
4920 | const struct iphdr *iph; |
4921 | |
4922 | if (data + sizeof(struct iphdr) > xdp->data_end) |
4923 | goto non_igmp; |
4924 | |
4925 | iph = (struct iphdr *)data; |
4926 | |
4927 | if (iph->protocol == IPPROTO_IGMP) { |
4928 | slave = rcu_dereference(bond->curr_active_slave); |
4929 | if (slave) |
4930 | return slave; |
return bond_get_slave_by_id(bond, 0);
4932 | } |
4933 | } |
4934 | |
4935 | non_igmp: |
4936 | slave_cnt = READ_ONCE(bond->slave_cnt); |
4937 | if (likely(slave_cnt)) { |
4938 | slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; |
4939 | return bond_get_slave_by_id(bond, slave_id); |
4940 | } |
4941 | return NULL; |
4942 | } |
4943 | |
4944 | static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, |
4945 | struct net_device *bond_dev) |
4946 | { |
struct bonding *bond = netdev_priv(bond_dev);
4948 | struct slave *slave; |
4949 | |
4950 | slave = bond_xmit_roundrobin_slave_get(bond, skb); |
4951 | if (likely(slave)) |
return bond_dev_queue_xmit(bond, skb, slave->dev);
4953 | |
return bond_tx_drop(bond_dev, skb);
4955 | } |
4956 | |
4957 | static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) |
4958 | { |
4959 | return rcu_dereference(bond->curr_active_slave); |
4960 | } |
4961 | |
4962 | /* In active-backup mode, we know that bond->curr_active_slave is always valid if |
4963 | * the bond has a usable interface. |
4964 | */ |
4965 | static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, |
4966 | struct net_device *bond_dev) |
4967 | { |
struct bonding *bond = netdev_priv(bond_dev);
4969 | struct slave *slave; |
4970 | |
4971 | slave = bond_xmit_activebackup_slave_get(bond); |
4972 | if (slave) |
return bond_dev_queue_xmit(bond, skb, slave->dev);
4974 | |
return bond_tx_drop(bond_dev, skb);
4976 | } |
4977 | |
4978 | /* Use this to update slave_array when (a) it's not appropriate to update |
4979 | * slave_array right away (note that update_slave_array() may sleep) |
4980 | * and / or (b) RTNL is not held. |
4981 | */ |
4982 | void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) |
4983 | { |
queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
4985 | } |
4986 | |
4987 | /* Slave array work handler. Holds only RTNL */ |
4988 | static void bond_slave_arr_handler(struct work_struct *work) |
4989 | { |
4990 | struct bonding *bond = container_of(work, struct bonding, |
4991 | slave_arr_work.work); |
4992 | int ret; |
4993 | |
4994 | if (!rtnl_trylock()) |
4995 | goto err; |
4996 | |
4997 | ret = bond_update_slave_arr(bond, NULL); |
4998 | rtnl_unlock(); |
4999 | if (ret) { |
pr_warn_ratelimited("Failed to update slave array from WT\n");
5001 | goto err; |
5002 | } |
5003 | return; |
5004 | |
5005 | err: |
bond_slave_arr_work_rearm(bond, 1);
5007 | } |
5008 | |
5009 | static void bond_skip_slave(struct bond_up_slave *slaves, |
5010 | struct slave *skipslave) |
5011 | { |
5012 | int idx; |
5013 | |
5014 | /* Rare situation where caller has asked to skip a specific |
5015 | * slave but allocation failed (most likely!). BTW this is |
5016 | * only possible when the call is initiated from |
5017 | * __bond_release_one(). In this situation; overwrite the |
5018 | * skipslave entry in the array with the last entry from the |
5019 | * array to avoid a situation where the xmit path may choose |
5020 | * this to-be-skipped slave to send a packet out. |
5021 | */ |
5022 | for (idx = 0; slaves && idx < slaves->count; idx++) { |
5023 | if (skipslave == slaves->arr[idx]) { |
5024 | slaves->arr[idx] = |
5025 | slaves->arr[slaves->count - 1]; |
5026 | slaves->count--; |
5027 | break; |
5028 | } |
5029 | } |
5030 | } |
5031 | |
5032 | static void bond_set_slave_arr(struct bonding *bond, |
5033 | struct bond_up_slave *usable_slaves, |
5034 | struct bond_up_slave *all_slaves) |
5035 | { |
5036 | struct bond_up_slave *usable, *all; |
5037 | |
5038 | usable = rtnl_dereference(bond->usable_slaves); |
5039 | rcu_assign_pointer(bond->usable_slaves, usable_slaves); |
5040 | kfree_rcu(usable, rcu); |
5041 | |
5042 | all = rtnl_dereference(bond->all_slaves); |
5043 | rcu_assign_pointer(bond->all_slaves, all_slaves); |
5044 | kfree_rcu(all, rcu); |
5045 | } |
5046 | |
5047 | static void bond_reset_slave_arr(struct bonding *bond) |
5048 | { |
5049 | bond_set_slave_arr(bond, NULL, NULL); |
5050 | } |
5051 | |
5052 | /* Build the usable slaves array in control path for modes that use xmit-hash |
5053 | * to determine the slave interface - |
5054 | * (a) BOND_MODE_8023AD |
5055 | * (b) BOND_MODE_XOR |
5056 | * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0 |
5057 | * |
5058 | * The caller is expected to hold RTNL only and NO other lock! |
5059 | */ |
5060 | int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) |
5061 | { |
5062 | struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL; |
5063 | struct slave *slave; |
5064 | struct list_head *iter; |
5065 | int agg_id = 0; |
5066 | int ret = 0; |
5067 | |
5068 | might_sleep(); |
5069 | |
5070 | usable_slaves = kzalloc(struct_size(usable_slaves, arr, |
5071 | bond->slave_cnt), GFP_KERNEL); |
5072 | all_slaves = kzalloc(struct_size(all_slaves, arr, |
5073 | bond->slave_cnt), GFP_KERNEL); |
5074 | if (!usable_slaves || !all_slaves) { |
5075 | ret = -ENOMEM; |
5076 | goto out; |
5077 | } |
5078 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
5079 | struct ad_info ad_info; |
5080 | |
spin_lock_bh(&bond->mode_lock);
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
spin_unlock_bh(&bond->mode_lock);
pr_debug("bond_3ad_get_active_agg_info failed\n");
/* No active aggregator means it's not safe to use
5086 | * the previous array. |
5087 | */ |
5088 | bond_reset_slave_arr(bond); |
5089 | goto out; |
5090 | } |
spin_unlock_bh(&bond->mode_lock);
5092 | agg_id = ad_info.aggregator_id; |
5093 | } |
5094 | bond_for_each_slave(bond, slave, iter) { |
5095 | if (skipslave == slave) |
5096 | continue; |
5097 | |
5098 | all_slaves->arr[all_slaves->count++] = slave; |
5099 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
5100 | struct aggregator *agg; |
5101 | |
5102 | agg = SLAVE_AD_INFO(slave)->port.aggregator; |
5103 | if (!agg || agg->aggregator_identifier != agg_id) |
5104 | continue; |
5105 | } |
5106 | if (!bond_slave_can_tx(slave)) |
5107 | continue; |
5108 | |
slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
5110 | usable_slaves->count); |
5111 | |
5112 | usable_slaves->arr[usable_slaves->count++] = slave; |
5113 | } |
5114 | |
5115 | bond_set_slave_arr(bond, usable_slaves, all_slaves); |
5116 | return ret; |
5117 | out: |
5118 | if (ret != 0 && skipslave) { |
5119 | bond_skip_slave(rtnl_dereference(bond->all_slaves), |
5120 | skipslave); |
5121 | bond_skip_slave(rtnl_dereference(bond->usable_slaves), |
5122 | skipslave); |
5123 | } |
5124 | kfree_rcu(all_slaves, rcu); |
5125 | kfree_rcu(usable_slaves, rcu); |
5126 | |
5127 | return ret; |
5128 | } |
5129 | |
5130 | static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, |
5131 | struct sk_buff *skb, |
5132 | struct bond_up_slave *slaves) |
5133 | { |
5134 | struct slave *slave; |
5135 | unsigned int count; |
5136 | u32 hash; |
5137 | |
5138 | hash = bond_xmit_hash(bond, skb); |
5139 | count = slaves ? READ_ONCE(slaves->count) : 0; |
5140 | if (unlikely(!count)) |
5141 | return NULL; |
5142 | |
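	/* Reduce the flow hash modulo the number of usable slaves to pick one */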
5143 | slave = slaves->arr[hash % count]; |
5144 | return slave; |
5145 | } |
5146 | |
5147 | static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, |
5148 | struct xdp_buff *xdp) |
5149 | { |
5150 | struct bond_up_slave *slaves; |
5151 | unsigned int count; |
5152 | u32 hash; |
5153 | |
5154 | hash = bond_xmit_hash_xdp(bond, xdp); |
5155 | slaves = rcu_dereference(bond->usable_slaves); |
5156 | count = slaves ? READ_ONCE(slaves->count) : 0; |
5157 | if (unlikely(!count)) |
5158 | return NULL; |
5159 | |
5160 | return slaves->arr[hash % count]; |
5161 | } |
5162 | |
5163 | /* Use this Xmit function for 3AD as well as XOR modes. The current |
5164 | * usable slave array is formed in the control path. The xmit function |
5165 | * just calculates hash and sends the packet out. |
5166 | */ |
5167 | static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, |
5168 | struct net_device *dev) |
5169 | { |
5170 | struct bonding *bond = netdev_priv(dev); |
5171 | struct bond_up_slave *slaves; |
5172 | struct slave *slave; |
5173 | |
5174 | slaves = rcu_dereference(bond->usable_slaves); |
5175 | slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); |
5176 | if (likely(slave)) |
return bond_dev_queue_xmit(bond, skb, slave->dev);
5178 | |
5179 | return bond_tx_drop(dev, skb); |
5180 | } |
5181 | |
5182 | /* in broadcast mode, we send everything to all usable interfaces. */ |
5183 | static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, |
5184 | struct net_device *bond_dev) |
5185 | { |
struct bonding *bond = netdev_priv(bond_dev);
5187 | struct slave *slave = NULL; |
5188 | struct list_head *iter; |
5189 | bool xmit_suc = false; |
5190 | bool skb_used = false; |
5191 | |
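	/* Clone the skb for every slave except the last, which transmits the
	 * original; slaves that are not up are skipped entirely.
	 */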
5192 | bond_for_each_slave_rcu(bond, slave, iter) { |
5193 | struct sk_buff *skb2; |
5194 | |
5195 | if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)) |
5196 | continue; |
5197 | |
5198 | if (bond_is_last_slave(bond, slave)) { |
5199 | skb2 = skb; |
5200 | skb_used = true; |
5201 | } else { |
5202 | skb2 = skb_clone(skb, GFP_ATOMIC); |
5203 | if (!skb2) { |
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
5205 | bond_dev->name, __func__); |
5206 | continue; |
5207 | } |
5208 | } |
5209 | |
if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
5211 | xmit_suc = true; |
5212 | } |
5213 | |
5214 | if (!skb_used) |
5215 | dev_kfree_skb_any(skb); |
5216 | |
5217 | if (xmit_suc) |
5218 | return NETDEV_TX_OK; |
5219 | |
dev_core_stats_tx_dropped_inc(bond_dev);
5221 | return NET_XMIT_DROP; |
5222 | } |
5223 | |
5224 | /*------------------------- Device initialization ---------------------------*/ |
5225 | |
5226 | /* Lookup the slave that corresponds to a qid */ |
5227 | static inline int bond_slave_override(struct bonding *bond, |
5228 | struct sk_buff *skb) |
5229 | { |
5230 | struct slave *slave = NULL; |
5231 | struct list_head *iter; |
5232 | |
5233 | if (!skb_rx_queue_recorded(skb)) |
5234 | return 1; |
5235 | |
5236 | /* Find out if any slaves have the same mapping as this skb. */ |
5237 | bond_for_each_slave_rcu(bond, slave, iter) { |
5238 | if (slave->queue_id == skb_get_queue_mapping(skb)) { |
5239 | if (bond_slave_is_up(slave) && |
5240 | slave->link == BOND_LINK_UP) { |
bond_dev_queue_xmit(bond, skb, slave->dev);
5242 | return 0; |
5243 | } |
5244 | /* If the slave isn't UP, use default transmit policy. */ |
5245 | break; |
5246 | } |
5247 | } |
5248 | |
5249 | return 1; |
5250 | } |
5251 | |
5252 | |
5253 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, |
5254 | struct net_device *sb_dev) |
5255 | { |
5256 | /* This helper function exists to help dev_pick_tx get the correct |
5257 | * destination queue. Using a helper function skips a call to |
5258 | * skb_tx_hash and will put the skbs in the queue we expect on their |
5259 | * way down to the bonding driver. |
5260 | */ |
5261 | u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; |
5262 | |
5263 | /* Save the original txq to restore before passing to the driver */ |
5264 | qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb); |
5265 | |
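	/* Fold an out-of-range queue id back into [0, real_num_tx_queues) by
	 * repeated subtraction, equivalent to txq % real_num_tx_queues.
	 */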
5266 | if (unlikely(txq >= dev->real_num_tx_queues)) { |
5267 | do { |
5268 | txq -= dev->real_num_tx_queues; |
5269 | } while (txq >= dev->real_num_tx_queues); |
5270 | } |
5271 | return txq; |
5272 | } |
5273 | |
5274 | static struct net_device *bond_xmit_get_slave(struct net_device *master_dev, |
5275 | struct sk_buff *skb, |
5276 | bool all_slaves) |
5277 | { |
struct bonding *bond = netdev_priv(master_dev);
5279 | struct bond_up_slave *slaves; |
5280 | struct slave *slave = NULL; |
5281 | |
5282 | switch (BOND_MODE(bond)) { |
5283 | case BOND_MODE_ROUNDROBIN: |
5284 | slave = bond_xmit_roundrobin_slave_get(bond, skb); |
5285 | break; |
5286 | case BOND_MODE_ACTIVEBACKUP: |
5287 | slave = bond_xmit_activebackup_slave_get(bond); |
5288 | break; |
5289 | case BOND_MODE_8023AD: |
5290 | case BOND_MODE_XOR: |
5291 | if (all_slaves) |
5292 | slaves = rcu_dereference(bond->all_slaves); |
5293 | else |
5294 | slaves = rcu_dereference(bond->usable_slaves); |
5295 | slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); |
5296 | break; |
5297 | case BOND_MODE_BROADCAST: |
5298 | break; |
5299 | case BOND_MODE_ALB: |
5300 | slave = bond_xmit_alb_slave_get(bond, skb); |
5301 | break; |
5302 | case BOND_MODE_TLB: |
5303 | slave = bond_xmit_tlb_slave_get(bond, skb); |
5304 | break; |
5305 | default: |
5306 | /* Should never happen, mode already checked */ |
WARN_ONCE(true, "Unknown bonding mode");
5308 | break; |
5309 | } |
5310 | |
5311 | if (slave) |
5312 | return slave->dev; |
5313 | return NULL; |
5314 | } |
5315 | |
5316 | static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow) |
5317 | { |
5318 | switch (sk->sk_family) { |
5319 | #if IS_ENABLED(CONFIG_IPV6) |
5320 | case AF_INET6: |
5321 | if (ipv6_only_sock(sk) || |
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5323 | flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; |
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5325 | flow->addrs.v6addrs.dst = sk->sk_v6_daddr; |
5326 | break; |
5327 | } |
5328 | fallthrough; |
5329 | #endif |
5330 | default: /* AF_INET */ |
5331 | flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; |
5332 | flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr; |
5333 | flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr; |
5334 | break; |
5335 | } |
5336 | |
5337 | flow->ports.src = inet_sk(sk)->inet_sport; |
5338 | flow->ports.dst = inet_sk(sk)->inet_dport; |
5339 | } |
5340 | |
5341 | /** |
5342 | * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields |
5343 | * @sk: socket to use for headers |
5344 | * |
5345 | * This function will extract the necessary field from the socket and use |
5346 | * them to generate a hash based on the LAYER34 xmit_policy. |
5347 | * Assumes that sk is a TCP or UDP socket. |
5348 | */ |
5349 | static u32 bond_sk_hash_l34(struct sock *sk) |
5350 | { |
5351 | struct flow_keys flow; |
5352 | u32 hash; |
5353 | |
bond_sk_to_flow(sk, &flow);
5355 | |
5356 | /* L4 */ |
5357 | memcpy(&hash, &flow.ports.ports, sizeof(hash)); |
5358 | /* L3 */ |
return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
5360 | } |
5361 | |
5362 | static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond, |
5363 | struct sock *sk) |
5364 | { |
5365 | struct bond_up_slave *slaves; |
5366 | struct slave *slave; |
5367 | unsigned int count; |
5368 | u32 hash; |
5369 | |
5370 | slaves = rcu_dereference(bond->usable_slaves); |
5371 | count = slaves ? READ_ONCE(slaves->count) : 0; |
5372 | if (unlikely(!count)) |
5373 | return NULL; |
5374 | |
5375 | hash = bond_sk_hash_l34(sk); |
5376 | slave = slaves->arr[hash % count]; |
5377 | |
5378 | return slave->dev; |
5379 | } |
5380 | |
5381 | static struct net_device *bond_sk_get_lower_dev(struct net_device *dev, |
5382 | struct sock *sk) |
5383 | { |
5384 | struct bonding *bond = netdev_priv(dev); |
5385 | struct net_device *lower = NULL; |
5386 | |
5387 | rcu_read_lock(); |
5388 | if (bond_sk_check(bond)) |
5389 | lower = __bond_sk_get_lower_dev(bond, sk); |
5390 | rcu_read_unlock(); |
5391 | |
5392 | return lower; |
5393 | } |
5394 | |
5395 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
5396 | static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb, |
5397 | struct net_device *dev) |
5398 | { |
5399 | struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev); |
5400 | |
5401 | /* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded |
5402 | * was true, if tls_device_down is running in parallel, but it's OK, |
5403 | * because bond_get_slave_by_dev has a NULL check. |
5404 | */ |
5405 | if (likely(bond_get_slave_by_dev(bond, tls_netdev))) |
return bond_dev_queue_xmit(bond, skb, tls_netdev);
5407 | return bond_tx_drop(dev, skb); |
5408 | } |
5409 | #endif |
5410 | |
5411 | static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) |
5412 | { |
5413 | struct bonding *bond = netdev_priv(dev); |
5414 | |
5415 | if (bond_should_override_tx_queue(bond) && |
5416 | !bond_slave_override(bond, skb)) |
5417 | return NETDEV_TX_OK; |
5418 | |
5419 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
5420 | if (tls_is_skb_tx_device_offloaded(skb)) |
5421 | return bond_tls_device_xmit(bond, skb, dev); |
5422 | #endif |
5423 | |
5424 | switch (BOND_MODE(bond)) { |
5425 | case BOND_MODE_ROUNDROBIN: |
return bond_xmit_roundrobin(skb, dev);
5427 | case BOND_MODE_ACTIVEBACKUP: |
return bond_xmit_activebackup(skb, dev);
5429 | case BOND_MODE_8023AD: |
5430 | case BOND_MODE_XOR: |
5431 | return bond_3ad_xor_xmit(skb, dev); |
5432 | case BOND_MODE_BROADCAST: |
return bond_xmit_broadcast(skb, dev);
5434 | case BOND_MODE_ALB: |
return bond_alb_xmit(skb, dev);
5436 | case BOND_MODE_TLB: |
return bond_tlb_xmit(skb, dev);
5438 | default: |
5439 | /* Should never happen, mode already checked */ |
netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5441 | WARN_ON_ONCE(1); |
5442 | return bond_tx_drop(dev, skb); |
5443 | } |
5444 | } |
5445 | |
5446 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) |
5447 | { |
5448 | struct bonding *bond = netdev_priv(dev); |
5449 | netdev_tx_t ret = NETDEV_TX_OK; |
5450 | |
5451 | /* If we risk deadlock from transmitting this in the |
5452 | * netpoll path, tell netpoll to queue the frame for later tx |
5453 | */ |
5454 | if (unlikely(is_netpoll_tx_blocked(dev))) |
5455 | return NETDEV_TX_BUSY; |
5456 | |
5457 | rcu_read_lock(); |
5458 | if (bond_has_slaves(bond)) |
5459 | ret = __bond_start_xmit(skb, dev); |
5460 | else |
5461 | ret = bond_tx_drop(dev, skb); |
5462 | rcu_read_unlock(); |
5463 | |
5464 | return ret; |
5465 | } |
5466 | |
5467 | static struct net_device * |
5468 | bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) |
5469 | { |
struct bonding *bond = netdev_priv(bond_dev);
5471 | struct slave *slave; |
5472 | |
5473 | /* Caller needs to hold rcu_read_lock() */ |
5474 | |
5475 | switch (BOND_MODE(bond)) { |
5476 | case BOND_MODE_ROUNDROBIN: |
5477 | slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); |
5478 | break; |
5479 | |
5480 | case BOND_MODE_ACTIVEBACKUP: |
5481 | slave = bond_xmit_activebackup_slave_get(bond); |
5482 | break; |
5483 | |
5484 | case BOND_MODE_8023AD: |
5485 | case BOND_MODE_XOR: |
5486 | slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); |
5487 | break; |
5488 | |
5489 | default: |
5490 | /* Should never happen. Mode guarded by bond_xdp_check() */ |
netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5492 | WARN_ON_ONCE(1); |
5493 | return NULL; |
5494 | } |
5495 | |
5496 | if (slave) |
5497 | return slave->dev; |
5498 | |
5499 | return NULL; |
5500 | } |
5501 | |
5502 | static int bond_xdp_xmit(struct net_device *bond_dev, |
5503 | int n, struct xdp_frame **frames, u32 flags) |
5504 | { |
5505 | int nxmit, err = -ENXIO; |
5506 | |
5507 | rcu_read_lock(); |
5508 | |
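	/* Transmit the frames one at a time so that each frame can be steered
	 * to a (possibly different) slave device.
	 */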
5509 | for (nxmit = 0; nxmit < n; nxmit++) { |
5510 | struct xdp_frame *frame = frames[nxmit]; |
5511 | struct xdp_frame *frames1[] = {frame}; |
5512 | struct net_device *slave_dev; |
5513 | struct xdp_buff xdp; |
5514 | |
xdp_convert_frame_to_buff(frame, &xdp);
5516 | |
slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5518 | if (!slave_dev) { |
5519 | err = -ENXIO; |
5520 | break; |
5521 | } |
5522 | |
5523 | err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags); |
5524 | if (err < 1) |
5525 | break; |
5526 | } |
5527 | |
5528 | rcu_read_unlock(); |
5529 | |
/* If an error happened on the first frame then we can pass the error up; otherwise
5531 | * report the number of frames that were xmitted. |
5532 | */ |
5533 | if (err < 0) |
5534 | return (nxmit == 0 ? err : nxmit); |
5535 | |
5536 | return nxmit; |
5537 | } |
5538 | |
5539 | static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
5540 | struct netlink_ext_ack *extack) |
5541 | { |
5542 | struct bonding *bond = netdev_priv(dev); |
5543 | struct list_head *iter; |
5544 | struct slave *slave, *rollback_slave; |
5545 | struct bpf_prog *old_prog; |
5546 | struct netdev_bpf xdp = { |
5547 | .command = XDP_SETUP_PROG, |
5548 | .flags = 0, |
5549 | .prog = prog, |
5550 | .extack = extack, |
5551 | }; |
5552 | int err; |
5553 | |
5554 | ASSERT_RTNL(); |
5555 | |
5556 | if (!bond_xdp_check(bond)) |
5557 | return -EOPNOTSUPP; |
5558 | |
5559 | old_prog = bond->xdp_prog; |
5560 | bond->xdp_prog = prog; |
5561 | |
5562 | bond_for_each_slave(bond, slave, iter) { |
5563 | struct net_device *slave_dev = slave->dev; |
5564 | |
5565 | if (!slave_dev->netdev_ops->ndo_bpf || |
5566 | !slave_dev->netdev_ops->ndo_xdp_xmit) { |
SLAVE_NL_ERR(dev, slave_dev, extack,
	     "Slave device does not support XDP");
5569 | err = -EOPNOTSUPP; |
5570 | goto err; |
5571 | } |
5572 | |
if (dev_xdp_prog_count(slave_dev) > 0) {
SLAVE_NL_ERR(dev, slave_dev, extack,
	     "Slave has XDP program loaded, please unload before enslaving");
5576 | err = -EOPNOTSUPP; |
5577 | goto err; |
5578 | } |
5579 | |
5580 | err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); |
5581 | if (err < 0) { |
5582 | /* ndo_bpf() sets extack error message */ |
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
5584 | goto err; |
5585 | } |
5586 | if (prog) |
5587 | bpf_prog_inc(prog); |
5588 | } |
5589 | |
5590 | if (prog) { |
5591 | static_branch_inc(&bpf_master_redirect_enabled_key); |
5592 | } else if (old_prog) { |
bpf_prog_put(old_prog);
5594 | static_branch_dec(&bpf_master_redirect_enabled_key); |
5595 | } |
5596 | |
5597 | return 0; |
5598 | |
5599 | err: |
5600 | /* unwind the program changes */ |
5601 | bond->xdp_prog = old_prog; |
5602 | xdp.prog = old_prog; |
5603 | xdp.extack = NULL; /* do not overwrite original error */ |
5604 | |
5605 | bond_for_each_slave(bond, rollback_slave, iter) { |
5606 | struct net_device *slave_dev = rollback_slave->dev; |
5607 | int err_unwind; |
5608 | |
5609 | if (slave == rollback_slave) |
5610 | break; |
5611 | |
5612 | err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); |
5613 | if (err_unwind < 0) |
slave_err(dev, slave_dev,
	  "Error %d when unwinding XDP program change\n", err_unwind);
5616 | else if (xdp.prog) |
bpf_prog_inc(xdp.prog);
5618 | } |
5619 | return err; |
5620 | } |
5621 | |
5622 | static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
5623 | { |
5624 | switch (xdp->command) { |
5625 | case XDP_SETUP_PROG: |
return bond_xdp_set(dev, xdp->prog, xdp->extack);
5627 | default: |
5628 | return -EINVAL; |
5629 | } |
5630 | } |
5631 | |
5632 | static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) |
5633 | { |
5634 | if (speed == 0 || speed == SPEED_UNKNOWN) |
5635 | speed = slave->speed; |
5636 | else |
5637 | speed = min(speed, slave->speed); |
5638 | |
5639 | return speed; |
5640 | } |
5641 | |
5642 | /* Set the BOND_PHC_INDEX flag to notify user space */ |
5643 | static int bond_set_phc_index_flag(struct kernel_hwtstamp_config *kernel_cfg) |
5644 | { |
5645 | struct ifreq *ifr = kernel_cfg->ifr; |
5646 | struct hwtstamp_config cfg; |
5647 | |
5648 | if (kernel_cfg->copied_to_user) { |
5649 | /* Lower device has a legacy implementation */ |
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
5651 | return -EFAULT; |
5652 | |
5653 | cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; |
if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
5655 | return -EFAULT; |
5656 | } else { |
5657 | kernel_cfg->flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; |
5658 | } |
5659 | |
5660 | return 0; |
5661 | } |
5662 | |
5663 | static int bond_hwtstamp_get(struct net_device *dev, |
5664 | struct kernel_hwtstamp_config *cfg) |
5665 | { |
5666 | struct bonding *bond = netdev_priv(dev); |
5667 | struct net_device *real_dev; |
5668 | int err; |
5669 | |
5670 | real_dev = bond_option_active_slave_get_rcu(bond); |
5671 | if (!real_dev) |
5672 | return -EOPNOTSUPP; |
5673 | |
err = generic_hwtstamp_get_lower(real_dev, cfg);
5675 | if (err) |
5676 | return err; |
5677 | |
return bond_set_phc_index_flag(cfg);
5679 | } |
5680 | |
5681 | static int bond_hwtstamp_set(struct net_device *dev, |
5682 | struct kernel_hwtstamp_config *cfg, |
5683 | struct netlink_ext_ack *extack) |
5684 | { |
5685 | struct bonding *bond = netdev_priv(dev); |
5686 | struct net_device *real_dev; |
5687 | int err; |
5688 | |
5689 | if (!(cfg->flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX)) |
5690 | return -EOPNOTSUPP; |
5691 | |
5692 | real_dev = bond_option_active_slave_get_rcu(bond); |
5693 | if (!real_dev) |
5694 | return -EOPNOTSUPP; |
5695 | |
err = generic_hwtstamp_set_lower(real_dev, cfg, extack);
5697 | if (err) |
5698 | return err; |
5699 | |
return bond_set_phc_index_flag(cfg);
5701 | } |
5702 | |
5703 | static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, |
5704 | struct ethtool_link_ksettings *cmd) |
5705 | { |
struct bonding *bond = netdev_priv(bond_dev);
5707 | struct list_head *iter; |
5708 | struct slave *slave; |
5709 | u32 speed = 0; |
5710 | |
5711 | cmd->base.duplex = DUPLEX_UNKNOWN; |
5712 | cmd->base.port = PORT_OTHER; |
5713 | |
5714 | /* Since bond_slave_can_tx returns false for all inactive or down slaves, we |
5715 | * do not need to check mode. Though link speed might not represent |
5716 | * the true receive or transmit bandwidth (not all modes are symmetric) |
5717 | * this is an accurate maximum. |
5718 | */ |
5719 | bond_for_each_slave(bond, slave, iter) { |
5720 | if (bond_slave_can_tx(slave)) { |
5721 | bond_update_speed_duplex(slave); |
5722 | if (slave->speed != SPEED_UNKNOWN) { |
5723 | if (BOND_MODE(bond) == BOND_MODE_BROADCAST) |
5724 | speed = bond_mode_bcast_speed(slave, |
5725 | speed); |
5726 | else |
5727 | speed += slave->speed; |
5728 | } |
5729 | if (cmd->base.duplex == DUPLEX_UNKNOWN && |
5730 | slave->duplex != DUPLEX_UNKNOWN) |
5731 | cmd->base.duplex = slave->duplex; |
5732 | } |
5733 | } |
5734 | cmd->base.speed = speed ? : SPEED_UNKNOWN; |
5735 | |
5736 | return 0; |
5737 | } |
5738 | |
5739 | static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, |
5740 | struct ethtool_drvinfo *drvinfo) |
5741 | { |
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
5744 | BOND_ABI_VERSION); |
5745 | } |
5746 | |
5747 | static int bond_ethtool_get_ts_info(struct net_device *bond_dev, |
5748 | struct ethtool_ts_info *info) |
5749 | { |
struct bonding *bond = netdev_priv(bond_dev);
5751 | struct ethtool_ts_info ts_info; |
5752 | const struct ethtool_ops *ops; |
5753 | struct net_device *real_dev; |
5754 | bool sw_tx_support = false; |
5755 | struct phy_device *phydev; |
5756 | struct list_head *iter; |
5757 | struct slave *slave; |
5758 | int ret = 0; |
5759 | |
5760 | rcu_read_lock(); |
5761 | real_dev = bond_option_active_slave_get_rcu(bond); |
dev_hold(real_dev);
5763 | rcu_read_unlock(); |
5764 | |
5765 | if (real_dev) { |
5766 | ops = real_dev->ethtool_ops; |
5767 | phydev = real_dev->phydev; |
5768 | |
5769 | if (phy_has_tsinfo(phydev)) { |
ret = phy_ts_info(phydev, info);
5771 | goto out; |
5772 | } else if (ops->get_ts_info) { |
5773 | ret = ops->get_ts_info(real_dev, info); |
5774 | goto out; |
5775 | } |
5776 | } else { |
5777 | /* Check if all slaves support software tx timestamping */ |
5778 | rcu_read_lock(); |
5779 | bond_for_each_slave_rcu(bond, slave, iter) { |
5780 | ret = -1; |
5781 | ops = slave->dev->ethtool_ops; |
5782 | phydev = slave->dev->phydev; |
5783 | |
5784 | if (phy_has_tsinfo(phydev)) |
ret = phy_ts_info(phydev, &ts_info);
5786 | else if (ops->get_ts_info) |
5787 | ret = ops->get_ts_info(slave->dev, &ts_info); |
5788 | |
5789 | if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) { |
5790 | sw_tx_support = true; |
5791 | continue; |
5792 | } |
5793 | |
5794 | sw_tx_support = false; |
5795 | break; |
5796 | } |
5797 | rcu_read_unlock(); |
5798 | } |
5799 | |
5800 | ret = 0; |
5801 | info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | |
5802 | SOF_TIMESTAMPING_SOFTWARE; |
5803 | if (sw_tx_support) |
5804 | info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; |
5805 | |
5806 | info->phc_index = -1; |
5807 | |
5808 | out: |
dev_put(real_dev);
5810 | return ret; |
5811 | } |
5812 | |
5813 | static const struct ethtool_ops bond_ethtool_ops = { |
5814 | .get_drvinfo = bond_ethtool_get_drvinfo, |
5815 | .get_link = ethtool_op_get_link, |
5816 | .get_link_ksettings = bond_ethtool_get_link_ksettings, |
5817 | .get_ts_info = bond_ethtool_get_ts_info, |
5818 | }; |
5819 | |
5820 | static const struct net_device_ops bond_netdev_ops = { |
5821 | .ndo_init = bond_init, |
5822 | .ndo_uninit = bond_uninit, |
5823 | .ndo_open = bond_open, |
5824 | .ndo_stop = bond_close, |
5825 | .ndo_start_xmit = bond_start_xmit, |
5826 | .ndo_select_queue = bond_select_queue, |
5827 | .ndo_get_stats64 = bond_get_stats, |
5828 | .ndo_eth_ioctl = bond_eth_ioctl, |
5829 | .ndo_siocbond = bond_do_ioctl, |
5830 | .ndo_siocdevprivate = bond_siocdevprivate, |
5831 | .ndo_change_rx_flags = bond_change_rx_flags, |
5832 | .ndo_set_rx_mode = bond_set_rx_mode, |
5833 | .ndo_change_mtu = bond_change_mtu, |
5834 | .ndo_set_mac_address = bond_set_mac_address, |
5835 | .ndo_neigh_setup = bond_neigh_setup, |
5836 | .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, |
5837 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, |
5838 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5839 | .ndo_netpoll_setup = bond_netpoll_setup, |
5840 | .ndo_netpoll_cleanup = bond_netpoll_cleanup, |
5841 | .ndo_poll_controller = bond_poll_controller, |
5842 | #endif |
5843 | .ndo_add_slave = bond_enslave, |
5844 | .ndo_del_slave = bond_release, |
5845 | .ndo_fix_features = bond_fix_features, |
5846 | .ndo_features_check = passthru_features_check, |
5847 | .ndo_get_xmit_slave = bond_xmit_get_slave, |
5848 | .ndo_sk_get_lower_dev = bond_sk_get_lower_dev, |
5849 | .ndo_bpf = bond_xdp, |
5850 | .ndo_xdp_xmit = bond_xdp_xmit, |
5851 | .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave, |
5852 | .ndo_hwtstamp_get = bond_hwtstamp_get, |
5853 | .ndo_hwtstamp_set = bond_hwtstamp_set, |
5854 | }; |
5855 | |
5856 | static const struct device_type bond_type = { |
.name = "bond",
5858 | }; |
5859 | |
5860 | static void bond_destructor(struct net_device *bond_dev) |
5861 | { |
struct bonding *bond = netdev_priv(bond_dev);
5863 | |
5864 | if (bond->wq) |
destroy_workqueue(bond->wq);
5866 | |
free_percpu(bond->rr_tx_counter);
5868 | } |
5869 | |
5870 | void bond_setup(struct net_device *bond_dev) |
5871 | { |
struct bonding *bond = netdev_priv(bond_dev);
5873 | |
5874 | spin_lock_init(&bond->mode_lock); |
5875 | bond->params = bonding_defaults; |
5876 | |
5877 | /* Initialize pointers */ |
5878 | bond->dev = bond_dev; |
5879 | |
5880 | /* Initialize the device entry points */ |
ether_setup(bond_dev);
5882 | bond_dev->max_mtu = ETH_MAX_MTU; |
5883 | bond_dev->netdev_ops = &bond_netdev_ops; |
5884 | bond_dev->ethtool_ops = &bond_ethtool_ops; |
5885 | |
5886 | bond_dev->needs_free_netdev = true; |
5887 | bond_dev->priv_destructor = bond_destructor; |
5888 | |
5889 | SET_NETDEV_DEVTYPE(bond_dev, &bond_type); |
5890 | |
5891 | /* Initialize the device options */ |
5892 | bond_dev->flags |= IFF_MASTER; |
5893 | bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; |
5894 | bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); |
5895 | |
5896 | #ifdef CONFIG_XFRM_OFFLOAD |
5897 | /* set up xfrm device ops (only supported in active-backup right now) */ |
5898 | bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; |
INIT_LIST_HEAD(&bond->ipsec_list);
5900 | spin_lock_init(&bond->ipsec_lock); |
5901 | #endif /* CONFIG_XFRM_OFFLOAD */ |
5902 | |
5903 | /* don't acquire bond device's netif_tx_lock when transmitting */ |
5904 | bond_dev->features |= NETIF_F_LLTX; |
5905 | |
5906 | /* By default, we declare the bond to be fully |
5907 | * VLAN hardware accelerated capable. Special |
5908 | * care is taken in the various xmit functions |
5909 | * when there are slaves that are not hw accel |
5910 | * capable |
5911 | */ |
5912 | |
5913 | /* Don't allow bond devices to change network namespaces. */ |
5914 | bond_dev->features |= NETIF_F_NETNS_LOCAL; |
5915 | |
5916 | bond_dev->hw_features = BOND_VLAN_FEATURES | |
5917 | NETIF_F_HW_VLAN_CTAG_RX | |
5918 | NETIF_F_HW_VLAN_CTAG_FILTER | |
5919 | NETIF_F_HW_VLAN_STAG_RX | |
5920 | NETIF_F_HW_VLAN_STAG_FILTER; |
5921 | |
5922 | bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; |
5923 | bond_dev->features |= bond_dev->hw_features; |
5924 | bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; |
5925 | #ifdef CONFIG_XFRM_OFFLOAD |
5926 | bond_dev->hw_features |= BOND_XFRM_FEATURES; |
5927 | /* Only enable XFRM features if this is an active-backup config */ |
5928 | if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) |
5929 | bond_dev->features |= BOND_XFRM_FEATURES; |
5930 | #endif /* CONFIG_XFRM_OFFLOAD */ |
5931 | |
5932 | if (bond_xdp_check(bond)) |
5933 | bond_dev->xdp_features = NETDEV_XDP_ACT_MASK; |
5934 | } |
5935 | |
5936 | /* Destroy a bonding device. |
5937 | * Must be under rtnl_lock when this function is called. |
5938 | */ |
5939 | static void bond_uninit(struct net_device *bond_dev) |
5940 | { |
struct bonding *bond = netdev_priv(bond_dev);
5942 | struct list_head *iter; |
5943 | struct slave *slave; |
5944 | |
5945 | bond_netpoll_cleanup(bond_dev); |
5946 | |
5947 | /* Release the bonded slaves */ |
5948 | bond_for_each_slave(bond, slave, iter) |
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
5951 | |
5952 | bond_set_slave_arr(bond, NULL, NULL); |
5953 | |
list_del(&bond->bond_list);
5955 | |
5956 | bond_debug_unregister(bond); |
5957 | } |
5958 | |
5959 | /*------------------------- Module initialization ---------------------------*/ |
5960 | |
5961 | static int __init bond_check_params(struct bond_params *params) |
5962 | { |
5963 | int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; |
5964 | struct bond_opt_value newval; |
5965 | const struct bond_opt_value *valptr; |
5966 | int arp_all_targets_value = 0; |
5967 | u16 ad_actor_sys_prio = 0; |
5968 | u16 ad_user_port_key = 0; |
5969 | __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 }; |
5970 | int arp_ip_count; |
5971 | int bond_mode = BOND_MODE_ROUNDROBIN; |
5972 | int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; |
5973 | int lacp_fast = 0; |
5974 | int tlb_dynamic_lb; |
5975 | |
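	/* The module parameters validated below normally come from module
	 * load time, e.g. (illustrative values only):
	 *   modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast
	 */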
5976 | /* Convert string parameters. */ |
5977 | if (mode) { |
5978 | bond_opt_initstr(&newval, mode); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
5980 | if (!valptr) { |
pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
5982 | return -EINVAL; |
5983 | } |
5984 | bond_mode = valptr->value; |
5985 | } |
5986 | |
5987 | if (xmit_hash_policy) { |
5988 | if (bond_mode == BOND_MODE_ROUNDROBIN || |
5989 | bond_mode == BOND_MODE_ACTIVEBACKUP || |
5990 | bond_mode == BOND_MODE_BROADCAST) { |
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
5992 | bond_mode_name(bond_mode)); |
5993 | } else { |
5994 | bond_opt_initstr(&newval, xmit_hash_policy); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
			&newval);
5997 | if (!valptr) { |
pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5999 | xmit_hash_policy); |
6000 | return -EINVAL; |
6001 | } |
6002 | xmit_hashtype = valptr->value; |
6003 | } |
6004 | } |
6005 | |
6006 | if (lacp_rate) { |
6007 | if (bond_mode != BOND_MODE_8023AD) { |
pr_info("lacp_rate param is irrelevant in mode %s\n",
6009 | bond_mode_name(bond_mode)); |
6010 | } else { |
6011 | bond_opt_initstr(&newval, lacp_rate); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
			&newval);
6014 | if (!valptr) { |
pr_err("Error: Invalid lacp rate \"%s\"\n",
6016 | lacp_rate); |
6017 | return -EINVAL; |
6018 | } |
6019 | lacp_fast = valptr->value; |
6020 | } |
6021 | } |
6022 | |
6023 | if (ad_select) { |
6024 | bond_opt_initstr(&newval, ad_select); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
			&newval);
6027 | if (!valptr) { |
pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
6029 | return -EINVAL; |
6030 | } |
6031 | params->ad_select = valptr->value; |
6032 | if (bond_mode != BOND_MODE_8023AD) |
pr_warn("ad_select param only affects 802.3ad mode\n");
6034 | } else { |
6035 | params->ad_select = BOND_AD_STABLE; |
6036 | } |
6037 | |
6038 | if (max_bonds < 0) { |
pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
6040 | max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS); |
6041 | max_bonds = BOND_DEFAULT_MAX_BONDS; |
6042 | } |
6043 | |
6044 | if (miimon < 0) { |
pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6046 | miimon, INT_MAX); |
6047 | miimon = 0; |
6048 | } |
6049 | |
6050 | if (updelay < 0) { |
pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6052 | updelay, INT_MAX); |
6053 | updelay = 0; |
6054 | } |
6055 | |
6056 | if (downdelay < 0) { |
pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6058 | downdelay, INT_MAX); |
6059 | downdelay = 0; |
6060 | } |
6061 | |
6062 | if ((use_carrier != 0) && (use_carrier != 1)) { |
pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
6064 | use_carrier); |
6065 | use_carrier = 1; |
6066 | } |
6067 | |
6068 | if (num_peer_notif < 0 || num_peer_notif > 255) { |
pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
6070 | num_peer_notif); |
6071 | num_peer_notif = 1; |
6072 | } |
6073 | |
6074 | /* reset values for 802.3ad/TLB/ALB */ |
if (!bond_mode_uses_arp(bond_mode)) {
6076 | if (!miimon) { |
pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warn("Forcing miimon to 100msec\n");
6079 | miimon = BOND_DEFAULT_MIIMON; |
6080 | } |
6081 | } |
6082 | |
6083 | if (tx_queues < 1 || tx_queues > 255) { |
pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
6085 | tx_queues, BOND_DEFAULT_TX_QUEUES); |
6086 | tx_queues = BOND_DEFAULT_TX_QUEUES; |
6087 | } |
6088 | |
6089 | if ((all_slaves_active != 0) && (all_slaves_active != 1)) { |
pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
6091 | all_slaves_active); |
6092 | all_slaves_active = 0; |
6093 | } |
6094 | |
6095 | if (resend_igmp < 0 || resend_igmp > 255) { |
pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
6097 | resend_igmp, BOND_DEFAULT_RESEND_IGMP); |
6098 | resend_igmp = BOND_DEFAULT_RESEND_IGMP; |
6099 | } |
6100 | |
6101 | bond_opt_initval(&newval, packets_per_slave); |
if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
6104 | packets_per_slave, USHRT_MAX); |
6105 | packets_per_slave = 1; |
6106 | } |
6107 | |
6108 | if (bond_mode == BOND_MODE_ALB) { |
pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
6110 | updelay); |
6111 | } |
6112 | |
6113 | if (!miimon) { |
6114 | if (updelay || downdelay) { |
6115 | /* just warn the user the up/down delay will have |
6116 | * no effect since miimon is zero... |
6117 | */ |
pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
6119 | updelay, downdelay); |
6120 | } |
6121 | } else { |
6122 | /* don't allow arp monitoring */ |
6123 | if (arp_interval) { |
pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
6125 | miimon, arp_interval); |
6126 | arp_interval = 0; |
6127 | } |
6128 | |
6129 | if ((updelay % miimon) != 0) { |
pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
6131 | updelay, miimon, (updelay / miimon) * miimon); |
6132 | } |
6133 | |
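		/* Store the delays as a count of miimon intervals; e.g.
		 * updelay=250 with miimon=100 is rounded down to 2 intervals
		 * (200 ms).
		 */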
6134 | updelay /= miimon; |
6135 | |
6136 | if ((downdelay % miimon) != 0) { |
pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
6138 | downdelay, miimon, |
6139 | (downdelay / miimon) * miimon); |
6140 | } |
6141 | |
6142 | downdelay /= miimon; |
6143 | } |
6144 | |
6145 | if (arp_interval < 0) { |
pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6147 | arp_interval, INT_MAX); |
6148 | arp_interval = 0; |
6149 | } |
6150 | |
6151 | for (arp_ip_count = 0, i = 0; |
6152 | (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { |
6153 | __be32 ip; |
6154 | |
6155 | /* not a complete check, but good enough to catch mistakes */ |
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
    !bond_is_ip_target_ok(ip)) {
pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
6159 | arp_ip_target[i]); |
6160 | arp_interval = 0; |
6161 | } else { |
if (bond_get_targets_ip(arp_target, ip) == -1)
6163 | arp_target[arp_ip_count++] = ip; |
6164 | else |
pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
6166 | &ip); |
6167 | } |
6168 | } |
6169 | |
6170 | if (arp_interval && !arp_ip_count) { |
6171 | /* don't allow arping if no arp_ip_target given... */ |
pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
6173 | arp_interval); |
6174 | arp_interval = 0; |
6175 | } |
6176 | |
6177 | if (arp_validate) { |
6178 | if (!arp_interval) { |
pr_err("arp_validate requires arp_interval\n");
6180 | return -EINVAL; |
6181 | } |
6182 | |
6183 | bond_opt_initstr(&newval, arp_validate); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
			&newval);
6186 | if (!valptr) { |
pr_err("Error: invalid arp_validate \"%s\"\n",
6188 | arp_validate); |
6189 | return -EINVAL; |
6190 | } |
6191 | arp_validate_value = valptr->value; |
6192 | } else { |
6193 | arp_validate_value = 0; |
6194 | } |
6195 | |
6196 | if (arp_all_targets) { |
6197 | bond_opt_initstr(&newval, arp_all_targets); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
			&newval);
6200 | if (!valptr) { |
pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
6202 | arp_all_targets); |
6203 | arp_all_targets_value = 0; |
6204 | } else { |
6205 | arp_all_targets_value = valptr->value; |
6206 | } |
6207 | } |
6208 | |
6209 | if (miimon) { |
pr_info("MII link monitoring set to %d ms\n", miimon);
6211 | } else if (arp_interval) { |
valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
			  arp_validate_value);
pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
6215 | arp_interval, valptr->string, arp_ip_count); |
6216 | |
6217 | for (i = 0; i < arp_ip_count; i++) |
pr_cont(" %s", arp_ip_target[i]);
6219 | |
pr_cont("\n");
6221 | |
6222 | } else if (max_bonds) { |
6223 | /* miimon and arp_interval not set, we need one so things |
6224 | * work as expected, see bonding.txt for details |
6225 | */ |
pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
6227 | } |
6228 | |
if (primary && !bond_mode_uses_primary(bond_mode)) {
6230 | /* currently, using a primary only makes sense |
6231 | * in active backup, TLB or ALB modes |
6232 | */ |
pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
6234 | primary, bond_mode_name(bond_mode)); |
6235 | primary = NULL; |
6236 | } |
6237 | |
6238 | if (primary && primary_reselect) { |
6239 | bond_opt_initstr(&newval, primary_reselect); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
			&newval);
6242 | if (!valptr) { |
pr_err("Error: Invalid primary_reselect \"%s\"\n",
6244 | primary_reselect); |
6245 | return -EINVAL; |
6246 | } |
6247 | primary_reselect_value = valptr->value; |
6248 | } else { |
6249 | primary_reselect_value = BOND_PRI_RESELECT_ALWAYS; |
6250 | } |
6251 | |
6252 | if (fail_over_mac) { |
6253 | bond_opt_initstr(&newval, fail_over_mac); |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
			&newval);
6256 | if (!valptr) { |
pr_err("Error: invalid fail_over_mac \"%s\"\n",
6258 | fail_over_mac); |
6259 | return -EINVAL; |
6260 | } |
6261 | fail_over_mac_value = valptr->value; |
6262 | if (bond_mode != BOND_MODE_ACTIVEBACKUP) |
pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
6264 | } else { |
6265 | fail_over_mac_value = BOND_FOM_NONE; |
6266 | } |
6267 | |
bond_opt_initstr(&newval, "default");
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
			&newval);
6272 | if (!valptr) { |
pr_err("Error: No ad_actor_sys_prio default value");
6274 | return -EINVAL; |
6275 | } |
6276 | ad_actor_sys_prio = valptr->value; |
6277 | |
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
			&newval);
6280 | if (!valptr) { |
pr_err("Error: No ad_user_port_key default value");
6282 | return -EINVAL; |
6283 | } |
6284 | ad_user_port_key = valptr->value; |
6285 | |
bond_opt_initstr(&newval, "default");
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
6288 | if (!valptr) { |
pr_err("Error: No tlb_dynamic_lb default value");
6290 | return -EINVAL; |
6291 | } |
6292 | tlb_dynamic_lb = valptr->value; |
6293 | |
6294 | if (lp_interval == 0) { |
pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
6296 | INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL); |
6297 | lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL; |
6298 | } |
6299 | |
6300 | /* fill params struct with the proper values */ |
6301 | params->mode = bond_mode; |
6302 | params->xmit_policy = xmit_hashtype; |
6303 | params->miimon = miimon; |
6304 | params->num_peer_notif = num_peer_notif; |
6305 | params->arp_interval = arp_interval; |
6306 | params->arp_validate = arp_validate_value; |
6307 | params->arp_all_targets = arp_all_targets_value; |
6308 | params->missed_max = 2; |
6309 | params->updelay = updelay; |
6310 | params->downdelay = downdelay; |
6311 | params->peer_notif_delay = 0; |
6312 | params->use_carrier = use_carrier; |
6313 | params->lacp_active = 1; |
6314 | params->lacp_fast = lacp_fast; |
6315 | params->primary[0] = 0; |
6316 | params->primary_reselect = primary_reselect_value; |
6317 | params->fail_over_mac = fail_over_mac_value; |
6318 | params->tx_queues = tx_queues; |
6319 | params->all_slaves_active = all_slaves_active; |
6320 | params->resend_igmp = resend_igmp; |
6321 | params->min_links = min_links; |
6322 | params->lp_interval = lp_interval; |
6323 | params->packets_per_slave = packets_per_slave; |
6324 | params->tlb_dynamic_lb = tlb_dynamic_lb; |
6325 | params->ad_actor_sys_prio = ad_actor_sys_prio; |
eth_zero_addr(params->ad_actor_system);
6327 | params->ad_user_port_key = ad_user_port_key; |
6328 | if (packets_per_slave > 0) { |
6329 | params->reciprocal_packets_per_slave = |
reciprocal_value(packets_per_slave);
6331 | } else { |
6332 | /* reciprocal_packets_per_slave is unused if |
6333 | * packets_per_slave is 0 or 1, just initialize it |
6334 | */ |
6335 | params->reciprocal_packets_per_slave = |
6336 | (struct reciprocal_value) { 0 }; |
6337 | } |
6338 | |
6339 | if (primary) |
6340 | strscpy_pad(params->primary, primary, sizeof(params->primary)); |
6341 | |
6342 | memcpy(params->arp_targets, arp_target, sizeof(arp_target)); |
6343 | #if IS_ENABLED(CONFIG_IPV6) |
6344 | memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS); |
6345 | #endif |
6346 | |
6347 | return 0; |
6348 | } |
6349 | |
6350 | /* Called from registration process */ |
6351 | static int bond_init(struct net_device *bond_dev) |
6352 | { |
struct bonding *bond = netdev_priv(bond_dev);
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
6355 | |
netdev_dbg(bond_dev, "Begin bond_init\n");
6357 | |
6358 | bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM); |
6359 | if (!bond->wq) |
6360 | return -ENOMEM; |
6361 | |
6362 | bond->notifier_ctx = false; |
6363 | |
6364 | spin_lock_init(&bond->stats_lock); |
6365 | netdev_lockdep_set_classes(bond_dev); |
6366 | |
list_add_tail(&bond->bond_list, &bn->dev_list);
6368 | |
6369 | bond_prepare_sysfs_group(bond); |
6370 | |
6371 | bond_debug_register(bond); |
6372 | |
6373 | /* Ensure valid dev_addr */ |
if (is_zero_ether_addr(bond_dev->dev_addr) &&
6375 | bond_dev->addr_assign_type == NET_ADDR_PERM) |
eth_hw_addr_random(bond_dev);
6377 | |
6378 | return 0; |
6379 | } |
6380 | |
6381 | unsigned int bond_get_num_tx_queues(void) |
6382 | { |
6383 | return tx_queues; |
6384 | } |
6385 | |
6386 | /* Create a new bond based on the specified name and bonding parameters. |
6387 | * If name is NULL, obtain a suitable "bond%d" name for us. |
6388 | * Caller must NOT hold rtnl_lock; we need to release it here before we |
6389 | * set up our sysfs entries. |
6390 | */ |
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	int res = -ENOMEM;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev)
		goto out;

	bond = netdev_priv(bond_dev);
	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);
	if (res < 0) {
		free_netdev(bond_dev);
		goto out;
	}

	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

out:
	rtnl_unlock();
	return res;
}

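/* Per-namespace init: each struct net gets its own bond_net holding that
 * namespace's device list, procfs directory and sysfs entries, created by
 * the bond_create_proc_dir()/bond_create_sysfs() calls below.
 */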
static int __net_init bond_net_init(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
	bond_create_sysfs(bn);

	return 0;
}

static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
	struct bond_net *bn;
	struct net *net;
	LIST_HEAD(list);

	list_for_each_entry(net, net_list, exit_list) {
		bn = net_generic(net, bond_net_id);
		bond_destroy_sysfs(bn);
	}

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct bonding *bond, *tmp_bond;

		bn = net_generic(net, bond_net_id);
		list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
			unregister_netdevice_queue(bond->dev, &list);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();

	list_for_each_entry(net, net_list, exit_list) {
		bn = net_generic(net, bond_net_id);
		bond_destroy_proc_dir(bn);
	}
}

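/* Setting .id and .size makes the core allocate one struct bond_net per
 * namespace, retrievable via net_generic(net, bond_net_id) as done in
 * bond_init() and the handlers above.
 */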
static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit_batch = bond_net_exit_batch,
	.id = &bond_net_id,
	.size = sizeof(struct bond_net),
};

static int __init bonding_init(void)
{
	int i;
	int res;

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

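	/* Pre-create max_bonds devices ("bond0", "bond1", ...) in the
	 * initial network namespace.
	 */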
	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	skb_flow_dissector_init(&flow_keys_bonding,
				flow_keys_bonding_keys,
				ARRAY_SIZE(flow_keys_bonding_keys));

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;
}

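/* Module teardown: undo bonding_init() in reverse order; unregistering the
 * pernet subsystem takes down any remaining bond devices via
 * bond_net_exit_batch() above.
 */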
static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");