1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75#include <linux/uaccess.h>
76#include <linux/bitops.h>
77#include <linux/capability.h>
78#include <linux/cpu.h>
79#include <linux/types.h>
80#include <linux/kernel.h>
81#include <linux/hash.h>
82#include <linux/slab.h>
83#include <linux/sched.h>
84#include <linux/sched/mm.h>
85#include <linux/mutex.h>
86#include <linux/string.h>
87#include <linux/mm.h>
88#include <linux/socket.h>
89#include <linux/sockios.h>
90#include <linux/errno.h>
91#include <linux/interrupt.h>
92#include <linux/if_ether.h>
93#include <linux/netdevice.h>
94#include <linux/etherdevice.h>
95#include <linux/ethtool.h>
96#include <linux/notifier.h>
97#include <linux/skbuff.h>
98#include <linux/bpf.h>
99#include <linux/bpf_trace.h>
100#include <net/net_namespace.h>
101#include <net/sock.h>
102#include <net/busy_poll.h>
103#include <linux/rtnetlink.h>
104#include <linux/stat.h>
105#include <net/dst.h>
106#include <net/dst_metadata.h>
107#include <net/pkt_sched.h>
108#include <net/pkt_cls.h>
109#include <net/checksum.h>
110#include <net/xfrm.h>
111#include <linux/highmem.h>
112#include <linux/init.h>
113#include <linux/module.h>
114#include <linux/netpoll.h>
115#include <linux/rcupdate.h>
116#include <linux/delay.h>
117#include <net/iw_handler.h>
118#include <asm/current.h>
119#include <linux/audit.h>
120#include <linux/dmaengine.h>
121#include <linux/err.h>
122#include <linux/ctype.h>
123#include <linux/if_arp.h>
124#include <linux/if_vlan.h>
125#include <linux/ip.h>
126#include <net/ip.h>
127#include <net/mpls.h>
128#include <linux/ipv6.h>
129#include <linux/in.h>
130#include <linux/jhash.h>
131#include <linux/random.h>
132#include <trace/events/napi.h>
133#include <trace/events/net.h>
134#include <trace/events/skb.h>
135#include <linux/pci.h>
136#include <linux/inetdevice.h>
137#include <linux/cpu_rmap.h>
138#include <linux/static_key.h>
139#include <linux/hashtable.h>
140#include <linux/vmalloc.h>
141#include <linux/if_macvlan.h>
142#include <linux/errqueue.h>
143#include <linux/hrtimer.h>
144#include <linux/netfilter_ingress.h>
145#include <linux/crash_dump.h>
146#include <linux/sctp.h>
147#include <net/udp_tunnel.h>
148#include <linux/net_namespace.h>
149
150#include "net-sysfs.h"
151
152#define MAX_GRO_SKBS 8
153
154/* This should be increased if a protocol with a bigger head is added. */
155#define GRO_MAX_HEAD (MAX_HEADER + 128)
156
157static DEFINE_SPINLOCK(ptype_lock);
158static DEFINE_SPINLOCK(offload_lock);
159struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
160struct list_head ptype_all __read_mostly; /* Taps */
161static struct list_head offload_base __read_mostly;
162
163static int netif_rx_internal(struct sk_buff *skb);
164static int call_netdevice_notifiers_info(unsigned long val,
165 struct netdev_notifier_info *info);
166static struct napi_struct *napi_by_id(unsigned int napi_id);
167
168/*
169 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
170 * semaphore.
171 *
172 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
173 *
174 * Writers must hold the rtnl semaphore while they loop through the
175 * dev_base_head list, and hold dev_base_lock for writing when they do the
176 * actual updates. This allows pure readers to access the list even
177 * while a writer is preparing to update it.
178 *
179 * To put it another way, dev_base_lock is held for writing only to
180 * protect against pure readers; the rtnl semaphore provides the
181 * protection against other writers.
182 *
183 * See, for example usages, register_netdevice() and
184 * unregister_netdevice(), which must be called with the rtnl
185 * semaphore held.
186 */
187DEFINE_RWLOCK(dev_base_lock);
188EXPORT_SYMBOL(dev_base_lock);
189
190static DEFINE_MUTEX(ifalias_mutex);
191
192/* protects napi_hash addition/deletion and napi_gen_id */
193static DEFINE_SPINLOCK(napi_hash_lock);
194
195static unsigned int napi_gen_id = NR_CPUS;
196static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
197
198static seqcount_t devnet_rename_seq;
199
200static inline void dev_base_seq_inc(struct net *net)
201{
202 while (++net->dev_base_seq == 0)
203 ;
204}
205
206static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
207{
208 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
209
210 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
211}
212
213static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
214{
215 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
216}
217
/* Serialize access to this CPU's input packet queue when RPS may queue
 * packets onto it from other CPUs; without CONFIG_RPS the queue is only
 * touched locally, so these helpers compile to nothing.
 */
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
231
/* Device list insertion: link @dev into its namespace's device list and
 * the name/ifindex hash tables.
 */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	/* Writers must hold the RTNL semaphore; dev_base_lock additionally
	 * excludes the non-RCU readers (see the comment above dev_base_lock).
	 */
	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	/* RCU variants so lockless readers always see a consistent list
	 * while the entry is being linked in.
	 */
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	/* Invalidate cached snapshots of the device list (e.g. dumps). */
	dev_base_seq_inc(net);
}
248
/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	/* Writers must hold the RTNL semaphore (see dev_base_lock comment). */
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	/* RCU deletion: concurrent lockless readers may still be walking
	 * through dev, hence the grace-period requirement above.
	 */
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	/* Invalidate cached snapshots of the device list. */
	dev_base_seq_inc(dev_net(dev));
}
265
266/*
267 * Our notifier list
268 */
269
270static RAW_NOTIFIER_HEAD(netdev_chain);
271
272/*
273 * Device drivers call our routines to queue packets here. We empty the
274 * queue in the local softnet handler.
275 */
276
277DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
278EXPORT_PER_CPU_SYMBOL(softnet_data);
279
280#ifdef CONFIG_LOCKDEP
281/*
282 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
283 * according to dev->type
284 */
285static const unsigned short netdev_lock_type[] = {
286 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
287 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
288 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
289 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
290 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
291 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
292 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
293 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
294 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
295 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
296 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
297 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
298 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
299 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
300 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
301
302static const char *const netdev_lock_name[] = {
303 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
304 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
305 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
306 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
307 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
308 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
309 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
310 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
311 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
312 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
313 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
314 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
315 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
316 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
317 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
318
319static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
320static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
321
322static inline unsigned short netdev_lock_pos(unsigned short dev_type)
323{
324 int i;
325
326 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
327 if (netdev_lock_type[i] == dev_type)
328 return i;
329 /* the last key is used by default */
330 return ARRAY_SIZE(netdev_lock_type) - 1;
331}
332
333static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
334 unsigned short dev_type)
335{
336 int i;
337
338 i = netdev_lock_pos(dev_type);
339 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
340 netdev_lock_name[i]);
341}
342
343static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
344{
345 int i;
346
347 i = netdev_lock_pos(dev->type);
348 lockdep_set_class_and_name(&dev->addr_list_lock,
349 &netdev_addr_lock_key[i],
350 netdev_lock_name[i]);
351}
352#else
/* Without CONFIG_LOCKDEP the per-type lock classes are unnecessary;
 * provide empty stubs so callers need no #ifdefs.
 */
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
360#endif
361
362/*******************************************************************************
363 *
364 * Protocol management and registration routines
365 *
366 *******************************************************************************/
367
368
369/*
370 * Add a protocol ID to the list. Now that the input handler is
371 * smarter we can dispense with all the messy stuff that used to be
372 * here.
373 *
374 * BEWARE!!! Protocol handlers, mangling input packets,
375 * MUST BE last in hash buckets and checking protocol handlers
376 * MUST start from promiscuous ptype_all chain in net_bh.
377 * It is true now, do not change it.
378 * Explanation follows: if protocol handler, mangling packet, will
379 * be the first on list, it is not able to sense, that packet
380 * is cloned and should be copied-on-write, so that it will
381 * change it and subsequent readers will get broken packet.
382 * --ANK (980803)
383 */
384
385static inline struct list_head *ptype_head(const struct packet_type *pt)
386{
387 if (pt->type == htons(ETH_P_ALL))
388 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
389 else
390 return pt->dev ? &pt->dev->ptype_specific :
391 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
392}
393
394/**
395 * dev_add_pack - add packet handler
396 * @pt: packet type declaration
397 *
398 * Add a protocol handler to the networking stack. The passed &packet_type
399 * is linked into kernel lists and may not be freed until it has been
400 * removed from the kernel lists.
401 *
402 * This call does not sleep therefore it can not
403 * guarantee all CPU's that are in middle of receiving packets
404 * will see the new packet type (until the next received packet).
405 */
406
407void dev_add_pack(struct packet_type *pt)
408{
409 struct list_head *head = ptype_head(pt);
410
411 spin_lock(&ptype_lock);
412 list_add_rcu(&pt->list, head);
413 spin_unlock(&ptype_lock);
414}
415EXPORT_SYMBOL(dev_add_pack);
416
417/**
418 * __dev_remove_pack - remove packet handler
419 * @pt: packet type declaration
420 *
421 * Remove a protocol handler that was previously added to the kernel
422 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
423 * from the kernel lists and can be freed or reused once this function
424 * returns.
425 *
426 * The packet type might still be in use by receivers
427 * and must not be freed until after all the CPU's have gone
428 * through a quiescent state.
429 */
430void __dev_remove_pack(struct packet_type *pt)
431{
432 struct list_head *head = ptype_head(pt);
433 struct packet_type *pt1;
434
435 spin_lock(&ptype_lock);
436
437 list_for_each_entry(pt1, head, list) {
438 if (pt == pt1) {
439 list_del_rcu(&pt->list);
440 goto out;
441 }
442 }
443
444 pr_warn("dev_remove_pack: %p not found\n", pt);
445out:
446 spin_unlock(&ptype_lock);
447}
448EXPORT_SYMBOL(__dev_remove_pack);
449
450/**
451 * dev_remove_pack - remove packet handler
452 * @pt: packet type declaration
453 *
454 * Remove a protocol handler that was previously added to the kernel
455 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
456 * from the kernel lists and can be freed or reused once this function
457 * returns.
458 *
459 * This call sleeps to guarantee that no CPU is looking at the packet
460 * type after return.
461 */
462void dev_remove_pack(struct packet_type *pt)
463{
464 __dev_remove_pack(pt);
465
466 synchronize_net();
467}
468EXPORT_SYMBOL(dev_remove_pack);
469
470
471/**
472 * dev_add_offload - register offload handlers
473 * @po: protocol offload declaration
474 *
475 * Add protocol offload handlers to the networking stack. The passed
476 * &proto_offload is linked into kernel lists and may not be freed until
477 * it has been removed from the kernel lists.
478 *
479 * This call does not sleep therefore it can not
480 * guarantee all CPU's that are in middle of receiving packets
481 * will see the new offload handlers (until the next received packet).
482 */
483void dev_add_offload(struct packet_offload *po)
484{
485 struct packet_offload *elem;
486
487 spin_lock(&offload_lock);
488 list_for_each_entry(elem, &offload_base, list) {
489 if (po->priority < elem->priority)
490 break;
491 }
492 list_add_rcu(&po->list, elem->list.prev);
493 spin_unlock(&offload_lock);
494}
495EXPORT_SYMBOL(dev_add_offload);
496
497/**
498 * __dev_remove_offload - remove offload handler
499 * @po: packet offload declaration
500 *
501 * Remove a protocol offload handler that was previously added to the
502 * kernel offload handlers by dev_add_offload(). The passed &offload_type
503 * is removed from the kernel lists and can be freed or reused once this
504 * function returns.
505 *
506 * The packet type might still be in use by receivers
507 * and must not be freed until after all the CPU's have gone
508 * through a quiescent state.
509 */
510static void __dev_remove_offload(struct packet_offload *po)
511{
512 struct list_head *head = &offload_base;
513 struct packet_offload *po1;
514
515 spin_lock(&offload_lock);
516
517 list_for_each_entry(po1, head, list) {
518 if (po == po1) {
519 list_del_rcu(&po->list);
520 goto out;
521 }
522 }
523
524 pr_warn("dev_remove_offload: %p not found\n", po);
525out:
526 spin_unlock(&offload_lock);
527}
528
529/**
530 * dev_remove_offload - remove packet offload handler
531 * @po: packet offload declaration
532 *
533 * Remove a packet offload handler that was previously added to the kernel
534 * offload handlers by dev_add_offload(). The passed &offload_type is
535 * removed from the kernel lists and can be freed or reused once this
536 * function returns.
537 *
538 * This call sleeps to guarantee that no CPU is looking at the packet
539 * type after return.
540 */
541void dev_remove_offload(struct packet_offload *po)
542{
543 __dev_remove_offload(po);
544
545 synchronize_net();
546}
547EXPORT_SYMBOL(dev_remove_offload);
548
549/******************************************************************************
550 *
551 * Device Boot-time Settings Routines
552 *
553 ******************************************************************************/
554
555/* Boot time configuration table */
556static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
557
558/**
559 * netdev_boot_setup_add - add new setup entry
560 * @name: name of the device
561 * @map: configured settings for the device
562 *
563 * Adds new setup entry to the dev_boot_setup list. The function
564 * returns 0 on error and 1 on success. This is a generic routine to
565 * all netdevices.
566 */
567static int netdev_boot_setup_add(char *name, struct ifmap *map)
568{
569 struct netdev_boot_setup *s;
570 int i;
571
572 s = dev_boot_setup;
573 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
574 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
575 memset(s[i].name, 0, sizeof(s[i].name));
576 strlcpy(s[i].name, name, IFNAMSIZ);
577 memcpy(&s[i].map, map, sizeof(s[i].map));
578 break;
579 }
580 }
581
582 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
583}
584
585/**
586 * netdev_boot_setup_check - check boot time settings
587 * @dev: the netdevice
588 *
589 * Check boot time settings for the device.
590 * The found settings are set for the device to be used
591 * later in the device probing.
592 * Returns 0 if no settings found, 1 if they are.
593 */
594int netdev_boot_setup_check(struct net_device *dev)
595{
596 struct netdev_boot_setup *s = dev_boot_setup;
597 int i;
598
599 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
600 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
601 !strcmp(dev->name, s[i].name)) {
602 dev->irq = s[i].map.irq;
603 dev->base_addr = s[i].map.base_addr;
604 dev->mem_start = s[i].map.mem_start;
605 dev->mem_end = s[i].map.mem_end;
606 return 1;
607 }
608 }
609 return 0;
610}
611EXPORT_SYMBOL(netdev_boot_setup_check);
612
613
614/**
615 * netdev_boot_base - get address from boot time settings
616 * @prefix: prefix for network device
617 * @unit: id for network device
618 *
619 * Check boot time settings for the base address of device.
620 * The found settings are set for the device to be used
621 * later in the device probing.
622 * Returns 0 if no settings found.
623 */
624unsigned long netdev_boot_base(const char *prefix, int unit)
625{
626 const struct netdev_boot_setup *s = dev_boot_setup;
627 char name[IFNAMSIZ];
628 int i;
629
630 sprintf(name, "%s%d", prefix, unit);
631
632 /*
633 * If device already registered then return base of 1
634 * to indicate not to probe for this interface
635 */
636 if (__dev_get_by_name(&init_net, name))
637 return 1;
638
639 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
640 if (!strcmp(name, s[i].name))
641 return s[i].map.base_addr;
642 return 0;
643}
644
645/*
646 * Saves at boot time configured settings for any netdevice.
647 */
648int __init netdev_boot_setup(char *str)
649{
650 int ints[5];
651 struct ifmap map;
652
653 str = get_options(str, ARRAY_SIZE(ints), ints);
654 if (!str || !*str)
655 return 0;
656
657 /* Save settings */
658 memset(&map, 0, sizeof(map));
659 if (ints[0] > 0)
660 map.irq = ints[1];
661 if (ints[0] > 1)
662 map.base_addr = ints[2];
663 if (ints[0] > 2)
664 map.mem_start = ints[3];
665 if (ints[0] > 3)
666 map.mem_end = ints[4];
667
668 /* Add new entry to the list */
669 return netdev_boot_setup_add(str, &map);
670}
671
672__setup("netdev=", netdev_boot_setup);
673
674/*******************************************************************************
675 *
676 * Device Interface Subroutines
677 *
678 *******************************************************************************/
679
680/**
681 * dev_get_iflink - get 'iflink' value of a interface
682 * @dev: targeted interface
683 *
684 * Indicates the ifindex the interface is linked to.
685 * Physical interfaces have the same 'ifindex' and 'iflink' values.
686 */
687
688int dev_get_iflink(const struct net_device *dev)
689{
690 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
691 return dev->netdev_ops->ndo_get_iflink(dev);
692
693 return dev->ifindex;
694}
695EXPORT_SYMBOL(dev_get_iflink);
696
697/**
698 * dev_fill_metadata_dst - Retrieve tunnel egress information.
699 * @dev: targeted interface
700 * @skb: The packet.
701 *
702 * For better visibility of tunnel traffic OVS needs to retrieve
703 * egress tunnel information for a packet. Following API allows
704 * user to get this info.
705 */
706int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
707{
708 struct ip_tunnel_info *info;
709
710 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
711 return -EINVAL;
712
713 info = skb_tunnel_info_unclone(skb);
714 if (!info)
715 return -ENOMEM;
716 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
717 return -EINVAL;
718
719 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
720}
721EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
722
723/**
724 * __dev_get_by_name - find a device by its name
725 * @net: the applicable net namespace
726 * @name: name to find
727 *
728 * Find an interface by name. Must be called under RTNL semaphore
729 * or @dev_base_lock. If the name is found a pointer to the device
730 * is returned. If the name is not found then %NULL is returned. The
731 * reference counters are not incremented so the caller must be
732 * careful with locks.
733 */
734
735struct net_device *__dev_get_by_name(struct net *net, const char *name)
736{
737 struct net_device *dev;
738 struct hlist_head *head = dev_name_hash(net, name);
739
740 hlist_for_each_entry(dev, head, name_hlist)
741 if (!strncmp(dev->name, name, IFNAMSIZ))
742 return dev;
743
744 return NULL;
745}
746EXPORT_SYMBOL(__dev_get_by_name);
747
748/**
749 * dev_get_by_name_rcu - find a device by its name
750 * @net: the applicable net namespace
751 * @name: name to find
752 *
753 * Find an interface by name.
754 * If the name is found a pointer to the device is returned.
755 * If the name is not found then %NULL is returned.
756 * The reference counters are not incremented so the caller must be
757 * careful with locks. The caller must hold RCU lock.
758 */
759
760struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
761{
762 struct net_device *dev;
763 struct hlist_head *head = dev_name_hash(net, name);
764
765 hlist_for_each_entry_rcu(dev, head, name_hlist)
766 if (!strncmp(dev->name, name, IFNAMSIZ))
767 return dev;
768
769 return NULL;
770}
771EXPORT_SYMBOL(dev_get_by_name_rcu);
772
773/**
774 * dev_get_by_name - find a device by its name
775 * @net: the applicable net namespace
776 * @name: name to find
777 *
778 * Find an interface by name. This can be called from any
779 * context and does its own locking. The returned handle has
780 * the usage count incremented and the caller must use dev_put() to
781 * release it when it is no longer needed. %NULL is returned if no
782 * matching device is found.
783 */
784
785struct net_device *dev_get_by_name(struct net *net, const char *name)
786{
787 struct net_device *dev;
788
789 rcu_read_lock();
790 dev = dev_get_by_name_rcu(net, name);
791 if (dev)
792 dev_hold(dev);
793 rcu_read_unlock();
794 return dev;
795}
796EXPORT_SYMBOL(dev_get_by_name);
797
798/**
799 * __dev_get_by_index - find a device by its ifindex
800 * @net: the applicable net namespace
801 * @ifindex: index of device
802 *
803 * Search for an interface by index. Returns %NULL if the device
804 * is not found or a pointer to the device. The device has not
805 * had its reference counter increased so the caller must be careful
806 * about locking. The caller must hold either the RTNL semaphore
807 * or @dev_base_lock.
808 */
809
810struct net_device *__dev_get_by_index(struct net *net, int ifindex)
811{
812 struct net_device *dev;
813 struct hlist_head *head = dev_index_hash(net, ifindex);
814
815 hlist_for_each_entry(dev, head, index_hlist)
816 if (dev->ifindex == ifindex)
817 return dev;
818
819 return NULL;
820}
821EXPORT_SYMBOL(__dev_get_by_index);
822
823/**
824 * dev_get_by_index_rcu - find a device by its ifindex
825 * @net: the applicable net namespace
826 * @ifindex: index of device
827 *
828 * Search for an interface by index. Returns %NULL if the device
829 * is not found or a pointer to the device. The device has not
830 * had its reference counter increased so the caller must be careful
831 * about locking. The caller must hold RCU lock.
832 */
833
834struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
835{
836 struct net_device *dev;
837 struct hlist_head *head = dev_index_hash(net, ifindex);
838
839 hlist_for_each_entry_rcu(dev, head, index_hlist)
840 if (dev->ifindex == ifindex)
841 return dev;
842
843 return NULL;
844}
845EXPORT_SYMBOL(dev_get_by_index_rcu);
846
847
848/**
849 * dev_get_by_index - find a device by its ifindex
850 * @net: the applicable net namespace
851 * @ifindex: index of device
852 *
853 * Search for an interface by index. Returns NULL if the device
854 * is not found or a pointer to the device. The device returned has
855 * had a reference added and the pointer is safe until the user calls
856 * dev_put to indicate they have finished with it.
857 */
858
859struct net_device *dev_get_by_index(struct net *net, int ifindex)
860{
861 struct net_device *dev;
862
863 rcu_read_lock();
864 dev = dev_get_by_index_rcu(net, ifindex);
865 if (dev)
866 dev_hold(dev);
867 rcu_read_unlock();
868 return dev;
869}
870EXPORT_SYMBOL(dev_get_by_index);
871
872/**
873 * dev_get_by_napi_id - find a device by napi_id
874 * @napi_id: ID of the NAPI struct
875 *
876 * Search for an interface by NAPI ID. Returns %NULL if the device
877 * is not found or a pointer to the device. The device has not had
878 * its reference counter increased so the caller must be careful
879 * about locking. The caller must hold RCU lock.
880 */
881
882struct net_device *dev_get_by_napi_id(unsigned int napi_id)
883{
884 struct napi_struct *napi;
885
886 WARN_ON_ONCE(!rcu_read_lock_held());
887
888 if (napi_id < MIN_NAPI_ID)
889 return NULL;
890
891 napi = napi_by_id(napi_id);
892
893 return napi ? napi->dev : NULL;
894}
895EXPORT_SYMBOL(dev_get_by_napi_id);
896
897/**
898 * netdev_get_name - get a netdevice name, knowing its ifindex.
899 * @net: network namespace
900 * @name: a pointer to the buffer where the name will be stored.
901 * @ifindex: the ifindex of the interface to get the name from.
902 *
903 * The use of raw_seqcount_begin() and cond_resched() before
904 * retrying is required as we want to give the writers a chance
905 * to complete when CONFIG_PREEMPT is not set.
906 */
907int netdev_get_name(struct net *net, char *name, int ifindex)
908{
909 struct net_device *dev;
910 unsigned int seq;
911
912retry:
913 seq = raw_seqcount_begin(&devnet_rename_seq);
914 rcu_read_lock();
915 dev = dev_get_by_index_rcu(net, ifindex);
916 if (!dev) {
917 rcu_read_unlock();
918 return -ENODEV;
919 }
920
921 strcpy(name, dev->name);
922 rcu_read_unlock();
923 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
924 cond_resched();
925 goto retry;
926 }
927
928 return 0;
929}
930
931/**
932 * dev_getbyhwaddr_rcu - find a device by its hardware address
933 * @net: the applicable net namespace
934 * @type: media type of device
935 * @ha: hardware address
936 *
937 * Search for an interface by MAC address. Returns NULL if the device
938 * is not found or a pointer to the device.
939 * The caller must hold RCU or RTNL.
940 * The returned device has not had its ref count increased
941 * and the caller must therefore be careful about locking
942 *
943 */
944
945struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
946 const char *ha)
947{
948 struct net_device *dev;
949
950 for_each_netdev_rcu(net, dev)
951 if (dev->type == type &&
952 !memcmp(dev->dev_addr, ha, dev->addr_len))
953 return dev;
954
955 return NULL;
956}
957EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
958
959struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
960{
961 struct net_device *dev;
962
963 ASSERT_RTNL();
964 for_each_netdev(net, dev)
965 if (dev->type == type)
966 return dev;
967
968 return NULL;
969}
970EXPORT_SYMBOL(__dev_getfirstbyhwtype);
971
972struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
973{
974 struct net_device *dev, *ret = NULL;
975
976 rcu_read_lock();
977 for_each_netdev_rcu(net, dev)
978 if (dev->type == type) {
979 dev_hold(dev);
980 ret = dev;
981 break;
982 }
983 rcu_read_unlock();
984 return ret;
985}
986EXPORT_SYMBOL(dev_getfirstbyhwtype);
987
988/**
989 * __dev_get_by_flags - find any device with given flags
990 * @net: the applicable net namespace
991 * @if_flags: IFF_* values
992 * @mask: bitmask of bits in if_flags to check
993 *
994 * Search for any interface with the given flags. Returns NULL if a device
995 * is not found or a pointer to the device. Must be called inside
996 * rtnl_lock(), and result refcount is unchanged.
997 */
998
999struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1000 unsigned short mask)
1001{
1002 struct net_device *dev, *ret;
1003
1004 ASSERT_RTNL();
1005
1006 ret = NULL;
1007 for_each_netdev(net, dev) {
1008 if (((dev->flags ^ if_flags) & mask) == 0) {
1009 ret = dev;
1010 break;
1011 }
1012 }
1013 return ret;
1014}
1015EXPORT_SYMBOL(__dev_get_by_flags);
1016
1017/**
1018 * dev_valid_name - check if name is okay for network device
1019 * @name: name string
1020 *
1021 * Network device names need to be valid file names to
1022 * to allow sysfs to work. We also disallow any kind of
1023 * whitespace.
1024 */
1025bool dev_valid_name(const char *name)
1026{
1027 if (*name == '\0')
1028 return false;
1029 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1030 return false;
1031 if (!strcmp(name, ".") || !strcmp(name, ".."))
1032 return false;
1033
1034 while (*name) {
1035 if (*name == '/' || *name == ':' || isspace(*name))
1036 return false;
1037 name++;
1038 }
1039 return true;
1040}
1041EXPORT_SYMBOL(dev_valid_name);
1042
/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf: scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" it will try and find a suitable
 * id. It scans list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user. There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		/* Scan existing devices whose names match the pattern and
		 * mark their unit numbers as taken.
		 */
		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	/* Either no '%' in the pattern (i stays 0) or i is the first free
	 * unit; re-check the resulting name is actually unused.
	 */
	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
1110
1111static int dev_alloc_name_ns(struct net *net,
1112 struct net_device *dev,
1113 const char *name)
1114{
1115 char buf[IFNAMSIZ];
1116 int ret;
1117
1118 BUG_ON(!net);
1119 ret = __dev_alloc_name(net, name, buf);
1120 if (ret >= 0)
1121 strlcpy(dev->name, buf, IFNAMSIZ);
1122 return ret;
1123}
1124
/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" it will try and find a suitable
 * id. It scans list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	struct net *net = dev_net(dev);

	return dev_alloc_name_ns(net, dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
1144
1145int dev_get_valid_name(struct net *net, struct net_device *dev,
1146 const char *name)
1147{
1148 BUG_ON(!net);
1149
1150 if (!dev_valid_name(name))
1151 return -EINVAL;
1152
1153 if (strchr(name, '%'))
1154 return dev_alloc_name_ns(net, dev, name);
1155 else if (__dev_get_by_name(net, name))
1156 return -EEXIST;
1157 else if (dev->name != name)
1158 strlcpy(dev->name, name, IFNAMSIZ);
1159
1160 return 0;
1161}
1162EXPORT_SYMBOL(dev_get_valid_name);
1163
/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change name of a device, can pass format strings "eth%d".
 * for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	/* Renaming a running interface is refused. */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* The seqcount lets lockless readers of dev->name detect an
	 * in-progress rename and retry.
	 */
	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	/* Re-hash the device under its new name; the RCU grace period in
	 * between lets concurrent name-hash readers finish with the old
	 * entry before it is re-inserted.
	 */
	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			/* First notifier failure: undo the rename exactly
			 * once by swapping old and new names and looping
			 * back through the device_rename() path.
			 */
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			/* Second failure (while rolling back): give up. */
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
1252
/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set ifalias for a device. Passing @len == 0 clears the alias.
 * Returns @len on success or a negative errno.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		/* +1 for the NUL terminator appended below. */
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	/* Publish the new alias; rcu_swap_protected() leaves the previous
	 * alias (if any) in new_alias for disposal below.
	 */
	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	/* Free the old alias only after a grace period, so concurrent
	 * RCU readers (see dev_get_alias()) stay safe.
	 */
	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
1288
1289/**
1290 * dev_get_alias - get ifalias of a device
1291 * @dev: device
1292 * @name: buffer to store name of ifalias
1293 * @len: size of buffer
1294 *
1295 * get ifalias for a device. Caller must make sure dev cannot go
1296 * away, e.g. rcu read lock or own a reference count to device.
1297 */
1298int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1299{
1300 const struct dev_ifalias *alias;
1301 int ret = 0;
1302
1303 rcu_read_lock();
1304 alias = rcu_dereference(dev->ifalias);
1305 if (alias)
1306 ret = snprintf(name, len, "%s", alias->ifalias);
1307 rcu_read_unlock();
1308
1309 return ret;
1310}
1311
/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features. Must be called
 * under RTNL (the notifier chain asserts it).
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
1323
1324/**
1325 * netdev_state_change - device changes state
1326 * @dev: device to cause notification
1327 *
1328 * Called to indicate a device has changed state. This function calls
1329 * the notifier chains for netdev_chain and sends a NEWLINK message
1330 * to the routing socket.
1331 */
1332void netdev_state_change(struct net_device *dev)
1333{
1334 if (dev->flags & IFF_UP) {
1335 struct netdev_notifier_change_info change_info = {
1336 .info.dev = dev,
1337 };
1338
1339 call_netdevice_notifiers_info(NETDEV_CHANGE,
1340 &change_info.info);
1341 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1342 }
1343}
1344EXPORT_SYMBOL(netdev_state_change);
1345
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 *
 * Takes RTNL itself, so must not be called with RTNL already held.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
1364
/* Bring @dev up without sending notifications; the caller (see
 * dev_open()) emits NETDEV_UP and the rtnetlink message afterwards.
 * Returns 0 on success or a negative errno.
 */
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	/* NOTE(review): a notifier veto returns here without the
	 * netpoll_poll_enable() below — confirm this asymmetry is
	 * intentional.
	 */
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/* Device is up: enable qdiscs, load the rx filter and
		 * feed the MAC into the entropy pool.
		 */
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
1407
1408/**
1409 * dev_open - prepare an interface for use.
1410 * @dev: device to open
1411 *
1412 * Takes a device from down to up state. The device's private open
1413 * function is invoked and then the multicast lists are loaded. Finally
1414 * the device is moved into the up state and a %NETDEV_UP message is
1415 * sent to the netdev notifier chain.
1416 *
1417 * Calling this function on an active interface is a nop. On a failure
1418 * a negative errno code is returned.
1419 */
1420int dev_open(struct net_device *dev)
1421{
1422 int ret;
1423
1424 if (dev->flags & IFF_UP)
1425 return 0;
1426
1427 ret = __dev_open(dev);
1428 if (ret < 0)
1429 return ret;
1430
1431 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1432 call_netdevice_notifiers(NETDEV_UP, dev);
1433
1434 return ret;
1435}
1436EXPORT_SYMBOL(dev_open);
1437
/* Take every device on @head (linked via ->close_list) from up to
 * down.  NETDEV_DOWN and rtnetlink notifications are left to the
 * callers (see dev_close_many()).
 */
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	/* Pass 1: announce GOING_DOWN and clear the running state on
	 * every device before any of them is actually stopped.
	 */
	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	/* Pass 2: call each driver's stop routine and mark the device
	 * administratively down.
	 */
	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}
1481
/* Close a single device by wrapping it in a one-entry close list and
 * handing it to __dev_close_many().  No notifications are sent.
 */
static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
1490
1491void dev_close_many(struct list_head *head, bool unlink)
1492{
1493 struct net_device *dev, *tmp;
1494
1495 /* Remove the devices that don't need to be closed */
1496 list_for_each_entry_safe(dev, tmp, head, close_list)
1497 if (!(dev->flags & IFF_UP))
1498 list_del_init(&dev->close_list);
1499
1500 __dev_close_many(head);
1501
1502 list_for_each_entry_safe(dev, tmp, head, close_list) {
1503 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1504 call_netdevice_notifiers(NETDEV_DOWN, dev);
1505 if (unlink)
1506 list_del_init(&dev->close_list);
1507 }
1508}
1509EXPORT_SYMBOL(dev_close_many);
1510
1511/**
1512 * dev_close - shutdown an interface.
1513 * @dev: device to shutdown
1514 *
1515 * This function moves an active device into down state. A
1516 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1517 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1518 * chain.
1519 */
1520void dev_close(struct net_device *dev)
1521{
1522 if (dev->flags & IFF_UP) {
1523 LIST_HEAD(single);
1524
1525 list_add(&dev->close_list, &single);
1526 dev_close_many(&single, true);
1527 list_del(&single);
1528 }
1529}
1530EXPORT_SYMBOL(dev_close);
1531
1532
1533/**
1534 * dev_disable_lro - disable Large Receive Offload on a device
1535 * @dev: device
1536 *
1537 * Disable Large Receive Offload (LRO) on a net device. Must be
1538 * called under RTNL. This is needed if received packets may be
1539 * forwarded to another interface.
1540 */
1541void dev_disable_lro(struct net_device *dev)
1542{
1543 struct net_device *lower_dev;
1544 struct list_head *iter;
1545
1546 dev->wanted_features &= ~NETIF_F_LRO;
1547 netdev_update_features(dev);
1548
1549 if (unlikely(dev->features & NETIF_F_LRO))
1550 netdev_WARN(dev, "failed to disable LRO!\n");
1551
1552 netdev_for_each_lower_dev(dev, lower_dev, iter)
1553 dev_disable_lro(lower_dev);
1554}
1555EXPORT_SYMBOL(dev_disable_lro);
1556
/**
 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 * @dev: device
 *
 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
 * called under RTNL. This is needed if Generic XDP is installed on
 * the device. Unlike dev_disable_lro() this does not recurse into
 * lower devices.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	/* The driver may refuse to clear the feature; warn loudly. */
	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
1573
/* Map a netdev notifier event to its "NETDEV_*" name for logging.
 * N() expands one enum value into a case/return pair; the switch has
 * no default, so compilers can warn when a new netdev_cmd value is
 * missing from the list.
 */
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1594
1595static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1596 struct net_device *dev)
1597{
1598 struct netdev_notifier_info info = {
1599 .dev = dev,
1600 };
1601
1602 return nb->notifier_call(nb, val, &info);
1603}
1604
/* Nonzero during early boot; while set, register_netdevice_notifier()
 * skips replaying REGISTER/UP events.  Cleared elsewhere — presumably
 * once core device init has run; confirm against net_dev_init().
 */
static int dev_boot_phase = 1;
1606
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	/* Replay REGISTER (and UP for running devices) for every existing
	 * device in every namespace, so the new notifier sees a complete
	 * picture.
	 */
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	/* Unwind: synthesize DOWN/UNREGISTER for every device the new
	 * notifier already saw, stopping at the one that failed, then
	 * remove the notifier from the chain.
	 */
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
1676
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked into the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	/* Mirror of the replay in register_netdevice_notifier(): let the
	 * departing notifier see every device go DOWN and UNREGISTER.
	 */
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
1720
/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain(). Callers must hold RTNL.
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}
1736
1737/**
1738 * call_netdevice_notifiers - call all network notifier blocks
1739 * @val: value passed unmodified to notifier function
1740 * @dev: net_device pointer passed unmodified to notifier function
1741 *
1742 * Call all network notifier blocks. Parameters and return value
1743 * are as for raw_notifier_call_chain().
1744 */
1745
1746int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1747{
1748 struct netdev_notifier_info info = {
1749 .dev = dev,
1750 };
1751
1752 return call_netdevice_notifiers_info(val, &info);
1753}
1754EXPORT_SYMBOL(call_netdevice_notifiers);
1755
#ifdef CONFIG_NET_INGRESS
/* Reference-counted static key; bumped as ingress users come and go.
 * Presumably checked in the RX fast path elsewhere in this file so the
 * cost is only paid when an ingress hook is installed — confirm.
 */
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif
1771
#ifdef CONFIG_NET_EGRESS
/* Egress counterpart of ingress_needed_key: reference-counted static
 * key toggled as egress users register and unregister.
 */
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
1787
/* Static key gating skb timestamping (see net_timestamp_set()). */
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef HAVE_JUMP_LABEL
/* Increments/decrements requested while deferring to the work item. */
static atomic_t netstamp_needed_deferred;
/* Current number of timestamp users; the static key may briefly lag. */
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	/* Fold all deferred changes into the user count, then bring the
	 * static key in line with the result.
	 */
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
1805
/* Register one more timestamp user.  With jump labels the common case
 * just bumps netstamp_wanted while it is already positive; the 0 -> 1
 * transition (which must flip the static key) is deferred to
 * netstamp_work.
 */
void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		/* cmpxchg loop: retry if another CPU changed the count. */
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);
1825
/* Drop one timestamp user.  Mirrors net_enable_timestamp(): decrements
 * netstamp_wanted directly while more than one user remains; the final
 * 1 -> 0 transition (which must disable the static key) is deferred to
 * netstamp_work.
 */
void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		/* cmpxchg loop: retry if another CPU changed the count. */
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
1845
/* Clear any stale timestamp, then stamp the skb with the current time
 * — but only when at least one timestamp user is registered.
 */
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}
1852
/* Stamp SKB with the current time iff timestamping is enabled, COND
 * holds, and the skb is not already stamped.
 * NOTE(review): this is a bare if, not do { } while (0) — it would
 * bind to a following "else"; confirm all call sites use it as a
 * standalone statement.
 */
#define net_timestamp_check(COND, SKB) 			\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\
1858
1859bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
1860{
1861 unsigned int len;
1862
1863 if (!(dev->flags & IFF_UP))
1864 return false;
1865
1866 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1867 if (skb->len <= len)
1868 return true;
1869
1870 /* if TSO is enabled, we don't care about the length as the packet
1871 * could be forwarded without being segmented before
1872 */
1873 if (skb_is_gso(skb))
1874 return true;
1875
1876 return false;
1877}
1878EXPORT_SYMBOL_GPL(is_skb_forwardable);
1879
1880int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1881{
1882 int ret = ____dev_forward_skb(dev, skb);
1883
1884 if (likely(!ret)) {
1885 skb->protocol = eth_type_trans(skb, dev);
1886 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1887 }
1888
1889 return ret;
1890}
1891EXPORT_SYMBOL_GPL(__dev_forward_skb);
1892
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int err = __dev_forward_skb(dev, skb);

	return err ? err : netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
1916
/* Deliver @skb to one packet tap.  An extra user reference is taken
 * because the handler is expected to consume one.  Returns the
 * handler's verdict, or -ENOMEM when skb_orphan_frags_rx() fails.
 */
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
1926
1927static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1928 struct packet_type **pt,
1929 struct net_device *orig_dev,
1930 __be16 type,
1931 struct list_head *ptype_list)
1932{
1933 struct packet_type *ptype, *pt_prev = *pt;
1934
1935 list_for_each_entry_rcu(ptype, ptype_list, list) {
1936 if (ptype->type != type)
1937 continue;
1938 if (pt_prev)
1939 deliver_skb(skb, pt_prev, orig_dev);
1940 pt_prev = ptype;
1941 }
1942 *pt = pt_prev;
1943}
1944
1945static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1946{
1947 if (!ptype->af_packet_priv || !skb->sk)
1948 return false;
1949
1950 if (ptype->id_match)
1951 return ptype->id_match(ptype, skb->sk);
1952 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1953 return true;
1954
1955 return false;
1956}
1957
/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		/* Deliver the clone to the previous match; delivery lags
		 * one tap behind so the last tap (handled at out_unlock)
		 * gets the clone without an extra reference.
		 */
		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	/* After the global taps, repeat for the device-specific taps. */
	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	/* Hand the clone's final reference to the last matching tap. */
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2025
2026/**
2027 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2028 * @dev: Network device
2029 * @txq: number of queues available
2030 *
2031 * If real_num_tx_queues is changed the tc mappings may no longer be
2032 * valid. To resolve this verify the tc mapping remains valid and if
2033 * not NULL the mapping. With no priorities mapping to this
2034 * offset/count pair it will no longer be used. In the worst case TC0
2035 * is invalid nothing can be done so disable priority mappings. If is
2036 * expected that drivers will fix this mapping if they can before
2037 * calling netif_set_real_num_tx_queues.
2038 */
2039static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2040{
2041 int i;
2042 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2043
2044 /* If TC0 is invalidated disable TC mapping */
2045 if (tc->offset + tc->count > txq) {
2046 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2047 dev->num_tc = 0;
2048 return;
2049 }
2050
2051 /* Invalidated prio to tc mappings set to TC0 */
2052 for (i = 1; i < TC_BITMASK + 1; i++) {
2053 int q = netdev_get_prio_tc_map(dev, i);
2054
2055 tc = &dev->tc_to_txq[q];
2056 if (tc->offset + tc->count > txq) {
2057 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2058 i, q);
2059 netdev_set_prio_tc_map(dev, i, 0);
2060 }
2061 }
2062}
2063
2064int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2065{
2066 if (dev->num_tc) {
2067 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2068 int i;
2069
2070 /* walk through the TCs and see if it falls into any of them */
2071 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2072 if ((txq - tc->offset) < tc->count)
2073 return i;
2074 }
2075
2076 /* didn't find it, just return -1 to indicate no match */
2077 return -1;
2078 }
2079
2080 return 0;
2081}
2082EXPORT_SYMBOL(netdev_txq_to_tc);
2083
#ifdef CONFIG_XPS
/* Static keys gating XPS map lookups; taken/released as maps are
 * installed and torn down (see netif_reset_xps_queues()).
 */
struct static_key xps_needed __read_mostly;
EXPORT_SYMBOL(xps_needed);
struct static_key xps_rxqs_needed __read_mostly;
EXPORT_SYMBOL(xps_rxqs_needed);
/* Serializes updates to dev->xps_cpus_map / dev->xps_rxqs_map. */
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2092
/* Remove tx queue @index from the xps map at @tci.  Returns true if a
 * map remains at @tci afterwards; false when there is no map or it was
 * freed here because @index was its last entry.
 */
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		/* Remove by overwriting with the last entry. */
		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		/* Last entry: unpublish and free the whole map after a
		 * grace period.
		 */
		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
2120
/* Remove tx queues [@offset, @offset + @count) from every traffic
 * class map belonging to @cpu.  Returns true if any of this cpu's maps
 * still has entries afterwards.
 */
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	/* Devices without traffic classes act as having a single TC. */
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		/* i < 0 means the inner loop completed without hitting an
		 * empty map, i.e. this tci still has an active map.
		 */
		active |= i < 0;
	}

	return active;
}
2142
/* Remove the given tx-queue range from the maps of every id selected
 * by @mask/@nr_ids.  If no map entry remains, unpublish @dev_maps and
 * — for the CPU map — reset the affected queues' NUMA hints.
 */
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
			   u16 offset, u16 count, bool is_rxqs_map)
{
	bool active = false;
	int i, j;

	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
	     j < nr_ids;)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
					       count);
	if (!active) {
		if (is_rxqs_map) {
			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
		} else {
			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);

			/* Clear the NUMA hint of each removed tx queue. */
			for (i = offset + (count - 1); count--; i--)
				netdev_queue_numa_node_write(
					netdev_get_tx_queue(dev, i),
					NUMA_NO_NODE);
		}
		/* Free the whole map structure after a grace period. */
		kfree_rcu(dev_maps, rcu);
	}
}
2168
/* Remove tx queues [offset, offset + count) from both the CPU and the
 * rx-queue XPS maps of @dev, dropping the static-key references that
 * were taken when the maps were installed.
 *
 * Lock order: cpus_read_lock -> xps_map_mutex (the static-key
 * *_cpuslocked helpers require the cpus lock to be held).
 */
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	const unsigned long *possible_mask = NULL;
	struct xps_dev_maps *dev_maps;
	unsigned int nr_ids;

	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed)) {
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		if (dev_maps) {
			nr_ids = dev->num_rx_queues;
			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
				       offset, count, true);
		}
	}

	dev_maps = xmap_dereference(dev->xps_cpus_map);
	if (!dev_maps)
		goto out_no_maps;

	/* NULL mask means "all ids" for the single-CPU case. */
	if (num_possible_cpus() > 1)
		possible_mask = cpumask_bits(cpu_possible_mask);
	nr_ids = nr_cpu_ids;
	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
		       false);

out_no_maps:
	if (static_key_enabled(&xps_rxqs_needed))
		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);

	static_key_slow_dec_cpuslocked(&xps_needed);
	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}
2209
2210static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2211{
2212 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2213}
2214
/* Return an xps_map that contains (or has room for) tx-queue @index.
 *
 * Returns @map unchanged if @index is already present or fits in the
 * allocated space; otherwise allocates a larger (doubled) map, copies
 * the existing entries, and returns it.  Returns NULL on allocation
 * failure.  The original @map is never freed here - callers reconcile
 * old vs new maps once the full update succeeds.
 */
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
	 * map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
2254
/* Must be called under cpus_read_lock.
 *
 * Install tx-queue @index into the XPS map (CPU map, or rx-queue map
 * when @is_rxqs_map) of @dev for every attribute id set in @mask.  A
 * complete replacement map set is built, published with RCU, and the
 * old set is reconciled/freed.  Returns 0 or -EINVAL/-ENOMEM.
 */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, bool is_rxqs_map)
{
	const unsigned long *online_mask = NULL, *possible_mask = NULL;
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;
	unsigned int nr_ids;

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);
	if (is_rxqs_map) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1) {
			online_mask = cpumask_bits(cpu_online_mask);
			possible_mask = cpumask_bits(cpu_possible_mask);
		}
		dev_maps = xmap_dereference(dev->xps_cpus_map);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = j * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
				 NULL;

		map = expand_xps_map(map, j, index, is_rxqs_map);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	/* Mask intersected with online ids was empty: nothing to install. */
	if (!new_dev_maps)
		goto out_no_new_maps;

	static_key_slow_inc_cpuslocked(&xps_needed);
	if (is_rxqs_map)
		static_key_slow_inc_cpuslocked(&xps_rxqs_needed);

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* We need to explicitly update tci as prevous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = j * num_tc + tc;

		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (!is_rxqs_map) {
				/* Track a common NUMA node across all CPUs
				 * steering to this queue; -1 means mixed.
				 */
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}
	}

	/* Publish the new map set for readers. */
	if (is_rxqs_map)
		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
	else
		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	/* Free only the per-slot maps that were actually replaced. */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (!is_rxqs_map) {
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);
	}

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = tc, tci = j * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!netif_attr_test_mask(j, mask, nr_ids) ||
		    !netif_attr_test_online(j, online_mask, nr_ids))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		if (is_rxqs_map)
			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
		else
			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			/* Only free maps we allocated, never inherited ones. */
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2455
2456int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2457 u16 index)
2458{
2459 int ret;
2460
2461 cpus_read_lock();
2462 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2463 cpus_read_unlock();
2464
2465 return ret;
2466}
2467EXPORT_SYMBOL(netif_set_xps_queue);
2468
2469#endif
2470static void netdev_unbind_all_sb_channels(struct net_device *dev)
2471{
2472 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2473
2474 /* Unbind any subordinate channels */
2475 while (txq-- != &dev->_tx[0]) {
2476 if (txq->sb_dev)
2477 netdev_unbind_sb_channel(dev, txq->sb_dev);
2478 }
2479}
2480
/* Clear all traffic-class state of @dev: XPS maps, subordinate channel
 * bindings, tc count, tc-to-txq ranges and priority-to-tc mappings.
 */
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
2494
/* Assign the tx-queue range [offset, offset + count) to traffic class
 * @tc of @dev.  Existing XPS entries covering that range are dropped
 * first, since the queue-to-class relationship is changing.
 * Returns 0, or -EINVAL if @tc is not a configured traffic class.
 */
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
2508
/* Set the number of traffic classes on @dev.  All XPS maps and
 * subordinate channel bindings are invalidated by the change and are
 * reset first.  Returns 0, or -EINVAL if @num_tc exceeds TC_MAX_QUEUE.
 */
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
2523
2524void netdev_unbind_sb_channel(struct net_device *dev,
2525 struct net_device *sb_dev)
2526{
2527 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2528
2529#ifdef CONFIG_XPS
2530 netif_reset_xps_queues_gt(sb_dev, 0);
2531#endif
2532 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2533 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2534
2535 while (txq-- != &dev->_tx[0]) {
2536 if (txq->sb_dev == sb_dev)
2537 txq->sb_dev = NULL;
2538 }
2539}
2540EXPORT_SYMBOL(netdev_unbind_sb_channel);
2541
/* Hand the tx-queue range [offset, offset + count) of @dev to traffic
 * class @tc of subordinate device @sb_dev.  @sb_dev must already be in
 * subordinate-channel mode (negative num_tc, see netdev_set_sb_channel)
 * and @tc must be a valid class on @dev.  Returns 0 or -EINVAL.
 */
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{
	/* Make certain the sb_dev and dev are already configured */
	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
		return -EINVAL;

	/* We cannot hand out queues we don't have */
	if ((offset + count) > dev->real_num_tx_queues)
		return -EINVAL;

	/* Record the mapping */
	sb_dev->tc_to_txq[tc].count = count;
	sb_dev->tc_to_txq[tc].offset = offset;

	/* Provide a way for Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
	while (count--)
		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;

	return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2567
/* Mark @dev as subordinate channel @channel by storing the channel as a
 * negative num_tc; a negative num_tc is what identifies a subordinate
 * device elsewhere (e.g. netdev_bind_sb_channel_queue).
 * Returns 0, -ENODEV for multiqueue devices, or -EINVAL for out-of-range
 * channel numbers.
 */
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
2587
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 *
 * Must hold RTNL when the device is (un)registering.  Returns 0 on
 * success or a negative errno from the sysfs kobject update.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	/* Shrinking the queue count requires flushing stale skbs below. */
	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			/* Wait for in-flight readers of the old count, then
			 * drop qdisc and XPS state of the removed queues.
			 */
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2630
2631#ifdef CONFIG_SYSFS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		/* Keep the per-queue sysfs objects in sync with the count. */
		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2662#endif
2663
2664/**
2665 * netif_get_num_default_rss_queues - default number of RSS queues
2666 *
2667 * This routine should set an upper limit on the number of RSS queues
2668 * used by default by multiqueue devices.
2669 */
2670int netif_get_num_default_rss_queues(void)
2671{
2672 return is_kdump_kernel() ?
2673 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2674}
2675EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2676
/* Append @q to this CPU's output queue and kick the TX softirq.
 * Runs with local interrupts disabled so the per-cpu softnet_data
 * tail pointer update is atomic w.r.t. the softirq handler.
 */
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
2690
2691void __netif_schedule(struct Qdisc *q)
2692{
2693 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2694 __netif_reschedule(q);
2695}
2696EXPORT_SYMBOL(__netif_schedule);
2697
/* Control-block overlay used while an skb sits on the per-cpu
 * completion queue awaiting deferred free (see __dev_kfree_skb_irq).
 */
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

/* View skb->cb as the deferred-free control block. */
static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
2706
/* Schedule the qdisc attached to @txq, unless the queue is stopped or
 * frozen (any XOFF state bit set).
 */
void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);
2718
2719void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2720{
2721 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2722 struct Qdisc *q;
2723
2724 rcu_read_lock();
2725 q = rcu_dereference(dev_queue->qdisc);
2726 __netif_schedule(q);
2727 rcu_read_unlock();
2728 }
2729}
2730EXPORT_SYMBOL(netif_tx_wake_queue);
2731
/* Queue @skb for freeing from the TX softirq.  Safe to call from hard
 * IRQ context, where kfree_skb() itself is not.
 */
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		/* Sole owner: pair with the release in skb_unref() before
		 * zeroing the refcount without an atomic RMW.
		 */
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		/* Other references remain; nothing to free yet. */
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
2753
2754void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2755{
2756 if (in_irq() || irqs_disabled())
2757 __dev_kfree_skb_irq(skb, reason);
2758 else
2759 dev_kfree_skb(skb);
2760}
2761EXPORT_SYMBOL(__dev_kfree_skb_any);
2762
2763
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	/* Only the caller that clears PRESENT stops the queues, and only
	 * if the interface is administratively up.
	 */
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);
2778
/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	/* Only the caller that sets PRESENT restarts the queues and the
	 * watchdog, and only if the interface is administratively up.
	 */
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
2794
2795/*
2796 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2797 * to be used as a distribution range.
2798 */
2799static u16 skb_tx_hash(const struct net_device *dev,
2800 const struct net_device *sb_dev,
2801 struct sk_buff *skb)
2802{
2803 u32 hash;
2804 u16 qoffset = 0;
2805 u16 qcount = dev->real_num_tx_queues;
2806
2807 if (dev->num_tc) {
2808 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2809
2810 qoffset = sb_dev->tc_to_txq[tc].offset;
2811 qcount = sb_dev->tc_to_txq[tc].count;
2812 }
2813
2814 if (skb_rx_queue_recorded(skb)) {
2815 hash = skb_get_rx_queue(skb);
2816 while (unlikely(hash >= qcount))
2817 hash -= qcount;
2818 return hash + qoffset;
2819 }
2820
2821 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2822}
2823
/* Ratelimited WARN dumping the skb's offload-relevant state when a
 * checksum/GSO request reaches code that cannot honor it.  Blames the
 * driver (parent device string) when one is attached.
 */
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
2846
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 *
 * Computes the Internet checksum over the checksum-start region in
 * software and stores it at csum_offset, leaving ip_summed as
 * CHECKSUM_NONE.  Returns 0 or a negative errno.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	/* GSO packets must have their checksum filled per-segment. */
	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	/* Get a private, writable copy of the header before storing. */
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	/* 0 is an invalid on-wire checksum; substitute the mangled form. */
	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
2894
/* Software fallback for CRC32c offload (SCTP): compute the CRC over the
 * checksum-start region and store it in the SCTP header's checksum
 * field.  Only acts on CHECKSUM_PARTIAL, non-GSO skbs.  Returns 0 or a
 * negative errno.
 */
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	/* Get a private, writable copy of the header before storing. */
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}
2935
/* Resolve the network-layer protocol of @skb, looking through an
 * ETH_P_TEB outer header and any VLAN tags.  On return *@depth is the
 * offset past the VLAN headers (set by __vlan_get_protocol).  Returns 0
 * if the headers cannot be pulled.
 */
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
2953
/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * Strips the MAC/VLAN headers, dispatches to the protocol's registered
 * gso_segment callback, then restores the header pull.  Returns the
 * segment list or an ERR_PTR.
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	/* Undo the pull so skb->data points at the MAC header again. */
	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
2986
2987
2988/* openvswitch calls this on rx path, so we need a different check.
2989 */
2990static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2991{
2992 if (tx_path)
2993 return skb->ip_summed != CHECKSUM_PARTIAL &&
2994 skb->ip_summed != CHECKSUM_UNNECESSARY;
2995
2996 return skb->ip_summed == CHECKSUM_NONE;
2997}
2998
/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	/* The GSO control block must fit in skb->cb past the offset
	 * callers are allowed to keep.
	 */
	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
3056
3057/* Take action when hardware reception checksum errors are detected. */
3058#ifdef CONFIG_BUG
3059void netdev_rx_csum_fault(struct net_device *dev)
3060{
3061 if (net_ratelimit()) {
3062 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3063 dump_stack();
3064 }
3065}
3066EXPORT_SYMBOL(netdev_rx_csum_fault);
3067#endif
3068
/* XXX: check that highmem exists at all on the given machine. */
/* Return 1 if @skb has highmem fragments that @dev cannot DMA to/from
 * (device lacks NETIF_F_HIGHDMA); always 0 without CONFIG_HIGHMEM.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}
#endif
	return 0;
}
3086
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	/* For MPLS frames, only the device's MPLS feature set applies. */
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
/* No MPLS GSO support compiled in: features pass through unchanged. */
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif
3108
/* Final feature pruning based on the skb's actual protocol: drop
 * checksum/GSO offloads the hardware can't do for this protocol, and
 * drop scatter-gather when the device can't DMA the skb's highmem
 * fragments.
 */
static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
3127
/* ndo_features_check implementation for devices that impose no
 * per-packet feature restrictions: returns @features unchanged.
 */
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);
3135
/* Default per-packet feature check used when the driver provides no
 * ndo_features_check: applies only the generic VLAN restrictions.
 */
static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
3142
/* Prune GSO-related features for a GSO skb: disable GSO entirely when
 * the segment count exceeds the device limit, strip partial-GSO
 * features unless the skb opted in, and drop TSO_MANGLEID when the
 * IPv4 header may legitimately be fragmented.
 */
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
3174
/* Compute the effective offload feature set for transmitting @skb on
 * its device, narrowing the device features by GSO limits,
 * encapsulation, VLAN tagging, the driver's per-packet check, and
 * finally protocol-specific harmonization.
 */
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
3205
/* Hand one skb to the driver via @txq.  Delivers a copy to any taps
 * (packet sockets) first.  @more hints the driver that further skbs
 * follow immediately.  Returns the driver's NETDEV_TX_* code.
 */
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	/* Record len before the driver call: skb may be freed by it. */
	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
3222
/* Transmit the skb list starting at @first, one at a time, until the
 * list is exhausted, the driver rejects an skb, or the queue stops.
 * Stores the last driver return code in *@ret and returns the head of
 * the untransmitted remainder (NULL if everything was sent).
 */
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			/* Driver kept or rejected the skb: relink and stop. */
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
3250
3251static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3252 netdev_features_t features)
3253{
3254 if (skb_vlan_tag_present(skb) &&
3255 !vlan_hw_offload_capable(features, skb->vlan_proto))
3256 skb = __vlan_hwaccel_push_inside(skb);
3257 return skb;
3258}
3259
3260int skb_csum_hwoffload_help(struct sk_buff *skb,
3261 const netdev_features_t features)
3262{
3263 if (unlikely(skb->csum_not_inet))
3264 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3265 skb_crc32c_csum_help(skb);
3266
3267 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3268}
3269EXPORT_SYMBOL(skb_csum_hwoffload_help);
3270
/* Prepare one skb for handing to the driver: resolve VLAN tagging,
 * socket-level validation (e.g. TLS), GSO segmentation or
 * linearization, software checksumming, and xfrm offload.  Returns the
 * (possibly replaced, possibly a segment list) skb, or NULL if it was
 * dropped - in which case dev->tx_dropped is bumped.
 */
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			/* Original skb is replaced by its segment list. */
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
3325
/* Run validate_xmit_skb() over every entry of the skb list headed by @skb.
 * Dropped packets are unlinked; packets that were GSO-segmented are spliced
 * in as multiple entries.  Returns the head of the rebuilt list (possibly
 * NULL if everything was dropped).
 */
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb wont be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3353
/* Initialize qdisc_skb_cb(skb)->pkt_len: skb->len, plus — for GSO packets —
 * the header bytes every additional segment will carry once the packet is
 * split on the wire.
 */
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			/* skb_header_pointer() returns NULL if the header is
			 * not fully present; in that case we skip adding the
			 * TCP header length.
			 */
			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		/* SKB_GSO_DODGY marks segmentation metadata from an untrusted
		 * source; recompute gso_segs from the payload length instead
		 * of trusting the provided value.
		 */
		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
3394
/* Hand @skb to qdisc @q for @txq, or transmit it directly when the qdisc
 * allows bypass.  Handles both lockless (TCQ_F_NOLOCK) and root-locked
 * qdiscs.  Returns a NET_XMIT_* code; the skb is always consumed.
 */
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	/* Lockless qdiscs: no root lock, just enqueue and kick the qdisc. */
	if (q->flags & TCQ_F_NOLOCK) {
		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			/* Qdisc is being torn down; drop. */
			__qdisc_drop(skb, &to_free);
			rc = NET_XMIT_DROP;
		} else {
			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
			qdisc_run(q);
		}

		if (unlikely(to_free))
			kfree_skb_list(to_free);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			/* Drop busylock before the potentially long run. */
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		/* Normal path: enqueue, then run the qdisc if nobody is. */
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
3472
3473#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3474static void skb_update_prio(struct sk_buff *skb)
3475{
3476 const struct netprio_map *map;
3477 const struct sock *sk;
3478 unsigned int prioidx;
3479
3480 if (skb->priority)
3481 return;
3482 map = rcu_dereference_bh(skb->dev->priomap);
3483 if (!map)
3484 return;
3485 sk = skb_to_full_sk(skb);
3486 if (!sk)
3487 return;
3488
3489 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3490
3491 if (prioidx < map->priomap_len)
3492 skb->priority = map->priomap[prioidx];
3493}
3494#else
3495#define skb_update_prio(skb)
3496#endif
3497
/* Per-cpu transmit recursion depth, checked against XMIT_RECURSION_LIMIT in
 * __dev_queue_xmit() to break loops of devices transmitting through each
 * other.
 */
DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
3500
3501/**
3502 * dev_loopback_xmit - loop back @skb
3503 * @net: network namespace this loopback is happening in
3504 * @sk: sk needed to be a netfilter okfn
3505 * @skb: buffer to transmit
3506 */
3507int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3508{
3509 skb_reset_mac_header(skb);
3510 __skb_pull(skb, skb_network_offset(skb));
3511 skb->pkt_type = PACKET_LOOPBACK;
3512 skb->ip_summed = CHECKSUM_UNNECESSARY;
3513 WARN_ON(!skb_dst(skb));
3514 skb_dst_force(skb);
3515 netif_rx_ni(skb);
3516 return 0;
3517}
3518EXPORT_SYMBOL(dev_loopback_xmit);
3519
#ifdef CONFIG_NET_EGRESS
/* Run the egress TC classifier chain on @skb.  Returns the skb when
 * transmission should continue, or NULL when the packet was dropped,
 * stolen or redirected (with *@ret set to the NET_XMIT_* outcome).
 */
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct mini_Qdisc *mq = rcu_dereference_bh(dev->miniq_egress);
	struct tcf_result res;

	if (!mq)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	mini_qdisc_bstats_cpu_update(mq, skb);

	switch (tcf_classify(skb, mq->filter_list, &res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(res.classid);
		break;
	case TC_ACT_SHOT:
		/* Classifier verdict: drop the packet. */
		mini_qdisc_qstats_cpu_drop(mq);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		/* The action took ownership of the packet. */
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
3561
#ifdef CONFIG_XPS
/* Look up a tx queue index for @skb in @dev_maps at position @tci.
 * Returns a queue index in [0, dev->real_num_tx_queues), or -1 when the
 * map has no (valid) entry.
 */
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
			       struct xps_dev_maps *dev_maps, unsigned int tci)
{
	struct xps_map *map;
	int qidx;

	/* With traffic classes, the map is indexed per (tci, tc) pair. */
	if (dev->num_tc) {
		tci *= dev->num_tc;
		tci += netdev_get_prio_tc_map(dev, skb->priority);
	}

	map = rcu_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return -1;

	if (map->len == 1)
		qidx = map->queues[0];
	else
		qidx = map->queues[reciprocal_scale(skb_get_hash(skb),
						    map->len)];

	/* The map may reference queues beyond the current real count. */
	if (unlikely(qidx >= dev->real_num_tx_queues))
		return -1;

	return qidx;
}
#endif
3587
/* XPS (transmit packet steering): map @skb to a tx queue index, trying the
 * rx-queue-based map first (when enabled) and falling back to the per-cpu
 * map.  Returns -1 when XPS is not in use or yields no valid queue.
 */
static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
			 struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct sock *sk = skb->sk;
	int queue_index = -1;

	if (!static_key_false(&xps_needed))
		return -1;

	rcu_read_lock();
	if (!static_key_false(&xps_rxqs_needed))
		goto get_cpus_map;

	/* Steer by the socket's recorded rx queue when a rxqs map exists. */
	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
	if (dev_maps) {
		int tci = sk_rx_queue_get(sk);

		if (tci >= 0 && tci < dev->num_rx_queues)
			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
	}

get_cpus_map:
	if (queue_index < 0) {
		/* Fall back to the cpu map, indexed by the sending cpu. */
		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
		if (dev_maps) {
			unsigned int tci = skb->sender_cpu - 1;

			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
3629
/* ndo_select_queue helper: always select queue 0. */
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev,
		     select_queue_fallback_t fallback)
{
	return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);
3637
3638u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3639 struct net_device *sb_dev,
3640 select_queue_fallback_t fallback)
3641{
3642 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3643}
3644EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3645
3646static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3647 struct net_device *sb_dev)
3648{
3649 struct sock *sk = skb->sk;
3650 int queue_index = sk_tx_queue_get(sk);
3651
3652 sb_dev = sb_dev ? : dev;
3653
3654 if (queue_index < 0 || skb->ooo_okay ||
3655 queue_index >= dev->real_num_tx_queues) {
3656 int new_index = get_xps_queue(dev, sb_dev, skb);
3657
3658 if (new_index < 0)
3659 new_index = skb_tx_hash(dev, sb_dev, skb);
3660
3661 if (queue_index != new_index && sk &&
3662 sk_fullsock(sk) &&
3663 rcu_access_pointer(sk->sk_dst_cache))
3664 sk_tx_queue_set(sk, new_index);
3665
3666 queue_index = new_index;
3667 }
3668
3669 return queue_index;
3670}
3671
3672struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3673 struct sk_buff *skb,
3674 struct net_device *sb_dev)
3675{
3676 int queue_index = 0;
3677
3678#ifdef CONFIG_XPS
3679 u32 sender_cpu = skb->sender_cpu - 1;
3680
3681 if (sender_cpu >= (u32)NR_CPUS)
3682 skb->sender_cpu = raw_smp_processor_id() + 1;
3683#endif
3684
3685 if (dev->real_num_tx_queues != 1) {
3686 const struct net_device_ops *ops = dev->netdev_ops;
3687
3688 if (ops->ndo_select_queue)
3689 queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
3690 __netdev_pick_tx);
3691 else
3692 queue_index = __netdev_pick_tx(dev, skb, sb_dev);
3693
3694 queue_index = netdev_cap_txqueue(dev, queue_index);
3695 }
3696
3697 skb_set_queue_mapping(skb, queue_index);
3698 return netdev_get_tx_queue(dev, queue_index);
3699}
3700
3701/**
3702 * __dev_queue_xmit - transmit a buffer
3703 * @skb: buffer to transmit
 * @sb_dev: subordinate device used for L2 forwarding offload
3705 *
3706 * Queue a buffer for transmission to a network device. The caller must
3707 * have set the device and priority and built the buffer before calling
3708 * this function. The function can be called from an interrupt.
3709 *
3710 * A negative errno code is returned on a failure. A success does not
3711 * guarantee the frame will be transmitted as it may be dropped due
3712 * to congestion or traffic shaping.
3713 *
3714 * -----------------------------------------------------------------------------------
3715 * I notice this method can also return errors from the queue disciplines,
3716 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3717 * be positive.
3718 *
3719 * Regardless of the return value, the skb is consumed, so it is currently
3720 * difficult to retry a send to this method. (You can bump the ref count
3721 * before sending to hold a reference for retry if you are careful.)
3722 *
3723 * When calling this method, interrupts MUST be enabled. This is because
3724 * the BH enable code must have IRQs enabled so that it will not deadlock.
3725 * --BLG
3726 */
3727static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
3728{
3729 struct net_device *dev = skb->dev;
3730 struct netdev_queue *txq;
3731 struct Qdisc *q;
3732 int rc = -ENOMEM;
3733 bool again = false;
3734
3735 skb_reset_mac_header(skb);
3736
3737 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3738 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3739
3740 /* Disable soft irqs for various locks below. Also
3741 * stops preemption for RCU.
3742 */
3743 rcu_read_lock_bh();
3744
3745 skb_update_prio(skb);
3746
3747 qdisc_pkt_len_init(skb);
3748#ifdef CONFIG_NET_CLS_ACT
3749 skb->tc_at_ingress = 0;
3750# ifdef CONFIG_NET_EGRESS
3751 if (static_branch_unlikely(&egress_needed_key)) {
3752 skb = sch_handle_egress(skb, &rc, dev);
3753 if (!skb)
3754 goto out;
3755 }
3756# endif
3757#endif
3758 /* If device/qdisc don't need skb->dst, release it right now while
3759 * its hot in this cpu cache.
3760 */
3761 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3762 skb_dst_drop(skb);
3763 else
3764 skb_dst_force(skb);
3765
3766 txq = netdev_pick_tx(dev, skb, sb_dev);
3767 q = rcu_dereference_bh(txq->qdisc);
3768
3769 trace_net_dev_queue(skb);
3770 if (q->enqueue) {
3771 rc = __dev_xmit_skb(skb, q, dev, txq);
3772 goto out;
3773 }
3774
3775 /* The device has no queue. Common case for software devices:
3776 * loopback, all the sorts of tunnels...
3777
3778 * Really, it is unlikely that netif_tx_lock protection is necessary
3779 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
3780 * counters.)
3781 * However, it is possible, that they rely on protection
3782 * made by us here.
3783
3784 * Check this and shot the lock. It is not prone from deadlocks.
3785 *Either shot noqueue qdisc, it is even simpler 8)
3786 */
3787 if (dev->flags & IFF_UP) {
3788 int cpu = smp_processor_id(); /* ok because BHs are off */
3789
3790 if (txq->xmit_lock_owner != cpu) {
3791 if (unlikely(__this_cpu_read(xmit_recursion) >
3792 XMIT_RECURSION_LIMIT))
3793 goto recursion_alert;
3794
3795 skb = validate_xmit_skb(skb, dev, &again);
3796 if (!skb)
3797 goto out;
3798
3799 HARD_TX_LOCK(dev, txq, cpu);
3800
3801 if (!netif_xmit_stopped(txq)) {
3802 __this_cpu_inc(xmit_recursion);
3803 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3804