// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *		Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int vidx;
	unsigned int size;
	int pidx;

	ASSERT_RTNL();

	pidx = vlan_proto_idx(vlan_proto);
	if (pidx < 0)
		return -EINVAL;

	vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (array == NULL)
		return -ENOBUFS;

	/* paired with smp_rmb() in __vlan_group_get_device() */
	smp_wmb();

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}
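
/* A quick sketch of how the two-level table preallocated above is consumed
 * on lookup (this mirrors __vlan_group_get_device(); the arithmetic assumes
 * the usual VLAN_GROUP_ARRAY_PART_LEN of VLAN_N_VID /
 * VLAN_GROUP_ARRAY_SPLIT_PARTS = 4096 / 8 = 512):
 *
 *	array = vg->vlan_devices_arrays[pidx][vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 *	dev   = array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
 *
 * e.g. VID 1000 lives in part 1000 / 512 = 1, slot 1000 % 512 = 488, and
 * only parts that actually hold devices ever get allocated.
 */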

static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
					    struct net_device *dev,
					    struct vlan_dev_priv *vlan)
{
	if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
		netif_stacked_transfer_operstate(rootdev, dev);
}

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
}

int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id,
			struct netlink_ext_ack *extack)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
		return -EEXIST;
	}

	return 0;
}

int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	vlan_stacked_transfer_operstate(real_dev, dev, vlan);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}
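
/* register_vlan_dev() is shared by the legacy ioctl path below and by the
 * rtnetlink path (vlan_newlink() in vlan_netlink.c), which is what a modern
 * invocation such as the following ends up calling ("eth0" is only an
 * example device name):
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 */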

/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
				  NULL);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like: eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like: vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like: eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like: vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	free_netdev(new_dev);
	return err;
}
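
/* register_vlan_device() above is only reached from the ADD_VLAN_CMD ioctl
 * handled further down (historically issued by the vconfig(8) utility).  It
 * always creates an 802.1Q (ETH_P_8021Q) device, and the interface name
 * follows the per-namespace name_type: e.g. "eth0.5" under the default
 * VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD scheme, or "vlan0005" with
 * VLAN_NAME_TYPE_PLUS_VID.
 */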

static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	netif_inherit_tso_max(vlandev, dev);

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
	vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);

	netdev_update_features(vlandev);
}

static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}

static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);
	int err;

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_stacked_transfer_operstate(dev, vlandev,
							vlan_dev_priv(vlandev));
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			vlan_stacked_transfer_operstate(dev, vlandev,
							vlan_dev_priv(vlandev));
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP,
						 extack);
			vlan_stacked_transfer_operstate(dev, vlandev, vlan);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;

	case NETDEV_CVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_CVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
		break;

	case NETDEV_SVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_SVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

/*
 * VLAN IOCTL handler.
 * o execute requested action or pass command to the device driver
 *   arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[sizeof(args.device1) - 1] = 0;
	args.u.device2[sizeof(args.u.device2) - 1] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2,
					  sizeof(args.u.device2));
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}
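
/* Userspace reaches vlan_ioctl_handler() through the SIOCSIFVLAN and
 * SIOCGIFVLAN socket ioctls.  A minimal, hypothetical sketch of the legacy
 * ADD_VLAN_CMD call (device name and VID are example values; new code
 * should prefer the rtnetlink interface instead):
 *
 *	struct vlan_ioctl_args args = { .cmd = ADD_VLAN_CMD };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
 *	args.u.VID = 100;
 *	if (ioctl(fd, SIOCSIFVLAN, &args) < 0)
 *		perror("SIOCSIFVLAN");
 */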

static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
{
	int err;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}

static void __exit vlan_cleanup_module(void)
{
	vlan_ioctl_set(NULL);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_DESCRIPTION("802.1Q/802.1ad VLAN Protocol");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
