// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include <net/gro.h>
#include "vlan.h"

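/**
 * vlan_do_receive - steer a VLAN-tagged skb to its VLAN device
 * @skbp: pointer to the skb; may be updated when the skb is copied by
 *	  skb_share_check() or reallocated by vlan_insert_inner_tag()
 *
 * Looks up the VLAN device matching the skb's hardware-accelerated tag,
 * rewrites skb->dev, accounts the packet and clears the tag. Returns
 * true when the skb now belongs to a VLAN device; returns false when no
 * such device exists (*skbp left intact) or the skb was dropped (*skbp
 * set to NULL), so callers must re-check *skbp on a false return.
 */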
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point at the
		 * mac header, so move skb->data there before the call and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	u64_stats_inc(&rx_stats->rx_packets);
	u64_stats_add(&rx_stats->rx_bytes, skb->len);
	if (skb->pkt_type == PACKET_MULTICAST)
		u64_stats_inc(&rx_stats->rx_multicast);
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock held. Walks up master devices
 * (bonding, team) when the VLAN group lives on the upper device.
 */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

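/* One entry per (proto, vid) pair programmed on a real device. The
 * refcount tracks how many users share the entry (VLAN devices, vids
 * propagated from stacked devices, drivers calling vlan_vid_add());
 * the hardware filter is only touched on 0 <-> 1 transitions.
 */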
struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

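/* A device can filter a given tag protocol in hardware only when it
 * advertises the matching feature: CTAG filtering for 802.1Q, STAG
 * filtering for 802.1ad.
 */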
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}

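/**
 * vlan_for_each - iterate over every vid configured on a real device
 * @dev: real device whose vid list is walked
 * @action: callback invoked per vid; a non-zero return aborts the walk
 * @arg: opaque cookie passed through to @action
 *
 * Runs under RTNL. The device handed to @action may be NULL when a vid
 * was added without a corresponding VLAN device, so callbacks should
 * check it. A minimal callback sketch (the name is illustrative only):
 *
 *	static int dump_vid(struct net_device *vdev, int vid, void *arg)
 *	{
 *		pr_info("vid %d -> %s\n", vid, vdev ? vdev->name : "none");
 *		return 0;
 *	}
 */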
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);

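/* Replay all vids of one protocol into the real device's hardware
 * filter, unwinding on failure; vlan_filter_drop_vids() below is the
 * inverse. Typically used when the filtering feature for that protocol
 * is toggled at runtime.
 */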
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

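/**
 * vlan_vid_add - take a reference on a (proto, vid) pair on @dev
 * @dev: real device to program
 * @proto: VLAN protocol, network byte order
 * @vid: VLAN id
 *
 * Allocates the per-device vlan_info on first use and programs the
 * hardware filter on the first reference. Must run under RTNL. Pair
 * each successful call with vlan_vid_del().
 */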
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

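/* Drop one reference on a (proto, vid) pair; the last reference removes
 * the hardware filter entry, and the last vid frees the per-device
 * vlan_info after an RCU grace period.
 */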
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

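/* Propagate every vid configured on @by_dev onto @dev, e.g. so that a
 * bonding or team lower device filters the vids of VLANs stacked on
 * its master. Unwinds on failure; vlan_vids_del_by_dev() below is the
 * inverse.
 */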
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

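/* Report whether any VLAN device is stacked directly on @dev. This
 * checks nr_vlan_devs rather than the vid list, so vids added without
 * a VLAN device do not count.
 */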
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

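/* GRO receive handler for frames whose VLAN header is still inline
 * (i.e. not stripped by the NIC). Flows match only if their complete
 * VLAN headers (TCI and encapsulated proto) are equal; after pulling
 * the header, coalescing is delegated to the offload handler of the
 * encapsulated protocol.
 */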
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header(skb, hlen, off_vlan);
	if (unlikely(!vhdr))
		goto out;

	type = vhdr->h_vlan_encapsulated_proto;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
					    ipv6_gro_receive, inet_gro_receive,
					    head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

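/* GRO completion: fix up the coalesced skb by invoking the completion
 * handler of the encapsulated protocol just past the VLAN header.
 */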
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, nhoff + sizeof(*vhdr));

	return err;
}

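/* Both TPIDs share the same GRO callbacks; the non-default priority
 * only controls where these entries sit in the shared offload list.
 */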
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}

fs_initcall(vlan_offload_init);