1 | /* |
2 | * VLAN An implementation of 802.1Q VLAN tagging. |
3 | * |
4 | * Authors: Ben Greear <greearb@candelatech.com> |
5 | * |
6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. |
10 | * |
11 | */ |
12 | #ifndef _LINUX_IF_VLAN_H_ |
13 | #define _LINUX_IF_VLAN_H_ |
14 | |
15 | #include <linux/netdevice.h> |
16 | #include <linux/etherdevice.h> |
17 | #include <linux/rtnetlink.h> |
18 | #include <linux/bug.h> |
19 | #include <uapi/linux/if_vlan.h> |
20 | |
#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header (ETH_HLEN 14
					 * + VLAN_HLEN 4). */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS
					 * (ETH_FRAME_LEN 1514 + VLAN_HLEN 4). */
32 | |
/**
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID (big-endian Tag Control Information)
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};
42 | |
/**
 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 * @h_dest: destination ethernet address
 * @h_source: source ethernet address
 * @h_vlan_proto: ethernet protocol
 * @h_vlan_TCI: priority and VLAN ID (big-endian)
 * @h_vlan_encapsulated_proto: packet type ID or len
 *
 * Laid out exactly as an ethhdr immediately followed by a vlan_hdr,
 * so a tagged frame's MAC header can be read through this struct.
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
58 | |
59 | #include <linux/skbuff.h> |
60 | |
/* Interpret @skb's MAC header as a VLAN ethernet header. */
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	unsigned char *mac = skb_mac_header(skb);

	return (struct vlan_ethhdr *)mac;
}
65 | |
/* Bit layout of the 16-bit TCI: 3-bit PCP | 1-bit CFI/DEI | 12-bit VID. */
#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
74 | |
75 | static inline bool is_vlan_dev(const struct net_device *dev) |
76 | { |
77 | return dev->priv_flags & IFF_802_1Q_VLAN; |
78 | } |
79 | |
/* Accessors for the hardware-accelerated vlan tag stored in the skb.
 * skb_vlan_tag_get*() only return meaningful values when
 * skb_vlan_tag_present() is true.
 */
#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
85 | |
/**
 * vlan_get_rx_ctag_filter_info - ask @dev to push its C-VLAN filter info
 * @dev: device to notify
 *
 * Fires NETDEV_CVLAN_FILTER_PUSH_INFO on the netdevice notifier chain.
 * Must be called with RTNL held.  Returns 0 or a negative errno taken
 * from the notifier result.
 */
static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}
91 | |
/**
 * vlan_drop_rx_ctag_filter_info - ask @dev to drop its C-VLAN filter info
 * @dev: device to notify
 *
 * Fires NETDEV_CVLAN_FILTER_DROP_INFO on the netdevice notifier chain.
 * Must be called with RTNL held.
 */
static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}
97 | |
/**
 * vlan_get_rx_stag_filter_info - ask @dev to push its S-VLAN filter info
 * @dev: device to notify
 *
 * Fires NETDEV_SVLAN_FILTER_PUSH_INFO on the netdevice notifier chain.
 * Must be called with RTNL held.  Returns 0 or a negative errno taken
 * from the notifier result.
 */
static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}
103 | |
/**
 * vlan_drop_rx_stag_filter_info - ask @dev to drop its S-VLAN filter info
 * @dev: device to notify
 *
 * Fires NETDEV_SVLAN_FILTER_DROP_INFO on the netdevice notifier chain.
 * Must be called with RTNL held.
 */
static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}
109 | |
/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 * @rx_packets: number of received packets
 * @rx_bytes: number of received bytes
 * @rx_multicast: number of received multicast packets
 * @tx_packets: number of transmitted packets
 * @tx_bytes: number of transmitted bytes
 * @syncp: synchronization point for 64bit counters
 * @rx_errors: number of rx errors
 * @tx_dropped: number of tx drops
 *
 * NOTE(review): the u64 counters appear intended to be read/written under
 * @syncp on 32-bit hosts — confirm at the call sites that aggregate them.
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};
131 | |
132 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
133 | |
134 | extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, |
135 | __be16 vlan_proto, u16 vlan_id); |
136 | extern int vlan_for_each(struct net_device *dev, |
137 | int (*action)(struct net_device *dev, int vid, |
138 | void *arg), void *arg); |
139 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); |
140 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); |
141 | extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); |
142 | |
/**
 * struct vlan_priority_tci_mapping - vlan egress priority mappings
 * @priority: skb priority
 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 * @next: pointer to next struct
 *
 * Entries are chained off vlan_dev_priv's egress_priority_map[] hash
 * buckets (indexed by skb priority & 0xF); see
 * vlan_dev_get_egress_qos_mask().
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};
154 | |
155 | struct proc_dir_entry; |
156 | struct netpoll; |
157 | |
/**
 * struct vlan_dev_priv - VLAN private device data
 * @nr_ingress_mappings: number of ingress priority mappings
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice
 * @real_dev_addr: address of underlying netdevice
 * @dent: proc dir entry
 * @vlan_pcpu_stats: ptr to percpu rx stats
 * @netpoll: netpoll state for this vlan device (CONFIG_NET_POLL_CONTROLLER only)
 * @nest_level: encapsulation nesting depth, see vlan_get_encap_level()
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
	unsigned int				nest_level;
};
192 | |
/* Return the vlan private data embedded in @dev's netdev_priv() area.
 * Only valid when is_vlan_dev(dev) is true. */
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}
197 | |
198 | static inline u16 |
199 | vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) |
200 | { |
201 | struct vlan_priority_tci_mapping *mp; |
202 | |
203 | smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ |
204 | |
205 | mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; |
206 | while (mp) { |
207 | if (mp->priority == skprio) { |
208 | return mp->vlan_qos; /* This should already be shifted |
209 | * to mask correctly with the |
210 | * VLAN's TCI */ |
211 | } |
212 | mp = mp->next; |
213 | } |
214 | return 0; |
215 | } |
216 | |
217 | extern bool vlan_do_receive(struct sk_buff **skb); |
218 | |
219 | extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); |
220 | extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); |
221 | |
222 | extern int vlan_vids_add_by_dev(struct net_device *dev, |
223 | const struct net_device *by_dev); |
224 | extern void vlan_vids_del_by_dev(struct net_device *dev, |
225 | const struct net_device *by_dev); |
226 | |
227 | extern bool vlan_uses_dev(const struct net_device *dev); |
228 | |
/* Return the encapsulation nesting level of vlan device @dev.
 * @dev must be a vlan device; BUG()s otherwise. */
static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG_ON(!is_vlan_dev(dev));
	return vlan_dev_priv(dev)->nest_level;
}
234 | #else |
/*
 * Stub versions used when VLAN support is not compiled in (neither
 * CONFIG_VLAN_8021Q nor CONFIG_VLAN_8021Q_MODULE).  Lookups fail
 * benignly; accessors that are only meaningful on an actual vlan
 * device BUG(), since no vlan device can exist in this configuration.
 */
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
		     __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline int
vlan_for_each(struct net_device *dev,
	      int (*action)(struct net_device *dev, int vid, void *arg),
	      void *arg)
{
	return 0;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG();
	return 0;
}
308 | #endif |
309 | |
310 | /** |
311 | * eth_type_vlan - check for valid vlan ether type. |
312 | * @ethertype: ether type to check |
313 | * |
314 | * Returns true if the ether type is a vlan ether type. |
315 | */ |
316 | static inline bool eth_type_vlan(__be16 ethertype) |
317 | { |
318 | switch (ethertype) { |
319 | case htons(ETH_P_8021Q): |
320 | case htons(ETH_P_8021AD): |
321 | return true; |
322 | default: |
323 | return false; |
324 | } |
325 | } |
326 | |
327 | static inline bool vlan_hw_offload_capable(netdev_features_t features, |
328 | __be16 proto) |
329 | { |
330 | if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) |
331 | return true; |
332 | if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX) |
333 | return true; |
334 | return false; |
335 | } |
336 | |
/**
 * __vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	/* Make room for 4 more header bytes (may reallocate the head). */
	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header.
	 * The last ETH_TLEN bytes (the ethertype field) stay in place and
	 * become the inner h_vlan_encapsulated_proto. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	skb->mac_header -= VLAN_HLEN;

	/* Position the vlan ethernet header so its tail lines up with the
	 * pre-existing ethertype field. */
	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}
385 | |
/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload, immediately
 * after a plain ethernet header (mac_len == ETH_HLEN).
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
402 | |
403 | /** |
404 | * vlan_insert_inner_tag - inner VLAN tag inserting |
405 | * @skb: skbuff to tag |
406 | * @vlan_proto: VLAN encapsulation protocol |
407 | * @vlan_tci: VLAN TCI to insert |
408 | * @mac_len: MAC header length including outer vlan headers |
409 | * |
410 | * Inserts the VLAN tag into @skb as part of the payload at offset mac_len |
411 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. |
412 | * |
413 | * Following the skb_unshare() example, in case of error, the calling function |
414 | * doesn't have to worry about freeing the original skb. |
415 | * |
416 | * Does not change skb->protocol so this function can be used during receive. |
417 | */ |
418 | static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, |
419 | __be16 vlan_proto, |
420 | u16 vlan_tci, |
421 | unsigned int mac_len) |
422 | { |
423 | int err; |
424 | |
425 | err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); |
426 | if (err) { |
427 | dev_kfree_skb_any(skb); |
428 | return NULL; |
429 | } |
430 | return skb; |
431 | } |
432 | |
/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb, or NULL on failure (in which case @skb has
 * already been freed). If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
452 | |
453 | /** |
454 | * vlan_insert_tag_set_proto - regular VLAN tag inserting |
455 | * @skb: skbuff to tag |
456 | * @vlan_proto: VLAN encapsulation protocol |
457 | * @vlan_tci: VLAN TCI to insert |
458 | * |
459 | * Inserts the VLAN tag into @skb as part of the payload |
460 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. |
461 | * |
462 | * Following the skb_unshare() example, in case of error, the calling function |
463 | * doesn't have to worry about freeing the original skb. |
464 | */ |
465 | static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, |
466 | __be16 vlan_proto, |
467 | u16 vlan_tci) |
468 | { |
469 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); |
470 | if (skb) |
471 | skb->protocol = vlan_proto; |
472 | return skb; |
473 | } |
474 | |
/**
 * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
 * @skb: skbuff to clear
 *
 * Clears the VLAN information from @skb.  Only the presence flag is
 * reset; vlan_tci/vlan_proto keep their stale values and must not be
 * read unless skb_vlan_tag_present() is true again.
 */
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_present = 0;
}
485 | |
486 | /** |
487 | * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb |
488 | * @dst: skbuff to copy to |
489 | * @src: skbuff to copy from |
490 | * |
491 | * Copies VLAN information from @src to @dst (for branchless code) |
492 | */ |
493 | static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) |
494 | { |
495 | dst->vlan_present = src->vlan_present; |
496 | dst->vlan_proto = src->vlan_proto; |
497 | dst->vlan_tci = src->vlan_tci; |
498 | } |
499 | |
/**
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload and
 * clears the hwaccel tag on success.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	if (likely(skb))
		__vlan_hwaccel_clear_tag(skb);
	return skb;
}
517 | |
518 | /** |
519 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting |
520 | * @skb: skbuff to tag |
521 | * @vlan_proto: VLAN encapsulation protocol |
522 | * @vlan_tci: VLAN TCI to insert |
523 | * |
524 | * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest |
525 | */ |
526 | static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, |
527 | __be16 vlan_proto, u16 vlan_tci) |
528 | { |
529 | skb->vlan_proto = vlan_proto; |
530 | skb->vlan_tci = vlan_tci; |
531 | skb->vlan_present = 1; |
532 | } |
533 | |
534 | /** |
535 | * __vlan_get_tag - get the VLAN ID that is part of the payload |
536 | * @skb: skbuff to query |
537 | * @vlan_tci: buffer to store value |
538 | * |
539 | * Returns error if the skb is not of VLAN type |
540 | */ |
541 | static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) |
542 | { |
543 | struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; |
544 | |
545 | if (!eth_type_vlan(veth->h_vlan_proto)) |
546 | return -EINVAL; |
547 | |
548 | *vlan_tci = ntohs(veth->h_vlan_TCI); |
549 | return 0; |
550 | } |
551 | |
552 | /** |
553 | * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] |
554 | * @skb: skbuff to query |
555 | * @vlan_tci: buffer to store value |
556 | * |
557 | * Returns error if @skb->vlan_tci is not set correctly |
558 | */ |
559 | static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, |
560 | u16 *vlan_tci) |
561 | { |
562 | if (skb_vlan_tag_present(skb)) { |
563 | *vlan_tci = skb_vlan_tag_get(skb); |
564 | return 0; |
565 | } else { |
566 | *vlan_tci = 0; |
567 | return -EINVAL; |
568 | } |
569 | } |
570 | |
571 | /** |
572 | * vlan_get_tag - get the VLAN ID from the skb |
573 | * @skb: skbuff to query |
574 | * @vlan_tci: buffer to store value |
575 | * |
576 | * Returns error if the skb is not VLAN tagged |
577 | */ |
578 | static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) |
579 | { |
580 | if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { |
581 | return __vlan_hwaccel_get_tag(skb, vlan_tci); |
582 | } else { |
583 | return __vlan_get_tag(skb, vlan_tci); |
584 | } |
585 | } |
586 | |
/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 * Returns 0 on a malformed header or a failed pull.
 */
static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		/* Walk each nested vlan header until a non-vlan ethertype
		 * is found, pulling headers into the linear area as needed. */
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}
631 | |
/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 * May pull in-skb vlan headers via __vlan_get_protocol().
 */
static inline __be16 vlan_get_protocol(struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}
643 | |
/**
 * vlan_set_encap_proto - set skb->protocol from an in-payload vlan header
 * @skb: skbuff to update
 * @vhdr: vlan header that precedes the encapsulated payload
 *
 * Classifies the encapsulated payload as an 802.3 ethertype, raw 802.3
 * (Novell IPX quirk), or 802.2 LLC.
 */
static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	/* Not an ethertype: inspect the first payload bytes after @vhdr. */
	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}
677 | |
678 | /** |
679 | * skb_vlan_tagged - check if skb is vlan tagged. |
680 | * @skb: skbuff to query |
681 | * |
682 | * Returns true if the skb is tagged, regardless of whether it is hardware |
683 | * accelerated or not. |
684 | */ |
685 | static inline bool skb_vlan_tagged(const struct sk_buff *skb) |
686 | { |
687 | if (!skb_vlan_tag_present(skb) && |
688 | likely(!eth_type_vlan(skb->protocol))) |
689 | return false; |
690 | |
691 | return true; |
692 | } |
693 | |
/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		/* No hwaccel tag: the (single) outer tag, if any, lives in
		 * the payload. */
		if (likely(!eth_type_vlan(protocol)))
			return false;

		/* Make sure the full vlan ethernet header is linear before
		 * dereferencing it. */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	/* A second vlan ethertype here means at least two tags. */
	if (!eth_type_vlan(protocol))
		return false;

	return true;
}
723 | |
/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}
747 | |
748 | /** |
749 | * compare_vlan_header - Compare two vlan headers |
750 | * @h1: Pointer to vlan header |
751 | * @h2: Pointer to vlan header |
752 | * |
753 | * Compare two vlan headers, returns 0 if equal. |
754 | * |
755 | * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits. |
756 | */ |
757 | static inline unsigned long (const struct vlan_hdr *h1, |
758 | const struct vlan_hdr *h2) |
759 | { |
760 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
761 | return *(u32 *)h1 ^ *(u32 *)h2; |
762 | #else |
763 | return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | |
764 | ((__force u32)h1->h_vlan_encapsulated_proto ^ |
765 | (__force u32)h2->h_vlan_encapsulated_proto); |
766 | #endif |
767 | } |
768 | #endif /* !(_LINUX_IF_VLAN_H_) */ |
769 | |