1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | |
3 | #ifndef __DSA_TAG_H |
4 | #define __DSA_TAG_H |
5 | |
6 | #include <linux/if_vlan.h> |
7 | #include <linux/list.h> |
8 | #include <linux/types.h> |
9 | #include <net/dsa.h> |
10 | |
11 | #include "port.h" |
12 | #include "user.h" |
13 | |
/* Registration record for one tagging protocol implementation: links the
 * protocol's ops into the global list of tag drivers and pins the module
 * that provides them.
 */
struct dsa_tag_driver {
	const struct dsa_device_ops *ops;	/* protocol callbacks (xmit/rcv, overhead) */
	struct list_head list;			/* linkage in the global tag driver list */
	struct module *owner;			/* module providing ops; ref held while in use */
};
19 | |
/* Shared RX packet_type hook for all DSA conduit interfaces; defined in the
 * DSA core, not in this header.
 */
extern struct packet_type dsa_pack_type;

/* Look up a tagging protocol driver by DSA_TAG_PROTO_* ID or by name.
 * Both getters acquire a reference on the providing module; callers must
 * release it with dsa_tag_driver_put() when done with the ops.
 */
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol);
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
26 | |
27 | static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops) |
28 | { |
29 | return ops->needed_headroom + ops->needed_tailroom; |
30 | } |
31 | |
32 | static inline struct net_device *dsa_conduit_find_user(struct net_device *dev, |
33 | int device, int port) |
34 | { |
35 | struct dsa_port *cpu_dp = dev->dsa_ptr; |
36 | struct dsa_switch_tree *dst = cpu_dp->dst; |
37 | struct dsa_port *dp; |
38 | |
39 | list_for_each_entry(dp, &dst->ports, list) |
40 | if (dp->ds->index == device && dp->index == port && |
41 | dp->type == DSA_PORT_TYPE_USER) |
42 | return dp->user; |
43 | |
44 | return NULL; |
45 | } |
46 | |
47 | /* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged |
48 | * frames as untagged, since the bridge will not untag them. |
49 | */ |
50 | static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb) |
51 | { |
52 | struct dsa_port *dp = dsa_user_to_port(dev: skb->dev); |
53 | struct net_device *br = dsa_port_bridge_dev_get(dp); |
54 | struct net_device *dev = skb->dev; |
55 | struct net_device *upper_dev; |
56 | u16 vid, pvid, proto; |
57 | int err; |
58 | |
59 | if (!br || br_vlan_enabled(dev: br)) |
60 | return skb; |
61 | |
62 | err = br_vlan_get_proto(dev: br, p_proto: &proto); |
63 | if (err) |
64 | return skb; |
65 | |
66 | /* Move VLAN tag from data to hwaccel */ |
67 | if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) { |
68 | skb = skb_vlan_untag(skb); |
69 | if (!skb) |
70 | return NULL; |
71 | } |
72 | |
73 | if (!skb_vlan_tag_present(skb)) |
74 | return skb; |
75 | |
76 | vid = skb_vlan_tag_get_id(skb); |
77 | |
78 | /* We already run under an RCU read-side critical section since |
79 | * we are called from netif_receive_skb_list_internal(). |
80 | */ |
81 | err = br_vlan_get_pvid_rcu(dev, p_pvid: &pvid); |
82 | if (err) |
83 | return skb; |
84 | |
85 | if (vid != pvid) |
86 | return skb; |
87 | |
88 | /* The sad part about attempting to untag from DSA is that we |
89 | * don't know, unless we check, if the skb will end up in |
90 | * the bridge's data path - br_allowed_ingress() - or not. |
91 | * For example, there might be an 8021q upper for the |
92 | * default_pvid of the bridge, which will steal VLAN-tagged traffic |
93 | * from the bridge's data path. This is a configuration that DSA |
94 | * supports because vlan_filtering is 0. In that case, we should |
95 | * definitely keep the tag, to make sure it keeps working. |
96 | */ |
97 | upper_dev = __vlan_find_dev_deep_rcu(real_dev: br, htons(proto), vlan_id: vid); |
98 | if (upper_dev) |
99 | return skb; |
100 | |
101 | __vlan_hwaccel_clear_tag(skb); |
102 | |
103 | return skb; |
104 | } |
105 | |
106 | /* For switches without hardware support for DSA tagging to be able |
107 | * to support termination through the bridge. |
108 | */ |
109 | static inline struct net_device * |
110 | dsa_find_designated_bridge_port_by_vid(struct net_device *conduit, u16 vid) |
111 | { |
112 | struct dsa_port *cpu_dp = conduit->dsa_ptr; |
113 | struct dsa_switch_tree *dst = cpu_dp->dst; |
114 | struct bridge_vlan_info vinfo; |
115 | struct net_device *user; |
116 | struct dsa_port *dp; |
117 | int err; |
118 | |
119 | list_for_each_entry(dp, &dst->ports, list) { |
120 | if (dp->type != DSA_PORT_TYPE_USER) |
121 | continue; |
122 | |
123 | if (!dp->bridge) |
124 | continue; |
125 | |
126 | if (dp->stp_state != BR_STATE_LEARNING && |
127 | dp->stp_state != BR_STATE_FORWARDING) |
128 | continue; |
129 | |
130 | /* Since the bridge might learn this packet, keep the CPU port |
131 | * affinity with the port that will be used for the reply on |
132 | * xmit. |
133 | */ |
134 | if (dp->cpu_dp != cpu_dp) |
135 | continue; |
136 | |
137 | user = dp->user; |
138 | |
139 | err = br_vlan_get_info_rcu(dev: user, vid, p_vinfo: &vinfo); |
140 | if (err) |
141 | continue; |
142 | |
143 | return user; |
144 | } |
145 | |
146 | return NULL; |
147 | } |
148 | |
149 | /* If the ingress port offloads the bridge, we mark the frame as autonomously |
150 | * forwarded by hardware, so the software bridge doesn't forward in twice, back |
151 | * to us, because we already did. However, if we're in fallback mode and we do |
152 | * software bridging, we are not offloading it, therefore the dp->bridge |
153 | * pointer is not populated, and flooding needs to be done by software (we are |
154 | * effectively operating in standalone ports mode). |
155 | */ |
156 | static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb) |
157 | { |
158 | struct dsa_port *dp = dsa_user_to_port(dev: skb->dev); |
159 | |
160 | skb->offload_fwd_mark = !!(dp->bridge); |
161 | } |
162 | |
163 | /* Helper for removing DSA header tags from packets in the RX path. |
164 | * Must not be called before skb_pull(len). |
165 | * skb->data |
166 | * | |
167 | * v |
168 | * | | | | | | | | | | | | | | | | | | | |
169 | * +-----------------------+-----------------------+---------------+-------+ |
170 | * | Destination MAC | Source MAC | DSA header | EType | |
171 | * +-----------------------+-----------------------+---------------+-------+ |
172 | * | | |
173 | * <----- len -----> <----- len -----> |
174 | * | |
175 | * >>>>>>> v |
176 | * >>>>>>> | | | | | | | | | | | | | | | |
177 | * >>>>>>> +-----------------------+-----------------------+-------+ |
178 | * >>>>>>> | Destination MAC | Source MAC | EType | |
179 | * +-----------------------+-----------------------+-------+ |
180 | * ^ |
181 | * | |
182 | * skb->data |
183 | */ |
184 | static inline void (struct sk_buff *skb, int len) |
185 | { |
186 | memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN); |
187 | } |
188 | |
189 | /* Helper for creating space for DSA header tags in TX path packets. |
190 | * Must not be called before skb_push(len). |
191 | * |
192 | * Before: |
193 | * |
194 | * <<<<<<< | | | | | | | | | | | | | | | |
195 | * ^ <<<<<<< +-----------------------+-----------------------+-------+ |
196 | * | <<<<<<< | Destination MAC | Source MAC | EType | |
197 | * | +-----------------------+-----------------------+-------+ |
198 | * <----- len -----> |
199 | * | |
200 | * | |
201 | * skb->data |
202 | * |
203 | * After: |
204 | * |
205 | * | | | | | | | | | | | | | | | | | | | |
206 | * +-----------------------+-----------------------+---------------+-------+ |
207 | * | Destination MAC | Source MAC | DSA header | EType | |
208 | * +-----------------------+-----------------------+---------------+-------+ |
209 | * ^ | | |
210 | * | <----- len -----> |
211 | * skb->data |
212 | */ |
213 | static inline void (struct sk_buff *skb, int len) |
214 | { |
215 | memmove(skb->data, skb->data + len, 2 * ETH_ALEN); |
216 | } |
217 | |
218 | /* On RX, eth_type_trans() on the DSA conduit pulls ETH_HLEN bytes starting from |
219 | * skb_mac_header(skb), which leaves skb->data pointing at the first byte after |
220 | * what the DSA conduit perceives as the EtherType (the beginning of the L3 |
221 | * protocol). Since DSA EtherType header taggers treat the EtherType as part of |
222 | * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header |
223 | * is located 2 bytes behind skb->data. Note that EtherType in this context |
224 | * means the first 2 bytes of the DSA header, not the encapsulated EtherType |
225 | * that will become visible after the DSA header is stripped. |
226 | */ |
227 | static inline void *(struct sk_buff *skb) |
228 | { |
229 | return skb->data - 2; |
230 | } |
231 | |
232 | /* On TX, skb->data points to the MAC header, which means that EtherType |
233 | * header taggers start exactly where the EtherType is (the EtherType is |
234 | * treated as part of the DSA header). |
235 | */ |
236 | static inline void *(struct sk_buff *skb) |
237 | { |
238 | return skb->data + 2 * ETH_ALEN; |
239 | } |
240 | |
/* Create 2 modaliases per tagging protocol, one to auto-load the module
 * given the ID reported by get_tag_protocol(), and the other by name.
 */
#define DSA_TAG_DRIVER_ALIAS "dsa_tag:"
/* Expands to MODULE_ALIAS("dsa_tag:<name>") and
 * MODULE_ALIAS("dsa_tag:id-<numeric value of __proto##_VALUE>").
 */
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto, __name) \
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __name); \
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS "id-" \
		     __stringify(__proto##_VALUE))
249 | |
250 | void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[], |
251 | unsigned int count, |
252 | struct module *owner); |
253 | void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[], |
254 | unsigned int count); |
255 | |
/* Generate module_init/module_exit boilerplate that registers an array of
 * tag drivers on load and unregisters it on unload. Intended to be used via
 * module_dsa_tag_drivers() / module_dsa_tag_driver() below.
 */
#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count)	\
static int __init dsa_tag_driver_module_init(void)			\
{									\
	dsa_tag_drivers_register(__dsa_tag_drivers_array, __count,	\
				 THIS_MODULE);				\
	return 0;							\
}									\
module_init(dsa_tag_driver_module_init);				\
									\
static void __exit dsa_tag_driver_module_exit(void)			\
{									\
	dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count);	\
}									\
module_exit(dsa_tag_driver_module_exit)
270 | |
271 | /** |
272 | * module_dsa_tag_drivers() - Helper macro for registering DSA tag |
273 | * drivers |
274 | * @__ops_array: Array of tag driver structures |
275 | * |
276 | * Helper macro for DSA tag drivers which do not do anything special |
277 | * in module init/exit. Each module may only use this macro once, and |
278 | * calling it replaces module_init() and module_exit(). |
279 | */ |
280 | #define module_dsa_tag_drivers(__ops_array) \ |
281 | dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array)) |
282 | |
/* Derive the identifier of the static dsa_tag_driver wrapper for __ops */
#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops

/* Create a static structure we can build a linked list of dsa_tag
 * drivers
 */
#define DSA_TAG_DRIVER(__ops)					\
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = {	\
	.ops = &__ops,						\
}
292 | |
293 | /** |
294 | * module_dsa_tag_driver() - Helper macro for registering a single DSA tag |
295 | * driver |
296 | * @__ops: Single tag driver structures |
297 | * |
298 | * Helper macro for DSA tag drivers which do not do anything special |
299 | * in module init/exit. Each module may only use this macro once, and |
300 | * calling it replaces module_init() and module_exit(). |
301 | */ |
302 | #define module_dsa_tag_driver(__ops) \ |
303 | DSA_TAG_DRIVER(__ops); \ |
304 | \ |
305 | static struct dsa_tag_driver *dsa_tag_driver_array[] = { \ |
306 | &DSA_TAG_DRIVER_NAME(__ops) \ |
307 | }; \ |
308 | module_dsa_tag_drivers(dsa_tag_driver_array) |
309 | |
310 | #endif |
311 | |