1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /****************************************************************************** |
3 | |
4 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. |
5 | |
6 | |
7 | Contact Information: |
8 | Intel Linux Wireless <ilw@linux.intel.com> |
9 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
10 | |
11 | ******************************************************************************/ |
12 | #include <linux/compiler.h> |
13 | #include <linux/errno.h> |
14 | #include <linux/if_arp.h> |
15 | #include <linux/in6.h> |
16 | #include <linux/in.h> |
17 | #include <linux/ip.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> |
20 | #include <linux/netdevice.h> |
21 | #include <linux/proc_fs.h> |
22 | #include <linux/skbuff.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/tcp.h> |
25 | #include <linux/types.h> |
26 | #include <linux/wireless.h> |
27 | #include <linux/etherdevice.h> |
28 | #include <linux/uaccess.h> |
29 | |
30 | #include "libipw.h" |
31 | |
32 | /* |
33 | |
34 | 802.11 Data Frame |
35 | |
36 | ,-------------------------------------------------------------------. |
37 | Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | |
38 | |------|------|---------|---------|---------|------|---------|------| |
39 | Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | |
40 | | | tion | (BSSID) | | | ence | data | | |
41 | `--------------------------------------------------| |------' |
42 | Total: 28 non-data bytes `----.----' |
43 | | |
44 | .- 'Frame data' expands, if WEP enabled, to <----------' |
45 | | |
46 | V |
47 | ,-----------------------. |
48 | Bytes | 4 | 0-2296 | 4 | |
49 | |-----|-----------|-----| |
50 | Desc. | IV | Encrypted | ICV | |
51 | | | Packet | | |
52 | `-----| |-----' |
53 | `-----.-----' |
54 | | |
55 | .- 'Encrypted Packet' expands to |
56 | | |
57 | V |
58 | ,---------------------------------------------------. |
59 | Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | |
60 | |------|------|---------|----------|------|---------| |
61 | Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | |
62 | | DSAP | SSAP | | | | Packet | |
63 | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | |
64 | `---------------------------------------------------- |
65 | Total: 8 non-data bytes |
66 | |
67 | 802.3 Ethernet Data Frame |
68 | |
69 | ,-----------------------------------------. |
70 | Bytes | 6 | 6 | 2 | Variable | 4 | |
71 | |-------|-------|------|-----------|------| |
72 | Desc. | Dest. | Source| Type | IP Packet | fcs | |
73 | | MAC | MAC | | | | |
74 | `-----------------------------------------' |
75 | Total: 18 non-data bytes |
76 | |
77 | In the event that fragmentation is required, the incoming payload is split into |
78 | N parts of size ieee->fts. The first fragment contains the SNAP header and the |
79 | remaining packets are just data. |
80 | |
81 | If encryption is enabled, each fragment payload size is reduced by enough space |
82 | to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP) |
83 | So if you have 1500 bytes of payload with ieee->fts set to 500 without |
84 | encryption it will take 3 frames. With WEP it will take 4 frames as the |
85 | payload of each frame is reduced to 492 bytes. |
86 | |
87 | * SKB visualization |
88 | * |
89 | * ,- skb->data |
90 | * | |
91 | * | ETHERNET HEADER ,-<-- PAYLOAD |
92 | * | | 14 bytes from skb->data |
93 | * | 2 bytes for Type --> ,T. | (sizeof ethhdr) |
94 | * | | | | |
95 | * |,-Dest.--. ,--Src.---. | | | |
96 | * | 6 bytes| | 6 bytes | | | | |
97 | * v | | | | | | |
98 | * 0 | v 1 | v | v 2 |
99 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 |
100 | * ^ | ^ | ^ | |
101 | * | | | | | | |
102 | * | | | | `T' <---- 2 bytes for Type |
103 | * | | | | |
104 | * | | '---SNAP--' <-------- 6 bytes for SNAP |
105 | * | | |
106 | * `-IV--' <-------------------- 4 bytes for IV (WEP) |
107 | * |
108 | * SNAP HEADER |
109 | * |
110 | */ |
111 | |
/* SNAP OUIs: the 802.1H bridge-tunnel OUI (used for Ethertypes such as
 * AppleTalk AARP and IPX) and the all-zero RFC 1042 OUI used for
 * everything else.  Selected in libipw_copy_snap(). */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
114 | |
115 | static int libipw_copy_snap(u8 * data, __be16 h_proto) |
116 | { |
117 | struct libipw_snap_hdr *snap; |
118 | u8 *oui; |
119 | |
120 | snap = (struct libipw_snap_hdr *)data; |
121 | snap->dsap = 0xaa; |
122 | snap->ssap = 0xaa; |
123 | snap->ctrl = 0x03; |
124 | |
125 | if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX)) |
126 | oui = P802_1H_OUI; |
127 | else |
128 | oui = RFC1042_OUI; |
129 | snap->oui[0] = oui[0]; |
130 | snap->oui[1] = oui[1]; |
131 | snap->oui[2] = oui[2]; |
132 | |
133 | memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16)); |
134 | |
135 | return SNAP_SIZE + sizeof(u16); |
136 | } |
137 | |
138 | static int libipw_encrypt_fragment(struct libipw_device *ieee, |
139 | struct sk_buff *frag, int hdr_len) |
140 | { |
141 | struct lib80211_crypt_data *crypt = |
142 | ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; |
143 | int res; |
144 | |
145 | if (crypt == NULL) |
146 | return -1; |
147 | |
148 | /* To encrypt, frame format is: |
149 | * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ |
150 | atomic_inc(v: &crypt->refcnt); |
151 | res = 0; |
152 | if (crypt->ops && crypt->ops->encrypt_mpdu) |
153 | res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); |
154 | |
155 | atomic_dec(v: &crypt->refcnt); |
156 | if (res < 0) { |
157 | printk(KERN_INFO "%s: Encryption failed: len=%d.\n" , |
158 | ieee->dev->name, frag->len); |
159 | ieee->ieee_stats.tx_discards++; |
160 | return -1; |
161 | } |
162 | |
163 | return 0; |
164 | } |
165 | |
166 | void libipw_txb_free(struct libipw_txb *txb) |
167 | { |
168 | int i; |
169 | if (unlikely(!txb)) |
170 | return; |
171 | for (i = 0; i < txb->nr_frags; i++) |
172 | if (txb->fragments[i]) |
173 | dev_kfree_skb_any(skb: txb->fragments[i]); |
174 | kfree(objp: txb); |
175 | } |
176 | |
177 | static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size, |
178 | int headroom, gfp_t gfp_mask) |
179 | { |
180 | struct libipw_txb *txb; |
181 | int i; |
182 | |
183 | txb = kmalloc(struct_size(txb, fragments, nr_frags), flags: gfp_mask); |
184 | if (!txb) |
185 | return NULL; |
186 | |
187 | memset(txb, 0, sizeof(struct libipw_txb)); |
188 | txb->nr_frags = nr_frags; |
189 | txb->frag_size = txb_size; |
190 | |
191 | for (i = 0; i < nr_frags; i++) { |
192 | txb->fragments[i] = __dev_alloc_skb(length: txb_size + headroom, |
193 | gfp_mask); |
194 | if (unlikely(!txb->fragments[i])) { |
195 | i--; |
196 | break; |
197 | } |
198 | skb_reserve(skb: txb->fragments[i], len: headroom); |
199 | } |
200 | if (unlikely(i != nr_frags)) { |
201 | while (i >= 0) |
202 | dev_kfree_skb_any(skb: txb->fragments[i--]); |
203 | kfree(objp: txb); |
204 | return NULL; |
205 | } |
206 | return txb; |
207 | } |
208 | |
209 | static int libipw_classify(struct sk_buff *skb) |
210 | { |
211 | struct ethhdr *eth; |
212 | struct iphdr *ip; |
213 | |
214 | eth = (struct ethhdr *)skb->data; |
215 | if (eth->h_proto != htons(ETH_P_IP)) |
216 | return 0; |
217 | |
218 | ip = ip_hdr(skb); |
219 | switch (ip->tos & 0xfc) { |
220 | case 0x20: |
221 | return 2; |
222 | case 0x40: |
223 | return 1; |
224 | case 0x60: |
225 | return 3; |
226 | case 0x80: |
227 | return 4; |
228 | case 0xa0: |
229 | return 5; |
230 | case 0xc0: |
231 | return 6; |
232 | case 0xe0: |
233 | return 7; |
234 | default: |
235 | return 0; |
236 | } |
237 | } |
238 | |
239 | /* Incoming skb is converted to a txb which consists of |
240 | * a block of 802.11 fragment packets (stored as skbs) */ |
241 | netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) |
242 | { |
243 | struct libipw_device *ieee = netdev_priv(dev); |
244 | struct libipw_txb *txb = NULL; |
245 | struct libipw_hdr_3addrqos *frag_hdr; |
246 | int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, |
247 | rts_required; |
248 | unsigned long flags; |
249 | int encrypt, host_encrypt, host_encrypt_msdu; |
250 | __be16 ether_type; |
251 | int bytes, fc, hdr_len; |
252 | struct sk_buff *skb_frag; |
253 | struct libipw_hdr_3addrqos = {/* Ensure zero initialized */ |
254 | .duration_id = 0, |
255 | .seq_ctl = 0, |
256 | .qos_ctl = 0 |
257 | }; |
258 | u8 dest[ETH_ALEN], src[ETH_ALEN]; |
259 | struct lib80211_crypt_data *crypt; |
260 | int priority = skb->priority; |
261 | int snapped = 0; |
262 | |
263 | if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority)) |
264 | return NETDEV_TX_BUSY; |
265 | |
266 | spin_lock_irqsave(&ieee->lock, flags); |
267 | |
268 | /* If there is no driver handler to take the TXB, dont' bother |
269 | * creating it... */ |
270 | if (!ieee->hard_start_xmit) { |
271 | printk(KERN_WARNING "%s: No xmit handler.\n" , ieee->dev->name); |
272 | goto success; |
273 | } |
274 | |
275 | if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { |
276 | printk(KERN_WARNING "%s: skb too small (%d).\n" , |
277 | ieee->dev->name, skb->len); |
278 | goto success; |
279 | } |
280 | |
281 | ether_type = ((struct ethhdr *)skb->data)->h_proto; |
282 | |
283 | crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; |
284 | |
285 | encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) && |
286 | ieee->sec.encrypt; |
287 | |
288 | host_encrypt = ieee->host_encrypt && encrypt && crypt; |
289 | host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; |
290 | |
291 | if (!encrypt && ieee->ieee802_1x && |
292 | ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) { |
293 | dev->stats.tx_dropped++; |
294 | goto success; |
295 | } |
296 | |
297 | /* Save source and destination addresses */ |
298 | skb_copy_from_linear_data(skb, to: dest, ETH_ALEN); |
299 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, to: src, ETH_ALEN); |
300 | |
301 | if (host_encrypt) |
302 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | |
303 | IEEE80211_FCTL_PROTECTED; |
304 | else |
305 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; |
306 | |
307 | if (ieee->iw_mode == IW_MODE_INFRA) { |
308 | fc |= IEEE80211_FCTL_TODS; |
309 | /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ |
310 | memcpy(header.addr1, ieee->bssid, ETH_ALEN); |
311 | memcpy(header.addr2, src, ETH_ALEN); |
312 | memcpy(header.addr3, dest, ETH_ALEN); |
313 | } else if (ieee->iw_mode == IW_MODE_ADHOC) { |
314 | /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ |
315 | memcpy(header.addr1, dest, ETH_ALEN); |
316 | memcpy(header.addr2, src, ETH_ALEN); |
317 | memcpy(header.addr3, ieee->bssid, ETH_ALEN); |
318 | } |
319 | hdr_len = LIBIPW_3ADDR_LEN; |
320 | |
321 | if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) { |
322 | fc |= IEEE80211_STYPE_QOS_DATA; |
323 | hdr_len += 2; |
324 | |
325 | skb->priority = libipw_classify(skb); |
326 | header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID); |
327 | } |
328 | header.frame_ctl = cpu_to_le16(fc); |
329 | |
330 | /* Advance the SKB to the start of the payload */ |
331 | skb_pull(skb, len: sizeof(struct ethhdr)); |
332 | |
333 | /* Determine total amount of storage required for TXB packets */ |
334 | bytes = skb->len + SNAP_SIZE + sizeof(u16); |
335 | |
336 | /* Encrypt msdu first on the whole data packet. */ |
337 | if ((host_encrypt || host_encrypt_msdu) && |
338 | crypt && crypt->ops && crypt->ops->encrypt_msdu) { |
339 | int res = 0; |
340 | int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + |
341 | crypt->ops->extra_msdu_postfix_len; |
342 | struct sk_buff *skb_new = dev_alloc_skb(length: len); |
343 | |
344 | if (unlikely(!skb_new)) |
345 | goto failed; |
346 | |
347 | skb_reserve(skb: skb_new, len: crypt->ops->extra_msdu_prefix_len); |
348 | skb_put_data(skb: skb_new, data: &header, len: hdr_len); |
349 | snapped = 1; |
350 | libipw_copy_snap(data: skb_put(skb: skb_new, SNAP_SIZE + sizeof(u16)), |
351 | h_proto: ether_type); |
352 | skb_copy_from_linear_data(skb, to: skb_put(skb: skb_new, len: skb->len), len: skb->len); |
353 | res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); |
354 | if (res < 0) { |
355 | LIBIPW_ERROR("msdu encryption failed\n" ); |
356 | dev_kfree_skb_any(skb: skb_new); |
357 | goto failed; |
358 | } |
359 | dev_kfree_skb_any(skb); |
360 | skb = skb_new; |
361 | bytes += crypt->ops->extra_msdu_prefix_len + |
362 | crypt->ops->extra_msdu_postfix_len; |
363 | skb_pull(skb, len: hdr_len); |
364 | } |
365 | |
366 | if (host_encrypt || ieee->host_open_frag) { |
367 | /* Determine fragmentation size based on destination (multicast |
368 | * and broadcast are not fragmented) */ |
369 | if (is_multicast_ether_addr(addr: dest) || |
370 | is_broadcast_ether_addr(addr: dest)) |
371 | frag_size = MAX_FRAG_THRESHOLD; |
372 | else |
373 | frag_size = ieee->fts; |
374 | |
375 | /* Determine amount of payload per fragment. Regardless of if |
376 | * this stack is providing the full 802.11 header, one will |
377 | * eventually be affixed to this fragment -- so we must account |
378 | * for it when determining the amount of payload space. */ |
379 | bytes_per_frag = frag_size - hdr_len; |
380 | if (ieee->config & |
381 | (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) |
382 | bytes_per_frag -= LIBIPW_FCS_LEN; |
383 | |
384 | /* Each fragment may need to have room for encryption |
385 | * pre/postfix */ |
386 | if (host_encrypt && crypt && crypt->ops) |
387 | bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + |
388 | crypt->ops->extra_mpdu_postfix_len; |
389 | |
390 | /* Number of fragments is the total |
391 | * bytes_per_frag / payload_per_fragment */ |
392 | nr_frags = bytes / bytes_per_frag; |
393 | bytes_last_frag = bytes % bytes_per_frag; |
394 | if (bytes_last_frag) |
395 | nr_frags++; |
396 | else |
397 | bytes_last_frag = bytes_per_frag; |
398 | } else { |
399 | nr_frags = 1; |
400 | bytes_per_frag = bytes_last_frag = bytes; |
401 | frag_size = bytes + hdr_len; |
402 | } |
403 | |
404 | rts_required = (frag_size > ieee->rts |
405 | && ieee->config & CFG_LIBIPW_RTS); |
406 | if (rts_required) |
407 | nr_frags++; |
408 | |
409 | /* When we allocate the TXB we allocate enough space for the reserve |
410 | * and full fragment bytes (bytes_per_frag doesn't include prefix, |
411 | * postfix, header, FCS, etc.) */ |
412 | txb = libipw_alloc_txb(nr_frags, txb_size: frag_size, |
413 | headroom: ieee->tx_headroom, GFP_ATOMIC); |
414 | if (unlikely(!txb)) { |
415 | printk(KERN_WARNING "%s: Could not allocate TXB\n" , |
416 | ieee->dev->name); |
417 | goto failed; |
418 | } |
419 | txb->encrypted = encrypt; |
420 | if (host_encrypt) |
421 | txb->payload_size = frag_size * (nr_frags - 1) + |
422 | bytes_last_frag; |
423 | else |
424 | txb->payload_size = bytes; |
425 | |
426 | if (rts_required) { |
427 | skb_frag = txb->fragments[0]; |
428 | frag_hdr = skb_put(skb: skb_frag, len: hdr_len); |
429 | |
430 | /* |
431 | * Set header frame_ctl to the RTS. |
432 | */ |
433 | header.frame_ctl = |
434 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); |
435 | memcpy(frag_hdr, &header, hdr_len); |
436 | |
437 | /* |
438 | * Restore header frame_ctl to the original data setting. |
439 | */ |
440 | header.frame_ctl = cpu_to_le16(fc); |
441 | |
442 | if (ieee->config & |
443 | (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) |
444 | skb_put(skb: skb_frag, len: 4); |
445 | |
446 | txb->rts_included = 1; |
447 | i = 1; |
448 | } else |
449 | i = 0; |
450 | |
451 | for (; i < nr_frags; i++) { |
452 | skb_frag = txb->fragments[i]; |
453 | |
454 | if (host_encrypt) |
455 | skb_reserve(skb: skb_frag, |
456 | len: crypt->ops->extra_mpdu_prefix_len); |
457 | |
458 | frag_hdr = skb_put_data(skb: skb_frag, data: &header, len: hdr_len); |
459 | |
460 | /* If this is not the last fragment, then add the MOREFRAGS |
461 | * bit to the frame control */ |
462 | if (i != nr_frags - 1) { |
463 | frag_hdr->frame_ctl = |
464 | cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS); |
465 | bytes = bytes_per_frag; |
466 | } else { |
467 | /* The last fragment takes the remaining length */ |
468 | bytes = bytes_last_frag; |
469 | } |
470 | |
471 | if (i == 0 && !snapped) { |
472 | libipw_copy_snap(data: skb_put |
473 | (skb: skb_frag, SNAP_SIZE + sizeof(u16)), |
474 | h_proto: ether_type); |
475 | bytes -= SNAP_SIZE + sizeof(u16); |
476 | } |
477 | |
478 | skb_copy_from_linear_data(skb, to: skb_put(skb: skb_frag, len: bytes), len: bytes); |
479 | |
480 | /* Advance the SKB... */ |
481 | skb_pull(skb, len: bytes); |
482 | |
483 | /* Encryption routine will move the header forward in order |
484 | * to insert the IV between the header and the payload */ |
485 | if (host_encrypt) |
486 | libipw_encrypt_fragment(ieee, frag: skb_frag, hdr_len); |
487 | |
488 | if (ieee->config & |
489 | (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) |
490 | skb_put(skb: skb_frag, len: 4); |
491 | } |
492 | |
493 | success: |
494 | spin_unlock_irqrestore(lock: &ieee->lock, flags); |
495 | |
496 | dev_kfree_skb_any(skb); |
497 | |
498 | if (txb) { |
499 | netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority); |
500 | if (ret == NETDEV_TX_OK) { |
501 | dev->stats.tx_packets++; |
502 | dev->stats.tx_bytes += txb->payload_size; |
503 | return NETDEV_TX_OK; |
504 | } |
505 | |
506 | libipw_txb_free(txb); |
507 | } |
508 | |
509 | return NETDEV_TX_OK; |
510 | |
511 | failed: |
512 | spin_unlock_irqrestore(lock: &ieee->lock, flags); |
513 | netif_stop_queue(dev); |
514 | dev->stats.tx_errors++; |
515 | return NETDEV_TX_BUSY; |
516 | } |
/* Exported for use by the hardware driver modules built on libipw. */
EXPORT_SYMBOL(libipw_xmit);

EXPORT_SYMBOL(libipw_txb_free);
520 | |