// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

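/* Build an skb for a single RX segment.  For paged RX only the 802.11
 * header (and a few leading bytes) is copied into a small skb and the
 * rest of the frame is attached as a page fragment; otherwise the whole
 * frame is copied into the skb.
 */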
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

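	/* When L2PAD is set the hardware has inserted 2 bytes of padding
	 * between the 802.11 header and the payload to align the payload;
	 * copy the header out first and skip over the pad.
	 */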
	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		skb_put_data(skb, data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX, the allocated skb always has enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

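/* Process one frame segment of an RX URB: strip the per-segment DMA
 * header and RXWI descriptor, build an skb and hand it to mac80211.
 */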
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p,
				   struct list_head *list)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information; we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	local_bh_disable();
	rcu_read_lock();

	ieee80211_rx_list(dev->hw, NULL, skb, list);

	rcu_read_unlock();
	local_bh_enable();
}

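/* RX URBs may aggregate multiple frames.  Each segment begins with a DMA
 * header whose first 16-bit word holds the 4-byte-aligned segment length;
 * a return value of 0 terminates the segment walk.
 */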
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON_ONCE(!dma_len) ||
	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON_ONCE(dma_len & 0x3) ||
	    WARN_ON_ONCE(dma_len < min_seg_len))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

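/* Process a completed RX URB.  The buffer may aggregate several frames;
 * walk the segments, queue each resulting skb on a local list, then flush
 * the whole list to the network stack in one batch.
 */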
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	LIST_HEAD(list);
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer; otherwise hand
	 * the page off to the stack and replace it with a fresh one.
	 */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len,
				       new_p ? e->p : NULL, &list);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	netif_receive_skb_list(&list);

	if (new_p) {
		/* we have one extra ref from the allocator */
		put_page(e->p);
		e->p = new_p;
	}
}

static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

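/* URB completion callback; runs in interrupt context.  Just mark the
 * entry as pending and defer the real work to the RX tasklet.
 */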
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	/* do not schedule the rx tasklet if the urb has been unlinked
	 * or the device has been removed
	 */
	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

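/* Drain all pending RX entries, processing and re-arming each one. */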
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
	struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

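	/* The queue is stopped when it fills up completely; wake it once
	 * usage drains back to the 7/8 watermark.
	 */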
	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
	struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

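/* Queue an skb for transmission on the given bulk OUT endpoint.  The
 * queue entry holds on to the skb until its URB completes.
 */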
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;
	e->skb = skb;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

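/* Poison all RX URBs so in-flight transfers are cancelled and no new
 * completions fire during teardown.
 */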
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++)
		usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		if (q->e[i].skb)
			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

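/* Set up the TX/RX tasklets and queues, then start receiving.  Any
 * failure falls through to mt7601u_dma_cleanup().
 */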
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret;

	tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
	tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}