// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

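/*
 * Queue register accessors. When a queue is managed by the MediaTek
 * Wireless Ethernet Dispatch (WED) block, its ring registers sit behind
 * the WED register space and are accessed through the WED helpers;
 * otherwise plain MMIO readl/writel on the queue registers is used.
 */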
#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

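/*
 * Allocate a TX descriptor buffer (txwi) together with its bookkeeping
 * entry. The txwi data sits at the start of the allocation and is mapped
 * for DMA; the struct mt76_txwi_cache lives directly behind it, so a
 * single kzalloc covers both.
 */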
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

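/*
 * Program the descriptor ring base and size into the queue registers and
 * resynchronize the software head/tail pointers with the hardware DMA index.
 */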
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

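/*
 * Attach a single RX buffer to the ring at the current head. For WED RX
 * queues the buffer is tracked through an rxwi entry and a token that the
 * hardware hands back on completion; WED RRO indication queues use the
 * descriptor memory itself as the buffer.
 */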
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

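/*
 * Queue a scatter-gather TX buffer list. Each hardware descriptor carries
 * up to two buffer pointers, so buffers are consumed in pairs; the last
 * segment is flagged with MT_DMA_CTL_LAST_SEC0/1 and the skb and txwi are
 * attached to the final entry for completion handling.
 */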
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

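/*
 * Unmap the DMA buffers of a completed TX entry and hand a copy of it back
 * to the caller, then clear the ring slot for reuse.
 */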
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

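/*
 * Reap completed TX descriptors up to the hardware DMA index (or the whole
 * ring when flushing), recycling txwi entries and waking any waiters once
 * the queue drains.
 */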
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

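/*
 * Detach the buffer belonging to an RX ring slot. For WED RX queues the
 * buffer is looked up through its token and its rxwi entry is returned to
 * the cache; otherwise the buffer is simply synced for CPU access.
 */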
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

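/*
 * Pop the next completed RX descriptor off the ring tail. Unless flushing,
 * stop at the first descriptor the hardware has not marked DMA-done yet.
 */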
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

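/*
 * Transmit an skb as a single raw DMA buffer, without reserving a txwi
 * (the MT76_MCU_RESET check guards this path). The skb is consumed on
 * every error path.
 */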
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

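/*
 * Main TX path: reserve a txwi, map the skb head and all fragments, let
 * the driver fill the hardware descriptor via tx_prepare_skb(), then chain
 * the buffers onto the ring. On failure everything is unmapped and a TX
 * status is reported so mac80211 sees the frame as completed.
 */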
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

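/*
 * Refill an RX ring with page-pool buffers until it is full, then kick the
 * hardware. WED RRO indication rings need no buffers of their own, since
 * the descriptor memory doubles as the data.
 */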
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		enum dma_data_direction dir;
		dma_addr_t addr;
		int offset;
		void *buf = NULL;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

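/*
 * Allocate a queue: coherent descriptor memory, the per-slot entry array
 * and a page pool, then hook the queue up to WED where applicable. Rings
 * owned by an active WED device are reset by WED itself and skip the
 * local reset.
 */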
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_wed_dma_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_wed_dma_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}

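/*
 * Append an RX buffer to the skb currently being assembled in q->rx_head.
 * Once the last fragment arrives (!more), hand the completed skb to the
 * driver, or drop it if the fragment list overflowed.
 */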
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

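/*
 * NAPI RX processing: dequeue up to @budget completed buffers, build skbs
 * around them (recycling to the page pool where possible), reassemble
 * multi-buffer frames and pass them to the driver, then refill the ring.
 * WED TX-free rings are polled against the hardware DMA index instead of
 * the per-descriptor DMA-done flag.
 */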
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

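/*
 * Register the RX queues with threaded NAPI, using a dummy netdev as the
 * anchor, pre-fill them with buffers and enable polling.
 */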
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);