// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

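/* Program one 32-bit DMA descriptor. The low bits of the DMA address go
 * into the descriptor's address word (with the SSB translation bits ORed
 * in), while the bits covered by SSB_DMA_TRANSLATION_MASK are carried in
 * the ADDREXT field of the control word. */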
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}

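/* Kick the TX engine: TXINDEX holds the byte offset of the descriptor the
 * controller should stop at, so writing the offset of the slot after the
 * last filled one hands the pending descriptors to the hardware. */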
static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

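/* Advance to the next ring slot. A current_slot of -1 marks an empty TX
 * ring, so next_slot(ring, -1) yields slot 0. */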
static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
		struct b43legacy_wldev *dev,
		int queue_priority)
{
	struct b43legacy_dmaring *ring;

	/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		fallthrough;
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

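/* Map a DMA controller index to its MMIO register base. Only the 32-bit
 * register layout exists on this legacy hardware, so the engine type is
 * not needed for the lookup. */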
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

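/* Streaming-DMA helpers: map, unmap and sync a descriptor buffer in the
 * direction appropriate for TX or RX. */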
static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

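/* The descriptor ring itself lives in a single coherent DMA allocation,
 * so CPU-side descriptor updates need no explicit sync calls. */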
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

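/* Attach a freshly allocated skb to an RX slot. If the first mapping lands
 * outside the engine's addressable range, the buffer is reallocated with
 * GFP_DMA and mapped again. The RX header frame_len and the TX-status
 * cookie are cleared up front so the poll loops in dma_rx() can tell when
 * the hardware has actually written into the buffer. */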
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
				     "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			 & B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

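/* Probe the DMA engine type: if the address-extension bits in TXCTL are
 * writable, the core supports 32-bit DMA addressing; otherwise it is
 * limited to 30 bits. */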
static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return B43legacy_DMA_32BIT;
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	enum b43legacy_dmatype type = b43legacy_engine_type(dev);
	int err;

	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
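	/* Example: ring 2, slot 5 yields cookie 0xC005. */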
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

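/* Each packet occupies two ring slots: one descriptor for the hardware TX
 * header and one for the payload. */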
#define SLOTS_PER_PACKET 2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb(); /* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

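	/* Walk the frame's slots in order (header descriptor first, payload
	 * descriptor last) and release each one. */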
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing the status.
			 * The xmit function has overwritten the rc's value with the actual
			 * retry limit done by the hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

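/* Handle one received buffer. On ring 3 the buffer carries a hardware TX
 * status report; on the other RX rings it carries a frame preceded by the
 * firmware RX header at the ring's frameoffset. */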
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

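/* Drain the RX ring: process every slot between the driver's last position
 * and the slot the hardware is currently writing to. */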
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			    ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}