// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME 2
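/* A frame thus occupies two consecutive ring slots: slot N carries the
 * device TX header and slot N+1 the ieee80211 payload; see
 * dma_tx_fragment() below, which fills exactly those two slots. */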

static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 addr;

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}
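/* Worked example (a sketch, assuming the common ssb.h definitions
 * SSB_DMA_TRANSLATION_MASK == 0xC0000000 and _SHIFT == 30): with
 * translation == 0x80000000 and translation_in_low set, dmaaddr
 * 0x1FF00000 yields
 *   LOW = (0x1FF00000 & ~0xC0000000) | 0x80000000 = 0x9FF00000
 *   EXT = (0x1FF00000 &  0xC0000000) >> 30        = 0x0
 * i.e. the translation bits select the backplane-to-host window, while
 * the displaced original bits travel in the "address extension" field. */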

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
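/* Both helpers wrap around the ring. For instance, with a hypothetical
 * nr_slots of 256, next_slot(ring, 255) == 0 and prev_slot(ring, 0) == 255. */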

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of a hardware bug: if bit
	 * 0x00001000 is set in the DMA address, some hardware (like the
	 * BCM4331) copies that bit into B43_DMA64_RXSTATUS and we get false
	 * values from B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even
	 * if we don't use more than 256 slots for the ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}
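/* Worked example for the 30-bit case: a buffer mapped at dmaaddr
 * 0x3FFFF000 with buffersize 8192 ends at 0x40001000, which exceeds the
 * 1 GiB (1 << 30) window the engine can address, so the mapping is
 * rejected and unmapped again. The callers then retry with a GFP_DMA
 * allocation or a bounce buffer. */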

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static enum b43_dmatype b43_engine_type(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return B43_DMA_64BIT;
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return B43_DMA_64BIT;
		break;
#endif
	}

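	/* Fallback probe: write the address-extension bits of the first
	 * 32-bit TX control register and read them back. If they stick,
	 * the engine understands address extensions (32-bit DMA); if they
	 * read back as zero, only 30-bit addressing is available. */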
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return B43_DMA_32BIT;
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS is divisible by TX_SLOTS_PER_FRAME. */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
})

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
})
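/* do_div() divides its 64-bit dividend in place and evaluates to the
 * remainder, hence the local copy: divide(1234, 10) yields 123 and
 * modulo(1234, 10) yields 4. */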

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the rx handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
} while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

/* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bits in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return true;
#endif
	return false;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	enum b43_dmatype type = b43_engine_type(dev);
	int err;

	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n", type);
		return err;
	}

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
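/* For example, slot 5 on the ring with index 2 (tx_ring_AC_VI) yields
 * cookie 0x3005; parse_cookie() below maps it back to that ring/slot. */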

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			fallthrough;
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM. */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		b43_stop_queue(dev, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = true;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
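	/* Example: on a 256-slot ring with current_slot == 3 and
	 * used_slots == 10, firstused is 3 - 10 + 1 = -6, which wraps
	 * around to slot 250. */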

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error. If running with open-source
			 * firmware, then reset the controller to
			 * revive operation.
			 */
			b43dbg(dev->wl,
			       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
			       ring->index, firstused, slot);
			if (dev->fw.opensource)
				b43_controller_restart(dev, "Out of order TX");
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged.
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status_skb(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status_skb().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = false;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		b43_wake_queue(dev, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
{
	int current_slot, previous_slot;

	B43_WARN_ON(ring->tx);

	/* The device has filled all buffers; drop all packets and let TCP
	 * decrease speed. Decrementing the RX index by one lets the device
	 * see all slots as free again.
	 */
	/*
	 * TODO: How to increase rx_drop in mac80211?
	 */
	current_slot = ring->ops->get_current_rxslot(ring);
	previous_slot = prev_slot(ring, current_slot);
	ring->ops->set_current_rxslot(ring, previous_slot);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = b43_engine_type(dev);

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}