1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> |
4 | Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> |
5 | Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> |
6 | <http://rt2x00.serialmonkey.com> |
7 | |
8 | */ |
9 | |
10 | /* |
11 | Module: rt2x00lib |
12 | Abstract: rt2x00 queue specific routines. |
13 | */ |
14 | |
15 | #include <linux/slab.h> |
16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> |
18 | #include <linux/dma-mapping.h> |
19 | |
20 | #include "rt2x00.h" |
21 | #include "rt2x00lib.h" |
22 | |
23 | struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) |
24 | { |
25 | struct data_queue *queue = entry->queue; |
26 | struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; |
27 | struct sk_buff *skb; |
28 | struct skb_frame_desc *skbdesc; |
29 | unsigned int frame_size; |
30 | unsigned int head_size = 0; |
31 | unsigned int tail_size = 0; |
32 | |
33 | /* |
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
36 | */ |
37 | frame_size = queue->data_size + queue->desc_size + queue->winfo_size; |
38 | |
39 | /* |
	 * The payload should be aligned to a 4-byte boundary; this
	 * means we need at least 3 bytes of headroom for moving the
	 * frame to the correct offset.
43 | */ |
44 | head_size = 4; |
45 | |
46 | /* |
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
50 | */ |
51 | if (rt2x00_has_cap_hw_crypto(rt2x00dev)) { |
52 | head_size += 8; |
53 | tail_size += 8; |
54 | } |
55 | |
56 | /* |
57 | * Allocate skbuffer. |
58 | */ |
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
60 | if (!skb) |
61 | return NULL; |
62 | |
63 | /* |
	 * Reserve the requested headroom and make the frame data area
	 * span the requested number of bytes.
66 | */ |
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
69 | |
70 | /* |
71 | * Populate skbdesc. |
72 | */ |
73 | skbdesc = get_skb_frame_desc(skb); |
74 | memset(skbdesc, 0, sizeof(*skbdesc)); |
75 | |
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
77 | dma_addr_t skb_dma; |
78 | |
79 | skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, |
80 | DMA_FROM_DEVICE); |
81 | if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) { |
82 | dev_kfree_skb_any(skb); |
83 | return NULL; |
84 | } |
85 | |
86 | skbdesc->skb_dma = skb_dma; |
87 | skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; |
88 | } |
89 | |
90 | return skb; |
91 | } |
92 | |
93 | int rt2x00queue_map_txskb(struct queue_entry *entry) |
94 | { |
95 | struct device *dev = entry->queue->rt2x00dev->dev; |
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
97 | |
98 | skbdesc->skb_dma = |
99 | dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE); |
100 | |
101 | if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma))) |
102 | return -ENOMEM; |
103 | |
104 | skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; |
105 | rt2x00lib_dmadone(entry); |
106 | return 0; |
107 | } |
108 | EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); |
109 | |
110 | void rt2x00queue_unmap_skb(struct queue_entry *entry) |
111 | { |
112 | struct device *dev = entry->queue->rt2x00dev->dev; |
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
114 | |
115 | if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { |
116 | dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, |
117 | DMA_FROM_DEVICE); |
118 | skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; |
119 | } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { |
120 | dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, |
121 | DMA_TO_DEVICE); |
122 | skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; |
123 | } |
124 | } |
125 | EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); |
126 | |
127 | void rt2x00queue_free_skb(struct queue_entry *entry) |
128 | { |
129 | if (!entry->skb) |
130 | return; |
131 | |
132 | rt2x00queue_unmap_skb(entry); |
	dev_kfree_skb_any(entry->skb);
134 | entry->skb = NULL; |
135 | } |
136 | |
137 | void rt2x00queue_align_frame(struct sk_buff *skb) |
138 | { |
139 | unsigned int frame_length = skb->len; |
140 | unsigned int align = ALIGN_SIZE(skb, 0); |
141 | |
142 | if (!align) |
143 | return; |
144 | |
	skb_push(skb, align);
146 | memmove(skb->data, skb->data + align, frame_length); |
	skb_trim(skb, frame_length);
148 | } |
149 | |
150 | /* |
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
153 | */ |
154 | void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) |
155 | { |
156 | unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; |
157 | |
158 | if (!l2pad) |
159 | return; |
160 | |
	skb_push(skb, l2pad);
162 | memmove(skb->data, skb->data + l2pad, hdr_len); |
163 | } |
164 | |
165 | void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) |
166 | { |
167 | unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; |
168 | |
169 | if (!l2pad) |
170 | return; |
171 | |
172 | memmove(skb->data + l2pad, skb->data, hdr_len); |
	skb_pull(skb, l2pad);
174 | } |
175 | |
176 | static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, |
177 | struct sk_buff *skb, |
178 | struct txentry_desc *txdesc) |
179 | { |
180 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
181 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
183 | u16 seqno; |
184 | |
185 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) |
186 | return; |
187 | |
188 | __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); |
189 | |
	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QOS) and
		 * management frames. To work around the problem let's
		 * generate the seqno in software, except for beacons, which
		 * are transmitted periodically by the H/W and hence must
		 * have their seqno assigned by the hardware.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
199 | __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); |
200 | /* H/W will generate sequence number */ |
201 | return; |
202 | } |
203 | |
204 | __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); |
205 | } |
206 | |
207 | /* |
208 | * The hardware is not able to insert a sequence number. Assign a |
209 | * software generated one here. |
210 | * |
211 | * This is wrong because beacons are not getting sequence |
212 | * numbers assigned properly. |
213 | * |
214 | * A secondary problem exists for drivers that cannot toggle |
215 | * sequence counting per-frame, since those will override the |
216 | * sequence counter given by mac80211. |
217 | */ |
218 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) |
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);
222 | |
223 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); |
224 | hdr->seq_ctrl |= cpu_to_le16(seqno); |
225 | } |
226 | |
227 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, |
228 | struct sk_buff *skb, |
229 | struct txentry_desc *txdesc, |
230 | const struct rt2x00_rate *hwrate) |
231 | { |
232 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
233 | struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; |
234 | unsigned int data_length; |
235 | unsigned int duration; |
236 | unsigned int residual; |
237 | |
238 | /* |
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
242 | */ |
243 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) |
244 | txdesc->u.plcp.ifs = IFS_BACKOFF; |
245 | else |
246 | txdesc->u.plcp.ifs = IFS_SIFS; |
247 | |
248 | /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ |
249 | data_length = skb->len + 4; |
250 | data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); |
251 | |
252 | /* |
253 | * PLCP setup |
254 | * Length calculation depends on OFDM/CCK rate. |
255 | */ |
256 | txdesc->u.plcp.signal = hwrate->plcp; |
257 | txdesc->u.plcp.service = 0x04; |
258 | |
259 | if (hwrate->flags & DEV_RATE_OFDM) { |
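		/*
		 * For OFDM rates the 12-bit PLCP LENGTH field (the frame
		 * length in bytes) is split into two 6-bit halves.
		 */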
260 | txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f; |
261 | txdesc->u.plcp.length_low = data_length & 0x3f; |
262 | } else { |
263 | /* |
264 | * Convert length to microseconds. |
265 | */ |
266 | residual = GET_DURATION_RES(data_length, hwrate->bitrate); |
267 | duration = GET_DURATION(data_length, hwrate->bitrate); |
268 | |
269 | if (residual != 0) { |
270 | duration++; |
271 | |
272 | /* |
273 | * Check if we need to set the Length Extension |
274 | */ |
275 | if (hwrate->bitrate == 110 && residual <= 30) |
276 | txdesc->u.plcp.service |= 0x80; |
277 | } |
278 | |
279 | txdesc->u.plcp.length_high = (duration >> 8) & 0xff; |
280 | txdesc->u.plcp.length_low = duration & 0xff; |
281 | |
282 | /* |
		 * When the short preamble is used we should set the
		 * preamble bit in the signal.
285 | */ |
286 | if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) |
287 | txdesc->u.plcp.signal |= 0x08; |
288 | } |
289 | } |
290 | |
291 | static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, |
292 | struct sk_buff *skb, |
293 | struct txentry_desc *txdesc, |
294 | struct ieee80211_sta *sta, |
295 | const struct rt2x00_rate *hwrate) |
296 | { |
297 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
298 | struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; |
299 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
300 | struct rt2x00_sta *sta_priv = NULL; |
301 | u8 density = 0; |
302 | |
303 | if (sta) { |
304 | sta_priv = sta_to_rt2x00_sta(sta); |
305 | txdesc->u.ht.wcid = sta_priv->wcid; |
306 | density = sta->deflink.ht_cap.ampdu_density; |
307 | } |
308 | |
309 | /* |
310 | * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the |
311 | * mcs rate to be used |
312 | */ |
313 | if (txrate->flags & IEEE80211_TX_RC_MCS) { |
314 | txdesc->u.ht.mcs = txrate->idx; |
315 | |
316 | /* |
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
319 | */ |
320 | if (sta && txdesc->u.ht.mcs > 7 && |
321 | sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) |
322 | __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); |
323 | } else { |
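		/*
		 * For legacy (non-MCS) rates the short preamble is
		 * requested by setting bit 3 of the MCS field.
		 */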
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
325 | if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) |
326 | txdesc->u.ht.mcs |= 0x08; |
327 | } |
328 | |
329 | if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) { |
330 | if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) |
331 | txdesc->u.ht.txop = TXOP_SIFS; |
332 | else |
333 | txdesc->u.ht.txop = TXOP_BACKOFF; |
334 | |
		/* Leave all other settings at zero. */
336 | return; |
337 | } |
338 | |
339 | /* |
340 | * Only one STBC stream is supported for now. |
341 | */ |
342 | if (tx_info->flags & IEEE80211_TX_CTL_STBC) |
343 | txdesc->u.ht.stbc = 1; |
344 | |
345 | /* |
346 | * This frame is eligible for an AMPDU, however, don't aggregate |
347 | * frames that are intended to probe a specific tx rate. |
348 | */ |
349 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && |
350 | !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { |
351 | __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); |
352 | txdesc->u.ht.mpdu_density = density; |
353 | txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ |
354 | } |
355 | |
356 | /* |
	 * Set 40 MHz mode if necessary (for legacy rates this will
358 | * duplicate the frame to both channels). |
359 | */ |
360 | if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH || |
361 | txrate->flags & IEEE80211_TX_RC_DUP_DATA) |
362 | __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); |
363 | if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) |
364 | __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); |
365 | |
366 | /* |
367 | * Determine IFS values |
368 | * - Use TXOP_BACKOFF for management frames except beacons |
369 | * - Use TXOP_SIFS for fragment bursts |
370 | * - Use TXOP_HTTXOP for everything else |
371 | * |
372 | * Note: rt2800 devices won't use CTS protection (if used) |
373 | * for frames not transmitted with TXOP_HTTXOP |
374 | */ |
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
377 | txdesc->u.ht.txop = TXOP_BACKOFF; |
378 | else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) |
379 | txdesc->u.ht.txop = TXOP_SIFS; |
380 | else |
381 | txdesc->u.ht.txop = TXOP_HTTXOP; |
382 | } |
383 | |
384 | static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, |
385 | struct sk_buff *skb, |
386 | struct txentry_desc *txdesc, |
387 | struct ieee80211_sta *sta) |
388 | { |
389 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
390 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
391 | struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; |
392 | struct ieee80211_rate *rate; |
393 | const struct rt2x00_rate *hwrate = NULL; |
394 | |
395 | memset(txdesc, 0, sizeof(*txdesc)); |
396 | |
397 | /* |
398 | * Header and frame information. |
399 | */ |
400 | txdesc->length = skb->len; |
401 | txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb); |
402 | |
403 | /* |
404 | * Check whether this frame is to be acked. |
405 | */ |
406 | if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) |
407 | __set_bit(ENTRY_TXD_ACK, &txdesc->flags); |
408 | |
409 | /* |
	 * Check if this is an RTS/CTS frame
411 | */ |
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
416 | __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags); |
417 | else |
418 | __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags); |
419 | } |
420 | |
421 | /* |
422 | * Determine retry information. |
423 | */ |
424 | txdesc->retry_limit = tx_info->control.rates[0].count - 1; |
425 | if (txdesc->retry_limit >= rt2x00dev->long_retry) |
426 | __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags); |
427 | |
428 | /* |
429 | * Check if more fragments are pending |
430 | */ |
	if (ieee80211_has_morefrags(hdr->frame_control)) {
432 | __set_bit(ENTRY_TXD_BURST, &txdesc->flags); |
433 | __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags); |
434 | } |
435 | |
436 | /* |
437 | * Check if more frames (!= fragments) are pending |
438 | */ |
439 | if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES) |
440 | __set_bit(ENTRY_TXD_BURST, &txdesc->flags); |
441 | |
442 | /* |
443 | * Beacons and probe responses require the tsf timestamp |
444 | * to be inserted into the frame. |
445 | */ |
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
448 | !(tx_info->flags & IEEE80211_TX_CTL_INJECTED)) |
449 | __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); |
450 | |
451 | if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && |
452 | !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) |
453 | __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); |
454 | |
455 | /* |
456 | * Determine rate modulation. |
457 | */ |
458 | if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) |
459 | txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; |
460 | else if (txrate->flags & IEEE80211_TX_RC_MCS) |
461 | txdesc->rate_mode = RATE_MODE_HT_MIX; |
462 | else { |
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
465 | if (hwrate->flags & DEV_RATE_OFDM) |
466 | txdesc->rate_mode = RATE_MODE_OFDM; |
467 | else |
468 | txdesc->rate_mode = RATE_MODE_CCK; |
469 | } |
470 | |
471 | /* |
472 | * Apply TX descriptor handling by components |
473 | */ |
474 | rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc); |
475 | rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc); |
476 | |
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
478 | rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, |
479 | sta, hwrate); |
480 | else |
481 | rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, |
482 | hwrate); |
483 | } |
484 | |
485 | static int rt2x00queue_write_tx_data(struct queue_entry *entry, |
486 | struct txentry_desc *txdesc) |
487 | { |
488 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
489 | |
490 | /* |
491 | * This should not happen, we already checked the entry |
492 | * was ours. When the hardware disagrees there has been |
493 | * a queue corruption! |
494 | */ |
495 | if (unlikely(rt2x00dev->ops->lib->get_entry_state && |
496 | rt2x00dev->ops->lib->get_entry_state(entry))) { |
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
501 | return -EINVAL; |
502 | } |
503 | |
504 | /* |
505 | * Add the requested extra tx headroom in front of the skb. |
506 | */ |
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
508 | memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom); |
509 | |
510 | /* |
511 | * Call the driver's write_tx_data function, if it exists. |
512 | */ |
513 | if (rt2x00dev->ops->lib->write_tx_data) |
514 | rt2x00dev->ops->lib->write_tx_data(entry, txdesc); |
515 | |
516 | /* |
517 | * Map the skb to DMA. |
518 | */ |
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
520 | rt2x00queue_map_txskb(entry)) |
521 | return -ENOMEM; |
522 | |
523 | return 0; |
524 | } |
525 | |
526 | static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, |
527 | struct txentry_desc *txdesc) |
528 | { |
529 | struct data_queue *queue = entry->queue; |
530 | |
531 | queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); |
532 | |
533 | /* |
534 | * All processing on the frame has been completed, this means |
535 | * it is now ready to be dumped to userspace through debugfs. |
536 | */ |
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
538 | } |
539 | |
540 | static void rt2x00queue_kick_tx_queue(struct data_queue *queue, |
541 | struct txentry_desc *txdesc) |
542 | { |
543 | /* |
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the number of available entries
	 *	   in the queue drops below a certain threshold.
551 | */ |
552 | if (rt2x00queue_threshold(queue) || |
553 | !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) |
554 | queue->rt2x00dev->ops->lib->kick_queue(queue); |
555 | } |
556 | |
557 | static void rt2x00queue_bar_check(struct queue_entry *entry) |
558 | { |
559 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
560 | struct ieee80211_bar *bar = (void *) (entry->skb->data + |
561 | rt2x00dev->extra_tx_headroom); |
562 | struct rt2x00_bar_list_entry *bar_entry; |
563 | |
564 | if (likely(!ieee80211_is_back_req(bar->frame_control))) |
565 | return; |
566 | |
	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
568 | |
569 | /* |
	 * If the alloc fails we still send the BAR out, but just don't track
	 * it in our bar list, and as a result we will report it back to
	 * mac80211 as failed.
573 | */ |
574 | if (!bar_entry) |
575 | return; |
576 | |
577 | bar_entry->entry = entry; |
578 | bar_entry->block_acked = 0; |
579 | |
580 | /* |
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
585 | */ |
586 | memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra)); |
587 | memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta)); |
588 | bar_entry->control = bar->control; |
589 | bar_entry->start_seq_num = bar->start_seq_num; |
590 | |
591 | /* |
592 | * Insert BAR into our BAR check list. |
593 | */ |
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
597 | } |
598 | |
599 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, |
600 | struct ieee80211_sta *sta, bool local) |
601 | { |
602 | struct ieee80211_tx_info *tx_info; |
603 | struct queue_entry *entry; |
604 | struct txentry_desc txdesc; |
605 | struct skb_frame_desc *skbdesc; |
606 | u8 rate_idx, rate_flags; |
607 | int ret = 0; |
608 | |
609 | /* |
610 | * Copy all TX descriptor information into txdesc, |
611 | * after that we are free to use the skb->cb array |
612 | * for our information. |
613 | */ |
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
615 | |
616 | /* |
617 | * All information is retrieved from the skb->cb array, |
618 | * now we should claim ownership of the driver part of that |
619 | * array, preserving the bitrate index and flags. |
620 | */ |
621 | tx_info = IEEE80211_SKB_CB(skb); |
622 | rate_idx = tx_info->control.rates[0].idx; |
623 | rate_flags = tx_info->control.rates[0].flags; |
624 | skbdesc = get_skb_frame_desc(skb); |
625 | memset(skbdesc, 0, sizeof(*skbdesc)); |
626 | skbdesc->tx_rate_idx = rate_idx; |
627 | skbdesc->tx_rate_flags = rate_flags; |
628 | |
629 | if (local) |
630 | skbdesc->flags |= SKBDESC_NOT_MAC80211; |
631 | |
632 | /* |
633 | * When hardware encryption is supported, and this frame |
634 | * is to be encrypted, we should strip the IV/EIV data from |
635 | * the frame so we can provide it to the driver separately. |
636 | */ |
637 | if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && |
638 | !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { |
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
643 | } |
644 | |
645 | /* |
646 | * When DMA allocation is required we should guarantee to the |
647 | * driver that the DMA is aligned to a 4-byte boundary. |
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
652 | */ |
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);
657 | |
658 | /* |
659 | * That function must be called with bh disabled. |
660 | */ |
	spin_lock(&queue->tx_lock);
662 | |
663 | if (unlikely(rt2x00queue_full(queue))) { |
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
665 | queue->qid); |
666 | ret = -ENOBUFS; |
667 | goto out; |
668 | } |
669 | |
	entry = rt2x00queue_get_entry(queue, Q_INDEX);
671 | |
672 | if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, |
673 | &entry->flags))) { |
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
678 | ret = -EINVAL; |
679 | goto out; |
680 | } |
681 | |
682 | entry->skb = skb; |
683 | |
684 | /* |
685 | * It could be possible that the queue was corrupted and this |
686 | * call failed. Since we always return NETDEV_TX_OK to mac80211, |
687 | * this frame will simply be dropped. |
688 | */ |
689 | if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) { |
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
691 | entry->skb = NULL; |
692 | ret = -EIO; |
693 | goto out; |
694 | } |
695 | |
696 | /* |
697 | * Put BlockAckReqs into our check list for driver BA processing. |
698 | */ |
699 | rt2x00queue_bar_check(entry); |
700 | |
	set_bit(ENTRY_DATA_PENDING, &entry->flags);
702 | |
	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);
706 | |
707 | out: |
708 | /* |
709 | * Pausing queue has to be serialized with rt2x00lib_txdone(), so we |
	 * do this under queue->tx_lock. Bottom halves were already disabled
	 * before the ieee80211_xmit() call.
712 | */ |
713 | if (rt2x00queue_threshold(queue)) |
714 | rt2x00queue_pause_queue(queue); |
715 | |
	spin_unlock(&queue->tx_lock);
717 | return ret; |
718 | } |
719 | |
720 | int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, |
721 | struct ieee80211_vif *vif) |
722 | { |
723 | struct rt2x00_intf *intf = vif_to_intf(vif); |
724 | |
725 | if (unlikely(!intf->beacon)) |
726 | return -ENOBUFS; |
727 | |
728 | /* |
729 | * Clean up the beacon skb. |
730 | */ |
	rt2x00queue_free_skb(intf->beacon);
732 | |
733 | /* |
734 | * Clear beacon (single bssid devices don't need to clear the beacon |
735 | * since the beacon queue will get stopped anyway). |
736 | */ |
737 | if (rt2x00dev->ops->lib->clear_beacon) |
738 | rt2x00dev->ops->lib->clear_beacon(intf->beacon); |
739 | |
740 | return 0; |
741 | } |
742 | |
743 | int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, |
744 | struct ieee80211_vif *vif) |
745 | { |
746 | struct rt2x00_intf *intf = vif_to_intf(vif); |
747 | struct skb_frame_desc *skbdesc; |
748 | struct txentry_desc txdesc; |
749 | |
750 | if (unlikely(!intf->beacon)) |
751 | return -ENOBUFS; |
752 | |
753 | /* |
754 | * Clean up the beacon skb. |
755 | */ |
	rt2x00queue_free_skb(intf->beacon);
757 | |
	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
759 | if (!intf->beacon->skb) |
760 | return -ENOMEM; |
761 | |
762 | /* |
763 | * Copy all TX descriptor information into txdesc, |
764 | * after that we are free to use the skb->cb array |
765 | * for our information. |
766 | */ |
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
768 | |
769 | /* |
770 | * Fill in skb descriptor |
771 | */ |
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
773 | memset(skbdesc, 0, sizeof(*skbdesc)); |
774 | |
775 | /* |
776 | * Send beacon to hardware. |
777 | */ |
778 | rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); |
779 | |
780 | return 0; |
781 | |
782 | } |
783 | |
784 | bool rt2x00queue_for_each_entry(struct data_queue *queue, |
785 | enum queue_index start, |
786 | enum queue_index end, |
787 | void *data, |
788 | bool (*fn)(struct queue_entry *entry, |
789 | void *data)) |
790 | { |
791 | unsigned long irqflags; |
792 | unsigned int index_start; |
793 | unsigned int index_end; |
794 | unsigned int i; |
795 | |
796 | if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { |
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
800 | return true; |
801 | } |
802 | |
803 | /* |
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending
806 | * it should not be kicked during this run, since it |
807 | * is part of another TX operation. |
808 | */ |
809 | spin_lock_irqsave(&queue->index_lock, irqflags); |
810 | index_start = queue->index[start]; |
811 | index_end = queue->index[end]; |
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
813 | |
814 | /* |
815 | * Start from the TX done pointer, this guarantees that we will |
816 | * send out all frames in the correct order. |
817 | */ |
818 | if (index_start < index_end) { |
819 | for (i = index_start; i < index_end; i++) { |
820 | if (fn(&queue->entries[i], data)) |
821 | return true; |
822 | } |
823 | } else { |
824 | for (i = index_start; i < queue->limit; i++) { |
825 | if (fn(&queue->entries[i], data)) |
826 | return true; |
827 | } |
828 | |
829 | for (i = 0; i < index_end; i++) { |
830 | if (fn(&queue->entries[i], data)) |
831 | return true; |
832 | } |
833 | } |
834 | |
835 | return false; |
836 | } |
837 | EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); |
838 | |
839 | struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, |
840 | enum queue_index index) |
841 | { |
842 | struct queue_entry *entry; |
843 | unsigned long irqflags; |
844 | |
845 | if (unlikely(index >= Q_INDEX_MAX)) { |
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
848 | return NULL; |
849 | } |
850 | |
851 | spin_lock_irqsave(&queue->index_lock, irqflags); |
852 | |
853 | entry = &queue->entries[queue->index[index]]; |
854 | |
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
856 | |
857 | return entry; |
858 | } |
859 | EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); |
860 | |
861 | void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) |
862 | { |
863 | struct data_queue *queue = entry->queue; |
864 | unsigned long irqflags; |
865 | |
866 | if (unlikely(index >= Q_INDEX_MAX)) { |
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
869 | return; |
870 | } |
871 | |
872 | spin_lock_irqsave(&queue->index_lock, irqflags); |
873 | |
874 | queue->index[index]++; |
875 | if (queue->index[index] >= queue->limit) |
876 | queue->index[index] = 0; |
877 | |
878 | entry->last_action = jiffies; |
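
	/*
	 * Q_INDEX is advanced when a new frame is queued and Q_INDEX_DONE
	 * when the device has completed it, so 'length' tracks the number
	 * of entries currently owned by the hardware while 'count' is the
	 * total number of completed frames.
	 */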
879 | |
880 | if (index == Q_INDEX) { |
881 | queue->length++; |
882 | } else if (index == Q_INDEX_DONE) { |
883 | queue->length--; |
884 | queue->count++; |
885 | } |
886 | |
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
888 | } |
889 | |
890 | static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) |
891 | { |
892 | switch (queue->qid) { |
893 | case QID_AC_VO: |
894 | case QID_AC_VI: |
895 | case QID_AC_BE: |
896 | case QID_AC_BK: |
897 | /* |
898 | * For TX queues, we have to disable the queue |
899 | * inside mac80211. |
900 | */ |
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
902 | break; |
903 | default: |
904 | break; |
905 | } |
906 | } |

void rt2x00queue_pause_queue(struct data_queue *queue)
908 | { |
909 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || |
910 | !test_bit(QUEUE_STARTED, &queue->flags) || |
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
912 | return; |
913 | |
914 | rt2x00queue_pause_queue_nocheck(queue); |
915 | } |
916 | EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); |
917 | |
918 | void rt2x00queue_unpause_queue(struct data_queue *queue) |
919 | { |
920 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || |
921 | !test_bit(QUEUE_STARTED, &queue->flags) || |
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
923 | return; |
924 | |
925 | switch (queue->qid) { |
926 | case QID_AC_VO: |
927 | case QID_AC_VI: |
928 | case QID_AC_BE: |
929 | case QID_AC_BK: |
930 | /* |
931 | * For TX queues, we have to enable the queue |
932 | * inside mac80211. |
933 | */ |
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
935 | break; |
936 | case QID_RX: |
937 | /* |
938 | * For RX we need to kick the queue now in order to |
939 | * receive frames. |
940 | */ |
941 | queue->rt2x00dev->ops->lib->kick_queue(queue); |
942 | break; |
943 | default: |
944 | break; |
945 | } |
946 | } |
947 | EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue); |
948 | |
949 | void rt2x00queue_start_queue(struct data_queue *queue) |
950 | { |
951 | mutex_lock(&queue->status_lock); |
952 | |
953 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || |
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
956 | return; |
957 | } |
958 | |
	set_bit(QUEUE_PAUSED, &queue->flags);
960 | |
961 | queue->rt2x00dev->ops->lib->start_queue(queue); |
962 | |
963 | rt2x00queue_unpause_queue(queue); |
964 | |
	mutex_unlock(&queue->status_lock);
966 | } |
967 | EXPORT_SYMBOL_GPL(rt2x00queue_start_queue); |
968 | |
969 | void rt2x00queue_stop_queue(struct data_queue *queue) |
970 | { |
971 | mutex_lock(&queue->status_lock); |
972 | |
	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
975 | return; |
976 | } |
977 | |
978 | rt2x00queue_pause_queue_nocheck(queue); |
979 | |
980 | queue->rt2x00dev->ops->lib->stop_queue(queue); |
981 | |
	mutex_unlock(&queue->status_lock);
983 | } |
984 | EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue); |
985 | |
986 | void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) |
987 | { |
988 | bool tx_queue = |
989 | (queue->qid == QID_AC_VO) || |
990 | (queue->qid == QID_AC_VI) || |
991 | (queue->qid == QID_AC_BE) || |
992 | (queue->qid == QID_AC_BK); |
993 | |
994 | if (rt2x00queue_empty(queue)) |
995 | return; |
996 | |
997 | /* |
998 | * If we are not supposed to drop any pending |
999 | * frames, this means we must force a start (=kick) |
1000 | * to the queue to make sure the hardware will |
1001 | * start transmitting. |
1002 | */ |
1003 | if (!drop && tx_queue) |
1004 | queue->rt2x00dev->ops->lib->kick_queue(queue); |
1005 | |
1006 | /* |
1007 | * Check if driver supports flushing, if that is the case we can |
1008 | * defer the flushing to the driver. Otherwise we must use the |
1009 | * alternative which just waits for the queue to become empty. |
1010 | */ |
1011 | if (likely(queue->rt2x00dev->ops->lib->flush_queue)) |
1012 | queue->rt2x00dev->ops->lib->flush_queue(queue, drop); |
1013 | |
1014 | /* |
1015 | * The queue flush has failed... |
1016 | */ |
1017 | if (unlikely(!rt2x00queue_empty(queue))) |
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
1020 | } |
1021 | EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue); |
1022 | |
1023 | void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev) |
1024 | { |
1025 | struct data_queue *queue; |
1026 | |
1027 | /* |
1028 | * rt2x00queue_start_queue will call ieee80211_wake_queue |
	 * for each queue after it has been properly initialized.
1030 | */ |
1031 | tx_queue_for_each(rt2x00dev, queue) |
1032 | rt2x00queue_start_queue(queue); |
1033 | |
1034 | rt2x00queue_start_queue(rt2x00dev->rx); |
1035 | } |
1036 | EXPORT_SYMBOL_GPL(rt2x00queue_start_queues); |
1037 | |
1038 | void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) |
1039 | { |
1040 | struct data_queue *queue; |
1041 | |
1042 | /* |
1043 | * rt2x00queue_stop_queue will call ieee80211_stop_queue |
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
1047 | */ |
	ieee80211_stop_queues(rt2x00dev->hw);
1049 | |
1050 | tx_queue_for_each(rt2x00dev, queue) |
1051 | rt2x00queue_stop_queue(queue); |
1052 | |
1053 | rt2x00queue_stop_queue(rt2x00dev->rx); |
1054 | } |
1055 | EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues); |
1056 | |
1057 | void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop) |
1058 | { |
1059 | struct data_queue *queue; |
1060 | |
1061 | tx_queue_for_each(rt2x00dev, queue) |
1062 | rt2x00queue_flush_queue(queue, drop); |
1063 | |
1064 | rt2x00queue_flush_queue(rt2x00dev->rx, drop); |
1065 | } |
1066 | EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues); |
1067 | |
1068 | static void rt2x00queue_reset(struct data_queue *queue) |
1069 | { |
1070 | unsigned long irqflags; |
1071 | unsigned int i; |
1072 | |
1073 | spin_lock_irqsave(&queue->index_lock, irqflags); |
1074 | |
1075 | queue->count = 0; |
1076 | queue->length = 0; |
1077 | |
1078 | for (i = 0; i < Q_INDEX_MAX; i++) |
1079 | queue->index[i] = 0; |
1080 | |
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
1082 | } |
1083 | |
1084 | void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) |
1085 | { |
1086 | struct data_queue *queue; |
1087 | unsigned int i; |
1088 | |
1089 | queue_for_each(rt2x00dev, queue) { |
1090 | rt2x00queue_reset(queue); |
1091 | |
1092 | for (i = 0; i < queue->limit; i++) |
1093 | rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); |
1094 | } |
1095 | } |
1096 | |
1097 | static int rt2x00queue_alloc_entries(struct data_queue *queue) |
1098 | { |
1099 | struct queue_entry *entries; |
1100 | unsigned int entry_size; |
1101 | unsigned int i; |
1102 | |
1103 | rt2x00queue_reset(queue); |
1104 | |
1105 | /* |
1106 | * Allocate all queue entries. |
1107 | */ |
1108 | entry_size = sizeof(*entries) + queue->priv_size; |
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1110 | if (!entries) |
1111 | return -ENOMEM; |
1112 | |
1113 | #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ |
1114 | (((char *)(__base)) + ((__limit) * (__esize)) + \ |
1115 | ((__index) * (__psize))) |
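
/*
 * All queue_entry structures are allocated in a single block, followed by
 * one driver private data area per entry; the macro above computes the
 * offset of the private area that belongs to entry __index.
 */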
1116 | |
1117 | for (i = 0; i < queue->limit; i++) { |
1118 | entries[i].flags = 0; |
1119 | entries[i].queue = queue; |
1120 | entries[i].skb = NULL; |
1121 | entries[i].entry_idx = i; |
1122 | entries[i].priv_data = |
1123 | QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, |
1124 | sizeof(*entries), queue->priv_size); |
1125 | } |
1126 | |
1127 | #undef QUEUE_ENTRY_PRIV_OFFSET |
1128 | |
1129 | queue->entries = entries; |
1130 | |
1131 | return 0; |
1132 | } |
1133 | |
1134 | static void rt2x00queue_free_skbs(struct data_queue *queue) |
1135 | { |
1136 | unsigned int i; |
1137 | |
1138 | if (!queue->entries) |
1139 | return; |
1140 | |
1141 | for (i = 0; i < queue->limit; i++) { |
		rt2x00queue_free_skb(&queue->entries[i]);
1143 | } |
1144 | } |
1145 | |
1146 | static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) |
1147 | { |
1148 | unsigned int i; |
1149 | struct sk_buff *skb; |
1150 | |
1151 | for (i = 0; i < queue->limit; i++) { |
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1153 | if (!skb) |
1154 | return -ENOMEM; |
1155 | queue->entries[i].skb = skb; |
1156 | } |
1157 | |
1158 | return 0; |
1159 | } |
1160 | |
1161 | int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) |
1162 | { |
1163 | struct data_queue *queue; |
1164 | int status; |
1165 | |
	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
1167 | if (status) |
1168 | goto exit; |
1169 | |
1170 | tx_queue_for_each(rt2x00dev, queue) { |
1171 | status = rt2x00queue_alloc_entries(queue); |
1172 | if (status) |
1173 | goto exit; |
1174 | } |
1175 | |
	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
1177 | if (status) |
1178 | goto exit; |
1179 | |
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
1182 | if (status) |
1183 | goto exit; |
1184 | } |
1185 | |
	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
1187 | if (status) |
1188 | goto exit; |
1189 | |
1190 | return 0; |
1191 | |
1192 | exit: |
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
1194 | |
1195 | rt2x00queue_uninitialize(rt2x00dev); |
1196 | |
1197 | return status; |
1198 | } |
1199 | |
1200 | void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev) |
1201 | { |
1202 | struct data_queue *queue; |
1203 | |
	rt2x00queue_free_skbs(rt2x00dev->rx);
1205 | |
1206 | queue_for_each(rt2x00dev, queue) { |
		kfree(queue->entries);
1208 | queue->entries = NULL; |
1209 | } |
1210 | } |
1211 | |
1212 | static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, |
1213 | struct data_queue *queue, enum data_queue_qid qid) |
1214 | { |
1215 | mutex_init(&queue->status_lock); |
1216 | spin_lock_init(&queue->tx_lock); |
1217 | spin_lock_init(&queue->index_lock); |
1218 | |
1219 | queue->rt2x00dev = rt2x00dev; |
1220 | queue->qid = qid; |
1221 | queue->txop = 0; |
1222 | queue->aifs = 2; |
1223 | queue->cw_min = 5; |
1224 | queue->cw_max = 10; |
1225 | |
1226 | rt2x00dev->ops->queue_init(queue); |
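
	/*
	 * Reserve roughly 10% of the entries as threshold; it is used by
	 * rt2x00queue_threshold() to detect when a queue runs low on free
	 * entries, which forces a kick and pauses the queue (see
	 * rt2x00queue_write_tx_frame() above).
	 */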
1227 | |
1228 | queue->threshold = DIV_ROUND_UP(queue->limit, 10); |
1229 | } |
1230 | |
1231 | int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) |
1232 | { |
1233 | struct data_queue *queue; |
1234 | enum data_queue_qid qid; |
1235 | unsigned int req_atim = |
		rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
1237 | |
1238 | /* |
1239 | * We need the following queues: |
1240 | * RX: 1 |
1241 | * TX: ops->tx_queues |
1242 | * Beacon: 1 |
1243 | * Atim: 1 (if required) |
1244 | */ |
1245 | rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; |
1246 | |
	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1248 | if (!queue) |
1249 | return -ENOMEM; |
1250 | |
1251 | /* |
1252 | * Initialize pointers |
1253 | */ |
1254 | rt2x00dev->rx = queue; |
1255 | rt2x00dev->tx = &queue[1]; |
1256 | rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; |
1257 | rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; |
1258 | |
1259 | /* |
1260 | * Initialize queue parameters. |
1261 | * RX: qid = QID_RX |
1262 | * TX: qid = QID_AC_VO + index |
1263 | * TX: cw_min: 2^5 = 32. |
1264 | * TX: cw_max: 2^10 = 1024. |
1265 | * BCN: qid = QID_BEACON |
1266 | * ATIM: qid = QID_ATIM |
1267 | */ |
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
1269 | |
1270 | qid = QID_AC_VO; |
1271 | tx_queue_for_each(rt2x00dev, queue) |
		rt2x00queue_init(rt2x00dev, queue, qid++);
1273 | |
	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
1277 | |
1278 | return 0; |
1279 | } |
1280 | |
1281 | void rt2x00queue_free(struct rt2x00_dev *rt2x00dev) |
1282 | { |
	kfree(rt2x00dev->rx);
1284 | rt2x00dev->rx = NULL; |
1285 | rt2x00dev->tx = NULL; |
1286 | rt2x00dev->bcn = NULL; |
1287 | } |
1288 | |