1 | /* |
2 | * Copyright (c) 2008-2011 Atheros Communications Inc. |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any |
5 | * purpose with or without fee is hereby granted, provided that the above |
6 | * copyright notice and this permission notice appear in all copies. |
7 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
15 | */ |
16 | |
17 | #include <linux/dma-mapping.h> |
18 | #include "ath9k.h" |
19 | #include "ar9003_mac.h" |
20 | |
21 | #define BITS_PER_BYTE 8 |
22 | #define OFDM_PLCP_BITS 22 |
23 | #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) |
24 | #define L_STF 8 |
25 | #define L_LTF 8 |
26 | #define L_SIG 4 |
27 | #define HT_SIG 8 |
28 | #define HT_STF 4 |
29 | #define HT_LTF(_ns) (4 * (_ns)) |
30 | #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */ |
31 | #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */ |
32 | #define TIME_SYMBOLS(t) ((t) >> 2) |
33 | #define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18) |
34 | #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) |
35 | #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) |
36 | |
37 | /* Shifts in ar5008_phy.c and ar9003_phy.c are equal for all revisions */ |
38 | #define ATH9K_PWRTBL_11NA_OFDM_SHIFT 0 |
39 | #define ATH9K_PWRTBL_11NG_OFDM_SHIFT 4 |
40 | #define ATH9K_PWRTBL_11NA_HT_SHIFT 8 |
41 | #define ATH9K_PWRTBL_11NG_HT_SHIFT 12 |
42 | |
43 | |
44 | static u16 bits_per_symbol[][2] = { |
45 | /* 20MHz 40MHz */ |
46 | { 26, 54 }, /* 0: BPSK */ |
47 | { 52, 108 }, /* 1: QPSK 1/2 */ |
48 | { 78, 162 }, /* 2: QPSK 3/4 */ |
49 | { 104, 216 }, /* 3: 16-QAM 1/2 */ |
50 | { 156, 324 }, /* 4: 16-QAM 3/4 */ |
51 | { 208, 432 }, /* 5: 64-QAM 2/3 */ |
52 | { 234, 486 }, /* 6: 64-QAM 3/4 */ |
53 | { 260, 540 }, /* 7: 64-QAM 5/6 */ |
54 | }; |
55 | |
56 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, |
57 | struct ath_atx_tid *tid, struct sk_buff *skb); |
58 | static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, |
59 | int tx_flags, struct ath_txq *txq, |
60 | struct ieee80211_sta *sta); |
61 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, |
62 | struct ath_txq *txq, struct list_head *bf_q, |
63 | struct ieee80211_sta *sta, |
64 | struct ath_tx_status *ts, int txok); |
65 | static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, |
66 | struct list_head *head, bool internal); |
67 | static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, |
68 | struct ath_tx_status *ts, int nframes, int nbad, |
69 | int txok); |
70 | static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, |
71 | struct ath_buf *bf); |
72 | static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, |
73 | struct ath_txq *txq, |
74 | struct ath_atx_tid *tid, |
75 | struct sk_buff *skb); |
76 | static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, |
77 | struct ath_tx_control *txctl); |
78 | |
79 | enum { |
80 | MCS_HT20, |
81 | MCS_HT20_SGI, |
82 | MCS_HT40, |
83 | MCS_HT40_SGI, |
84 | }; |
85 | |
86 | /*********************/ |
87 | /* Aggregation logic */ |
88 | /*********************/ |
89 | |
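/*
 * Hand a completed frame back to mac80211. Frames for which an
 * explicit status report was requested keep their skb; for all others
 * the status is reported without the skb, which is freed here.
 */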
90 | static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) |
91 | { |
92 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
93 | struct ieee80211_sta *sta = info->status.status_driver_data[0]; |
94 | |
95 | if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | |
96 | IEEE80211_TX_STATUS_EOSP)) { |
97 | ieee80211_tx_status_skb(hw, skb); |
98 | return; |
99 | } |
100 | |
101 | if (sta) |
102 | ieee80211_tx_status_noskb(hw, sta, info); |
103 | |
104 | dev_kfree_skb(skb); |
105 | } |
106 | |
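/*
 * Release the queue lock and flush the completion backlog gathered
 * while it was held, so that mac80211 status callbacks run without
 * the txq lock held.
 */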
107 | void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) |
108 | __releases(&txq->axq_lock) |
109 | { |
110 | struct ieee80211_hw *hw = sc->hw; |
111 | struct sk_buff_head q; |
112 | struct sk_buff *skb; |
113 | |
	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
119 | ath_tx_status(hw, skb); |
120 | } |
121 | |
122 | void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) |
123 | { |
124 | struct ieee80211_txq *queue = |
125 | container_of((void *)tid, struct ieee80211_txq, drv_priv); |
126 | |
	ieee80211_schedule_txq(sc->hw, queue);
128 | } |
129 | |
130 | void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) |
131 | { |
132 | struct ath_softc *sc = hw->priv; |
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
134 | struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv; |
135 | struct ath_txq *txq = tid->txq; |
136 | |
	ath_dbg(common, QUEUE, "Waking TX queue: %pM (%d)\n",
138 | queue->sta ? queue->sta->addr : queue->vif->addr, |
139 | tid->tidno); |
140 | |
141 | ath_txq_lock(sc, txq); |
142 | ath_txq_schedule(sc, txq); |
143 | ath_txq_unlock(sc, txq); |
144 | } |
145 | |
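/*
 * Driver-private per-frame state lives in the status_driver_data area
 * of the skb's ieee80211_tx_info; the BUILD_BUG_ON guards against it
 * outgrowing that area.
 */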
146 | static struct ath_frame_info *get_frame_info(struct sk_buff *skb) |
147 | { |
148 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
149 | BUILD_BUG_ON(sizeof(struct ath_frame_info) > |
150 | sizeof(tx_info->status.status_driver_data)); |
151 | return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; |
152 | } |
153 | |
154 | static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) |
155 | { |
156 | if (!tid->an->sta) |
157 | return; |
158 | |
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
161 | } |
162 | |
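/*
 * Fill bf->rates from the station's rate table, keeping a valid
 * per-packet rate from mac80211 in slot 0 if one was supplied.
 * Returns false if no rate table is available, in which case the
 * caller falls back to ieee80211_get_tx_rates().
 */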
163 | static bool ath_merge_ratetbl(struct ieee80211_sta *sta, struct ath_buf *bf, |
164 | struct ieee80211_tx_info *tx_info) |
165 | { |
166 | struct ieee80211_sta_rates *ratetbl; |
167 | u8 i; |
168 | |
169 | if (!sta) |
170 | return false; |
171 | |
172 | ratetbl = rcu_dereference(sta->rates); |
173 | if (!ratetbl) |
174 | return false; |
175 | |
	if (tx_info->control.rates[0].idx < 0 ||
	    tx_info->control.rates[0].count == 0) {
		i = 0;
179 | i = 0; |
180 | } else { |
181 | bf->rates[0] = tx_info->control.rates[0]; |
182 | i = 1; |
183 | } |
184 | |
185 | for ( ; i < IEEE80211_TX_MAX_RATES; i++) { |
186 | bf->rates[i].idx = ratetbl->rate[i].idx; |
187 | bf->rates[i].flags = ratetbl->rate[i].flags; |
188 | if (tx_info->control.use_rts) |
189 | bf->rates[i].count = ratetbl->rate[i].count_rts; |
190 | else if (tx_info->control.use_cts_prot) |
191 | bf->rates[i].count = ratetbl->rate[i].count_cts; |
192 | else |
193 | bf->rates[i].count = ratetbl->rate[i].count; |
194 | } |
195 | |
196 | return true; |
197 | } |
198 | |
199 | static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, |
200 | struct ath_buf *bf) |
201 | { |
202 | struct ieee80211_tx_info *tx_info; |
203 | |
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

	if (!ath_merge_ratetbl(sta, bf, tx_info))
		ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
				       ARRAY_SIZE(bf->rates));
209 | } |
210 | |
211 | static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, |
212 | struct sk_buff *skb) |
213 | { |
214 | struct ath_frame_info *fi = get_frame_info(skb); |
215 | int q = fi->txq; |
216 | |
217 | if (q < 0) |
218 | return; |
219 | |
220 | txq = sc->tx.txq_map[q]; |
221 | if (WARN_ON(--txq->pending_frames < 0)) |
222 | txq->pending_frames = 0; |
}
225 | |
226 | static struct ath_atx_tid * |
227 | ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb) |
228 | { |
229 | u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; |
230 | return ATH_AN_2_TID(an, tidno); |
231 | } |
232 | |
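/*
 * Pull the next frame for this TID from the mac80211 intermediate
 * queue, prepare it for transmission and update the pending-frame
 * accounting of the mapped hardware queue.
 */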
233 | static int |
234 | ath_tid_pull(struct ath_atx_tid *tid, struct sk_buff **skbuf) |
235 | { |
236 | struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv); |
237 | struct ath_softc *sc = tid->an->sc; |
238 | struct ieee80211_hw *hw = sc->hw; |
239 | struct ath_tx_control txctl = { |
240 | .txq = tid->txq, |
241 | .sta = tid->an->sta, |
242 | }; |
243 | struct sk_buff *skb; |
244 | struct ath_frame_info *fi; |
245 | int q, ret; |
246 | |
247 | skb = ieee80211_tx_dequeue(hw, txq); |
248 | if (!skb) |
249 | return -ENOENT; |
250 | |
	ret = ath_tx_prepare(hw, skb, &txctl);
252 | if (ret) { |
253 | ieee80211_free_txskb(hw, skb); |
254 | return ret; |
255 | } |
256 | |
257 | q = skb_get_queue_mapping(skb); |
258 | if (tid->txq == sc->tx.txq_map[q]) { |
259 | fi = get_frame_info(skb); |
260 | fi->txq = q; |
261 | ++tid->txq->pending_frames; |
262 | } |
263 | |
264 | *skbuf = skb; |
265 | return 0; |
266 | } |
267 | |
268 | static int ath_tid_dequeue(struct ath_atx_tid *tid, |
269 | struct sk_buff **skb) |
270 | { |
271 | int ret = 0; |
	*skb = __skb_dequeue(&tid->retry_q);
	if (!*skb)
		ret = ath_tid_pull(tid, skb);
275 | |
276 | return ret; |
277 | } |
278 | |
279 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) |
280 | { |
281 | struct ath_txq *txq = tid->txq; |
282 | struct sk_buff *skb; |
283 | struct ath_buf *bf; |
284 | struct list_head bf_head; |
285 | struct ath_tx_status ts; |
286 | struct ath_frame_info *fi; |
287 | bool sendbar = false; |
288 | |
	INIT_LIST_HEAD(&bf_head);
290 | |
291 | memset(&ts, 0, sizeof(ts)); |
292 | |
	while ((skb = __skb_dequeue(&tid->retry_q))) {
294 | fi = get_frame_info(skb); |
295 | bf = fi->bf; |
296 | if (!bf) { |
297 | ath_txq_skb_done(sc, txq, skb); |
			ieee80211_free_txskb(sc->hw, skb);
299 | continue; |
300 | } |
301 | |
302 | if (fi->baw_tracked) { |
303 | ath_tx_update_baw(sc, tid, bf); |
304 | sendbar = true; |
305 | } |
306 | |
		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
309 | } |
310 | |
311 | if (sendbar) { |
312 | ath_txq_unlock(sc, txq); |
		ath_send_bar(tid, tid->seq_start);
314 | ath_txq_lock(sc, txq); |
315 | } |
316 | } |
317 | |
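/*
 * Remove a completed subframe from the block-ack window and slide the
 * window start past any sequence numbers that are no longer pending.
 */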
318 | static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, |
319 | struct ath_buf *bf) |
320 | { |
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
322 | u16 seqno = bf->bf_state.seqno; |
323 | int index, cindex; |
324 | |
325 | if (!fi->baw_tracked) |
326 | return; |
327 | |
328 | index = ATH_BA_INDEX(tid->seq_start, seqno); |
329 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); |
330 | |
331 | __clear_bit(cindex, tid->tx_buf); |
332 | |
333 | while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { |
334 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); |
335 | INCR(tid->baw_head, ATH_TID_MAX_BUFS); |
336 | if (tid->bar_index >= 0) |
337 | tid->bar_index--; |
338 | } |
339 | } |
340 | |
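/*
 * Mark a subframe as pending in the block-ack window, extending the
 * window tail if necessary.
 */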
341 | static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, |
342 | struct ath_buf *bf) |
343 | { |
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
345 | u16 seqno = bf->bf_state.seqno; |
346 | int index, cindex; |
347 | |
348 | if (fi->baw_tracked) |
349 | return; |
350 | |
351 | index = ATH_BA_INDEX(tid->seq_start, seqno); |
352 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); |
353 | __set_bit(cindex, tid->tx_buf); |
354 | fi->baw_tracked = 1; |
355 | |
356 | if (index >= ((tid->baw_tail - tid->baw_head) & |
357 | (ATH_TID_MAX_BUFS - 1))) { |
358 | tid->baw_tail = cindex; |
359 | INCR(tid->baw_tail, ATH_TID_MAX_BUFS); |
360 | } |
361 | } |
362 | |
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
367 | struct sk_buff *skb; |
368 | struct ath_buf *bf; |
369 | struct list_head bf_head; |
370 | struct ath_tx_status ts; |
371 | struct ath_frame_info *fi; |
372 | |
373 | memset(&ts, 0, sizeof(ts)); |
	INIT_LIST_HEAD(&bf_head);

	while (ath_tid_dequeue(tid, &skb) == 0) {
377 | fi = get_frame_info(skb); |
378 | bf = fi->bf; |
379 | |
380 | if (!bf) { |
381 | ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL); |
382 | continue; |
383 | } |
384 | |
		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
387 | } |
388 | } |
389 | |
390 | static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, |
391 | struct sk_buff *skb, int count) |
392 | { |
393 | struct ath_frame_info *fi = get_frame_info(skb); |
394 | struct ath_buf *bf = fi->bf; |
395 | struct ieee80211_hdr *hdr; |
396 | int prev = fi->retries; |
397 | |
398 | TX_STAT_INC(sc, txq->axq_qnum, a_retries); |
399 | fi->retries += count; |
400 | |
401 | if (prev > 0) |
402 | return; |
403 | |
404 | hdr = (struct ieee80211_hdr *)skb->data; |
405 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); |
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
408 | } |
409 | |
410 | static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) |
411 | { |
412 | struct ath_buf *bf = NULL; |
413 | |
	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);
425 | |
426 | return bf; |
427 | } |
428 | |
429 | static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf) |
430 | { |
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
434 | } |
435 | |
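/*
 * Clone a buffer whose descriptor is still held by the hardware
 * (stale), so that the frame can be retransmitted from a fresh
 * descriptor.
 */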
436 | static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) |
437 | { |
438 | struct ath_buf *tbf; |
439 | |
440 | tbf = ath_tx_get_buffer(sc); |
441 | if (WARN_ON(!tbf)) |
442 | return NULL; |
443 | |
444 | ATH_TXBUF_RESET(tbf); |
445 | |
446 | tbf->bf_mpdu = bf->bf_mpdu; |
447 | tbf->bf_buf_addr = bf->bf_buf_addr; |
448 | memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); |
449 | tbf->bf_state = bf->bf_state; |
450 | tbf->bf_state.stale = false; |
451 | |
452 | return tbf; |
453 | } |
454 | |
455 | static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, |
456 | struct ath_tx_status *ts, int txok, |
457 | int *nframes, int *nbad) |
458 | { |
459 | u16 seq_st = 0; |
460 | u32 ba[WME_BA_BMP_SIZE >> 5]; |
461 | int ba_index; |
462 | int isaggr = 0; |
463 | |
464 | *nbad = 0; |
465 | *nframes = 0; |
466 | |
467 | isaggr = bf_isaggr(bf); |
468 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); |
469 | |
470 | if (isaggr) { |
471 | seq_st = ts->ts_seqnum; |
472 | memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3); |
473 | } |
474 | |
475 | while (bf) { |
476 | ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno); |
477 | |
478 | (*nframes)++; |
479 | if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) |
480 | (*nbad)++; |
481 | |
482 | bf = bf->bf_next; |
483 | } |
484 | } |
485 | |
486 | |
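/*
 * Process the tx status of an aggregate: complete subframes that were
 * acked or have exhausted their software retries, move the remainder
 * back to the TID retry queue, and send a BAR if a failed subframe
 * left a hole in the block-ack window.
 */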
487 | static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, |
488 | struct ath_buf *bf, struct list_head *bf_q, |
489 | struct ieee80211_sta *sta, |
490 | struct ath_atx_tid *tid, |
491 | struct ath_tx_status *ts, int txok) |
492 | { |
493 | struct ath_node *an = NULL; |
494 | struct sk_buff *skb; |
495 | struct ieee80211_tx_info *tx_info; |
496 | struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; |
497 | struct list_head bf_head; |
498 | struct sk_buff_head bf_pending; |
499 | u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; |
500 | u32 ba[WME_BA_BMP_SIZE >> 5]; |
501 | int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; |
502 | bool rc_update = true, isba; |
503 | struct ieee80211_tx_rate rates[4]; |
504 | struct ath_frame_info *fi; |
505 | int nframes; |
506 | bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); |
507 | int i, retries; |
508 | int bar_index = -1; |
509 | |
510 | skb = bf->bf_mpdu; |
511 | tx_info = IEEE80211_SKB_CB(skb); |
512 | |
513 | memcpy(rates, bf->rates, sizeof(rates)); |
514 | |
515 | retries = ts->ts_longretry + 1; |
516 | for (i = 0; i < ts->ts_rateindex; i++) |
517 | retries += rates[i].count; |
518 | |
519 | if (!sta) { |
		INIT_LIST_HEAD(&bf_head);
521 | while (bf) { |
522 | bf_next = bf->bf_next; |
523 | |
			if (!bf->bf_state.stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
528 | |
529 | bf = bf_next; |
530 | } |
531 | return; |
532 | } |
533 | |
534 | an = (struct ath_node *)sta->drv_priv; |
535 | seq_first = tid->seq_start; |
536 | isba = ts->ts_flags & ATH9K_TX_BA; |
537 | |
538 | /* |
539 | * The hardware occasionally sends a tx status for the wrong TID. |
540 | * In this case, the BA status cannot be considered valid and all |
541 | * subframes need to be retransmitted |
542 | * |
543 | * Only BlockAcks have a TID and therefore normal Acks cannot be |
544 | * checked |
545 | */ |
546 | if (isba && tid->tidno != ts->tid) |
547 | txok = false; |
548 | |
549 | isaggr = bf_isaggr(bf); |
550 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); |
551 | |
552 | if (isaggr && txok) { |
553 | if (ts->ts_flags & ATH9K_TX_BA) { |
554 | seq_st = ts->ts_seqnum; |
555 | memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3); |
556 | } else { |
557 | /* |
558 | * AR5416 can become deaf/mute when BA |
559 | * issue happens. Chip needs to be reset. |
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
562 | * Only enable reset in STA mode for now. |
563 | */ |
564 | if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) |
565 | needreset = 1; |
566 | } |
567 | } |
568 | |
	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
572 | while (bf) { |
573 | u16 seqno = bf->bf_state.seqno; |
574 | |
575 | txfail = txpending = sendbar = 0; |
576 | bf_next = bf->bf_next; |
577 | |
578 | skb = bf->bf_mpdu; |
579 | tx_info = IEEE80211_SKB_CB(skb); |
580 | fi = get_frame_info(skb); |
581 | |
582 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) || |
583 | !tid->active) { |
584 | /* |
585 | * Outside of the current BlockAck window, |
586 | * maybe part of a previous session |
587 | */ |
588 | txfail = 1; |
589 | } else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { |
590 | /* transmit completion, subframe is |
591 | * acked by block ack */ |
592 | acked_cnt++; |
593 | } else if (!isaggr && txok) { |
594 | /* transmit completion */ |
595 | acked_cnt++; |
596 | } else if (flush) { |
597 | txpending = 1; |
598 | } else if (fi->retries < ATH_MAX_SW_RETRIES) { |
599 | if (txok || !an->sleeping) |
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);
602 | |
603 | txpending = 1; |
604 | } else { |
605 | txfail = 1; |
606 | txfail_cnt++; |
607 | bar_index = max_t(int, bar_index, |
608 | ATH_BA_INDEX(seq_first, seqno)); |
609 | } |
610 | |
611 | /* |
612 | * Make sure the last desc is reclaimed if it |
613 | * not a holding desc. |
614 | */ |
615 | INIT_LIST_HEAD(list: &bf_head); |
616 | if (bf_next != NULL || !bf_last->bf_state.stale) |
617 | list_move_tail(list: &bf->list, head: &bf_head); |
618 | |
619 | if (!txpending) { |
620 | /* |
621 | * complete the acked-ones/xretried ones; update |
622 | * block-ack window |
623 | */ |
624 | ath_tx_update_baw(sc, tid, bf); |
625 | |
626 | if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { |
627 | memcpy(tx_info->control.rates, rates, sizeof(rates)); |
628 | ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); |
629 | rc_update = false; |
630 | if (bf == bf->bf_lastbf) |
					ath_dynack_sample_tx_ts(sc->sc_ah,
								bf->bf_mpdu,
								ts, sta);
634 | } |
635 | |
			ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
					    !txfail);
638 | } else { |
639 | if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) { |
640 | tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP; |
				ieee80211_sta_eosp(sta);
642 | } |
643 | /* retry the un-acked ones */ |
644 | if (bf->bf_next == NULL && bf_last->bf_state.stale) { |
645 | struct ath_buf *tbf; |
646 | |
				tbf = ath_clone_txbuf(sc, bf_last);
648 | /* |
649 | * Update tx baw and complete the |
650 | * frame with failed status if we |
651 | * run out of tx buf. |
652 | */ |
653 | if (!tbf) { |
654 | ath_tx_update_baw(sc, tid, bf); |
655 | |
					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, NULL, ts,
							    0);
659 | bar_index = max_t(int, bar_index, |
660 | ATH_BA_INDEX(seq_first, seqno)); |
661 | break; |
662 | } |
663 | |
664 | fi->bf = tbf; |
665 | } |
666 | |
667 | /* |
668 | * Put this buffer to the temporary pending |
669 | * queue to retain ordering |
670 | */ |
			__skb_queue_tail(&bf_pending, skb);
672 | } |
673 | |
674 | bf = bf_next; |
675 | } |
676 | |
677 | /* prepend un-acked frames to the beginning of the pending frame queue */ |
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
683 | if (!an->sleeping) { |
684 | ath_tx_queue_tid(sc, tid); |
685 | if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) |
686 | tid->clear_ps_filter = true; |
687 | } |
688 | } |
689 | |
690 | if (bar_index >= 0) { |
691 | u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index); |
692 | |
693 | if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq)) |
694 | tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq); |
695 | |
696 | ath_txq_unlock(sc, txq); |
697 | ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1)); |
698 | ath_txq_lock(sc, txq); |
699 | } |
700 | |
701 | if (needreset) |
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
703 | } |
704 | |
705 | static bool bf_is_ampdu_not_probing(struct ath_buf *bf) |
706 | { |
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
708 | return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); |
709 | } |
710 | |
711 | static void ath_tx_count_airtime(struct ath_softc *sc, |
712 | struct ieee80211_sta *sta, |
713 | struct ath_buf *bf, |
714 | struct ath_tx_status *ts, |
715 | u8 tid) |
716 | { |
717 | u32 airtime = 0; |
718 | int i; |
719 | |
720 | airtime += ts->duration * (ts->ts_longretry + 1); |
	for (i = 0; i < ts->ts_rateindex; i++) {
		int rate_dur = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i);
723 | airtime += rate_dur * bf->rates[i].count; |
724 | } |
725 | |
	ieee80211_sta_register_airtime(sta, tid, airtime, 0);
727 | } |
728 | |
729 | static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, |
730 | struct ath_tx_status *ts, struct ath_buf *bf, |
731 | struct list_head *bf_head) |
732 | { |
733 | struct ieee80211_hw *hw = sc->hw; |
734 | struct ieee80211_tx_info *info; |
735 | struct ieee80211_sta *sta; |
736 | struct ieee80211_hdr *hdr; |
737 | struct ath_atx_tid *tid = NULL; |
738 | bool txok, flush; |
739 | |
740 | txok = !(ts->ts_status & ATH9K_TXERR_MASK); |
741 | flush = !!(ts->ts_status & ATH9K_TX_FLUSH); |
742 | txq->axq_tx_inprogress = false; |
743 | |
744 | txq->axq_depth--; |
745 | if (bf_is_ampdu_not_probing(bf)) |
746 | txq->axq_ampdu_depth--; |
747 | |
	ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
					     ts->ts_rateindex);
750 | |
751 | hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; |
	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
753 | if (sta) { |
754 | struct ath_node *an = (struct ath_node *)sta->drv_priv; |
		tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
		ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
757 | if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) |
758 | tid->clear_ps_filter = true; |
759 | } |
760 | |
761 | if (!bf_isampdu(bf)) { |
762 | if (!flush) { |
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
764 | memcpy(info->control.rates, bf->rates, |
765 | sizeof(info->control.rates)); |
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
			ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
						sta);
769 | } |
		ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
773 | |
774 | if (!flush) |
775 | ath_txq_schedule(sc, txq); |
776 | } |
777 | |
778 | static bool ath_lookup_legacy(struct ath_buf *bf) |
779 | { |
780 | struct sk_buff *skb; |
781 | struct ieee80211_tx_info *tx_info; |
782 | struct ieee80211_tx_rate *rates; |
783 | int i; |
784 | |
785 | skb = bf->bf_mpdu; |
786 | tx_info = IEEE80211_SKB_CB(skb); |
787 | rates = tx_info->control.rates; |
788 | |
789 | for (i = 0; i < 4; i++) { |
790 | if (!rates[i].count || rates[i].idx < 0) |
791 | break; |
792 | |
793 | if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) |
794 | return true; |
795 | } |
796 | |
797 | return false; |
798 | } |
799 | |
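/*
 * Compute the aggregate length limit for this frame's rate series,
 * bounded by the 4ms (or TXOP-limited) frame length table, the BTCOEX
 * limit and the peer's maximum A-MPDU size. Returns 0 if aggregation
 * should be avoided (probe or legacy rates).
 */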
800 | static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, |
801 | struct ath_atx_tid *tid) |
802 | { |
803 | struct sk_buff *skb; |
804 | struct ieee80211_tx_info *tx_info; |
805 | struct ieee80211_tx_rate *rates; |
806 | u32 max_4ms_framelen, frmlen; |
807 | u16 aggr_limit, bt_aggr_limit, legacy = 0; |
808 | int q = tid->txq->mac80211_qnum; |
809 | int i; |
810 | |
811 | skb = bf->bf_mpdu; |
812 | tx_info = IEEE80211_SKB_CB(skb); |
813 | rates = bf->rates; |
814 | |
815 | /* |
816 | * Find the lowest frame length among the rate series that will have a |
817 | * 4ms (or TXOP limited) transmit duration. |
818 | */ |
819 | max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; |
820 | |
821 | for (i = 0; i < 4; i++) { |
822 | int modeidx; |
823 | |
824 | if (!rates[i].count) |
825 | continue; |
826 | |
827 | if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { |
828 | legacy = 1; |
829 | break; |
830 | } |
831 | |
832 | if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) |
833 | modeidx = MCS_HT40; |
834 | else |
835 | modeidx = MCS_HT20; |
836 | |
837 | if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) |
838 | modeidx++; |
839 | |
840 | frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx]; |
841 | max_4ms_framelen = min(max_4ms_framelen, frmlen); |
842 | } |
843 | |
844 | /* |
845 | * limit aggregate size by the minimum rate if rate selected is |
846 | * not a probe rate, if rate selected is a probe rate then |
847 | * avoid aggregation of this packet. |
848 | */ |
849 | if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) |
850 | return 0; |
851 | |
852 | aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX); |
853 | |
854 | /* |
855 | * Override the default aggregation limit for BTCOEX. |
856 | */ |
857 | bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen); |
858 | if (bt_aggr_limit) |
859 | aggr_limit = bt_aggr_limit; |
860 | |
861 | if (tid->an->maxampdu) |
862 | aggr_limit = min(aggr_limit, tid->an->maxampdu); |
863 | |
864 | return aggr_limit; |
865 | } |
866 | |
867 | /* |
868 | * Returns the number of delimiters to be added to |
869 | * meet the minimum required mpdudensity. |
870 | */ |
871 | static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, |
872 | struct ath_buf *bf, u16 frmlen, |
873 | bool first_subfrm) |
874 | { |
875 | #define FIRST_DESC_NDELIMS 60 |
876 | u32 nsymbits, nsymbols; |
877 | u16 minlen; |
878 | u8 flags, rix; |
879 | int width, streams, half_gi, ndelim, mindelim; |
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
881 | |
882 | /* Select standard number of delimiters based on frame length alone */ |
883 | ndelim = ATH_AGGR_GET_NDELIM(frmlen); |
884 | |
885 | /* |
886 | * If encryption enabled, hardware requires some more padding between |
887 | * subframes. |
888 | * TODO - this could be improved to be dependent on the rate. |
889 | * The hardware can keep up at lower rates, but not higher rates |
890 | */ |
891 | if ((fi->keyix != ATH9K_TXKEYIX_INVALID) && |
892 | !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) |
893 | ndelim += ATH_AGGR_ENCRYPTDELIM; |
894 | |
895 | /* |
896 | * Add delimiter when using RTS/CTS with aggregation |
897 | * and non enterprise AR9003 card |
898 | */ |
899 | if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) && |
900 | (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE)) |
901 | ndelim = max(ndelim, FIRST_DESC_NDELIMS); |
902 | |
903 | /* |
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
908 | * |
909 | * If there is no mpdu density restriction, no further calculation |
910 | * is needed. |
911 | */ |
912 | |
913 | if (tid->an->mpdudensity == 0) |
914 | return ndelim; |
915 | |
916 | rix = bf->rates[0].idx; |
917 | flags = bf->rates[0].flags; |
918 | width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0; |
919 | half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0; |
920 | |
921 | if (half_gi) |
922 | nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity); |
923 | else |
924 | nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity); |
925 | |
926 | if (nsymbols == 0) |
927 | nsymbols = 1; |
928 | |
929 | streams = HT_RC_2_STREAMS(rix); |
930 | nsymbits = bits_per_symbol[rix % 8][width] * streams; |
931 | minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; |
932 | |
933 | if (frmlen < minlen) { |
934 | mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; |
935 | ndelim = max(mindelim, ndelim); |
936 | } |
937 | |
938 | return ndelim; |
939 | } |
940 | |
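/*
 * Fetch the next transmittable subframe for a TID, setting up a tx
 * buffer for it if necessary and doing the block-ack window
 * bookkeeping for aggregation candidates.
 */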
941 | static int |
942 | ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, |
943 | struct ath_atx_tid *tid, struct ath_buf **buf) |
944 | { |
945 | struct ieee80211_tx_info *tx_info; |
946 | struct ath_frame_info *fi; |
947 | struct ath_buf *bf; |
948 | struct sk_buff *skb, *first_skb = NULL; |
949 | u16 seqno; |
950 | int ret; |
951 | |
952 | while (1) { |
		ret = ath_tid_dequeue(tid, &skb);
954 | if (ret < 0) |
955 | return ret; |
956 | |
957 | fi = get_frame_info(skb); |
958 | bf = fi->bf; |
959 | if (!fi->bf) |
960 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
961 | else |
962 | bf->bf_state.stale = false; |
963 | |
964 | if (!bf) { |
965 | ath_txq_skb_done(sc, txq, skb); |
			ieee80211_free_txskb(sc->hw, skb);
967 | continue; |
968 | } |
969 | |
970 | bf->bf_next = NULL; |
971 | bf->bf_lastbf = bf; |
972 | |
973 | tx_info = IEEE80211_SKB_CB(skb); |
974 | tx_info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | |
975 | IEEE80211_TX_STATUS_EOSP); |
976 | |
977 | /* |
978 | * No aggregation session is running, but there may be frames |
979 | * from a previous session or a failed attempt in the queue. |
980 | * Send them out as normal data frames |
981 | */ |
982 | if (!tid->active) |
983 | tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU; |
984 | |
985 | if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { |
986 | bf->bf_state.bf_type = 0; |
987 | break; |
988 | } |
989 | |
990 | bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; |
991 | seqno = bf->bf_state.seqno; |
992 | |
993 | /* do not step over block-ack window */ |
994 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { |
			__skb_queue_tail(&tid->retry_q, skb);
996 | |
997 | /* If there are other skbs in the retry q, they are |
998 | * probably within the BAW, so loop immediately to get |
999 | * one of them. Otherwise the queue can get stuck. */ |
			if (!skb_queue_is_first(&tid->retry_q, skb) &&
			    !WARN_ON(skb == first_skb)) {
				if (!first_skb) /* infinite loop prevention */
1003 | first_skb = skb; |
1004 | continue; |
1005 | } |
1006 | return -EINPROGRESS; |
1007 | } |
1008 | |
1009 | if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { |
1010 | struct ath_tx_status ts = {}; |
1011 | struct list_head bf_head; |
1012 | |
			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
1017 | continue; |
1018 | } |
1019 | |
1020 | if (bf_isampdu(bf)) |
1021 | ath_tx_addto_baw(sc, tid, bf); |
1022 | |
1023 | break; |
1024 | } |
1025 | |
1026 | *buf = bf; |
1027 | return 0; |
1028 | } |
1029 | |
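/*
 * Chain subframes into an A-MPDU, stopping at the rate-dependent
 * aggregate length limit or at half the block-ack window, and insert
 * enough delimiters to satisfy the peer's MPDU density. Returns the
 * total aggregate length.
 */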
1030 | static int |
1031 | ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, |
1032 | struct ath_atx_tid *tid, struct list_head *bf_q, |
1033 | struct ath_buf *bf_first) |
1034 | { |
1035 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) |
1036 | struct ath_buf *bf = bf_first, *bf_prev = NULL; |
1037 | int nframes = 0, ndelim, ret; |
1038 | u16 aggr_limit = 0, al = 0, bpad = 0, |
1039 | al_delta, h_baw = tid->baw_size / 2; |
1040 | struct ieee80211_tx_info *tx_info; |
1041 | struct ath_frame_info *fi; |
1042 | struct sk_buff *skb; |

	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);

	while (bf) {
1050 | skb = bf->bf_mpdu; |
1051 | fi = get_frame_info(skb); |
1052 | |
1053 | /* do not exceed aggregation limit */ |
1054 | al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; |
1055 | if (nframes) { |
1056 | if (aggr_limit < al + bpad + al_delta || |
1057 | ath_lookup_legacy(bf) || nframes >= h_baw) |
1058 | goto stop; |
1059 | |
			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1061 | if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || |
1062 | !(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) |
1063 | goto stop; |
1064 | } |
1065 | |
1066 | /* add padding for previous frame to aggregation length */ |
1067 | al += bpad + al_delta; |
1068 | |
1069 | /* |
1070 | * Get the delimiters needed to meet the MPDU |
1071 | * density for this node. |
1072 | */ |
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
1075 | bpad = PADBYTES(al_delta) + (ndelim << 2); |
1076 | |
1077 | nframes++; |
1078 | bf->bf_next = NULL; |
1079 | |
1080 | /* link buffers of this frame to the aggregate */ |
1081 | bf->bf_state.ndelim = ndelim; |
1082 | |
		list_add_tail(&bf->list, bf_q);
1084 | if (bf_prev) |
1085 | bf_prev->bf_next = bf; |
1086 | |
1087 | bf_prev = bf; |
1088 | |
		ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1090 | if (ret < 0) |
1091 | break; |
1092 | } |
1093 | goto finish; |
1094 | stop: |
	__skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
1096 | finish: |
1097 | bf = bf_first; |
1098 | bf->bf_lastbf = bf_prev; |
1099 | |
1100 | if (bf == bf_prev) { |
		al = get_frame_info(bf->bf_mpdu)->framelen;
1102 | bf->bf_state.bf_type = BUF_AMPDU; |
1103 | } else { |
1104 | TX_STAT_INC(sc, txq->axq_qnum, a_aggr); |
1105 | } |
1106 | |
1107 | return al; |
1108 | #undef PADBYTES |
1109 | } |
1110 | |
1111 | /* |
1112 | * rix - rate index |
1113 | * pktlen - total bytes (delims + data + fcs + pads + pad delims) |
1114 | * width - 0 for 20 MHz, 1 for 40 MHz |
1115 | * half_gi - to use 4us v/s 3.6 us for symbol time |
1116 | */ |
1117 | u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, |
1118 | int width, int half_gi, bool shortPreamble) |
1119 | { |
1120 | u32 nbits, nsymbits, duration, nsymbols; |
1121 | int streams; |
1122 | |
1123 | /* find number of symbols: PLCP + data */ |
1124 | streams = HT_RC_2_STREAMS(rix); |
1125 | nbits = (pktlen << 3) + OFDM_PLCP_BITS; |
1126 | nsymbits = bits_per_symbol[rix % 8][width] * streams; |
1127 | nsymbols = (nbits + nsymbits - 1) / nsymbits; |
1128 | |
1129 | if (!half_gi) |
1130 | duration = SYMBOL_TIME(nsymbols); |
1131 | else |
1132 | duration = SYMBOL_TIME_HALFGI(nsymbols); |
1133 | |
	/* add up duration for legacy/ht training and signal fields */
1135 | duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); |
1136 | |
1137 | return duration; |
1138 | } |
1139 | |
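/*
 * Inverse of ath_pkt_duration(): given an airtime budget in
 * microseconds, return the largest frame length in bytes that fits at
 * the given MCS, bandwidth and guard interval.
 */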
1140 | static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi) |
1141 | { |
1142 | int streams = HT_RC_2_STREAMS(mcs); |
1143 | int symbols, bits; |
1144 | int bytes = 0; |
1145 | |
1146 | usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); |
1147 | symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec); |
1148 | bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams; |
1149 | bits -= OFDM_PLCP_BITS; |
1150 | bytes = bits / 8; |
1151 | if (bytes > 65532) |
1152 | bytes = 65532; |
1153 | |
1154 | return bytes; |
1155 | } |
1156 | |
1157 | void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop) |
1158 | { |
1159 | u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi; |
1160 | int mcs; |
1161 | |
1162 | /* 4ms is the default (and maximum) duration */ |
1163 | if (!txop || txop > 4096) |
1164 | txop = 4096; |
1165 | |
1166 | cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20]; |
1167 | cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI]; |
1168 | cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40]; |
1169 | cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI]; |
1170 | for (mcs = 0; mcs < 32; mcs++) { |
		cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
		cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
		cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
1175 | } |
1176 | } |
1177 | |
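/*
 * Compute the per-rate TPC limit, applying the chip-specific EEPROM
 * power table offsets and, on pre-AR9003 hardware, the HT40 power
 * delta.
 */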
1178 | static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, |
1179 | u8 rateidx, bool is_40, bool is_cck, bool is_mcs) |
1180 | { |
1181 | u8 max_power; |
1182 | struct sk_buff *skb; |
1183 | struct ath_frame_info *fi; |
1184 | struct ieee80211_tx_info *info; |
1185 | struct ath_hw *ah = sc->sc_ah; |
1186 | bool is_2ghz, is_5ghz, use_stbc; |
1187 | |
1188 | if (sc->tx99_state || !ah->tpc_enabled) |
1189 | return MAX_RATE_POWER; |
1190 | |
1191 | skb = bf->bf_mpdu; |
1192 | fi = get_frame_info(skb); |
1193 | info = IEEE80211_SKB_CB(skb); |
1194 | |
1195 | is_2ghz = info->band == NL80211_BAND_2GHZ; |
1196 | is_5ghz = info->band == NL80211_BAND_5GHZ; |
1197 | use_stbc = is_mcs && rateidx < 8 && (info->flags & |
1198 | IEEE80211_TX_CTL_STBC); |
1199 | |
1200 | if (is_mcs) |
1201 | rateidx += is_5ghz ? ATH9K_PWRTBL_11NA_HT_SHIFT |
1202 | : ATH9K_PWRTBL_11NG_HT_SHIFT; |
1203 | else if (is_2ghz && !is_cck) |
1204 | rateidx += ATH9K_PWRTBL_11NG_OFDM_SHIFT; |
1205 | else |
1206 | rateidx += ATH9K_PWRTBL_11NA_OFDM_SHIFT; |
1207 | |
1208 | if (!AR_SREV_9300_20_OR_LATER(ah)) { |
1209 | int txpower = fi->tx_power; |
1210 | |
1211 | if (is_40) { |
1212 | u8 power_ht40delta; |
1213 | struct ar5416_eeprom_def *eep = &ah->eeprom.def; |
1214 | u16 eeprom_rev = ah->eep_ops->get_eeprom_rev(ah); |
1215 | |
1216 | if (eeprom_rev >= AR5416_EEP_MINOR_VER_2) { |
1217 | struct modal_eep_header *pmodal; |
1218 | |
1219 | pmodal = &eep->modalHeader[is_2ghz]; |
1220 | power_ht40delta = pmodal->ht40PowerIncForPdadc; |
1221 | } else { |
1222 | power_ht40delta = 2; |
1223 | } |
1224 | txpower += power_ht40delta; |
1225 | } |
1226 | |
1227 | if (AR_SREV_9287(ah) || AR_SREV_9285(ah) || |
1228 | AR_SREV_9271(ah)) { |
1229 | txpower -= 2 * AR9287_PWR_TABLE_OFFSET_DB; |
1230 | } else if (AR_SREV_9280_20_OR_LATER(ah)) { |
1231 | s8 power_offset; |
1232 | |
1233 | power_offset = ah->eep_ops->get_eeprom(ah, |
1234 | EEP_PWR_TABLE_OFFSET); |
1235 | txpower -= 2 * power_offset; |
1236 | } |
1237 | |
1238 | if (OLC_FOR_AR9280_20_LATER(ah) && is_cck) |
1239 | txpower -= 2; |
1240 | |
1241 | txpower = max(txpower, 0); |
1242 | max_power = min_t(u8, ah->tx_power[rateidx], txpower); |
1243 | |
1244 | /* XXX: clamp minimum TX power at 1 for AR9160 since if |
1245 | * max_power is set to 0, frames are transmitted at max |
1246 | * TX power |
1247 | */ |
1248 | if (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) |
1249 | max_power = 1; |
1250 | } else if (!bf->bf_state.bfs_paprd) { |
1251 | if (use_stbc) |
1252 | max_power = min_t(u8, ah->tx_power_stbc[rateidx], |
1253 | fi->tx_power); |
1254 | else |
1255 | max_power = min_t(u8, ah->tx_power[rateidx], |
1256 | fi->tx_power); |
1257 | } else { |
1258 | max_power = ah->paprd_training_power; |
1259 | } |
1260 | |
1261 | return max_power; |
1262 | } |
1263 | |
1264 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, |
1265 | struct ath_tx_info *info, int len, bool rts) |
1266 | { |
1267 | struct ath_hw *ah = sc->sc_ah; |
1268 | struct ath_common *common = ath9k_hw_common(ah); |
1269 | struct sk_buff *skb; |
1270 | struct ieee80211_tx_info *tx_info; |
1271 | struct ieee80211_tx_rate *rates; |
1272 | const struct ieee80211_rate *rate; |
1273 | struct ieee80211_hdr *hdr; |
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
1275 | u32 rts_thresh = sc->hw->wiphy->rts_threshold; |
1276 | int i; |
1277 | u8 rix = 0; |
1278 | |
1279 | skb = bf->bf_mpdu; |
1280 | tx_info = IEEE80211_SKB_CB(skb); |
1281 | rates = bf->rates; |
1282 | hdr = (struct ieee80211_hdr *)skb->data; |
1283 | |
1284 | /* set dur_update_en for l-sig computation except for PS-Poll frames */ |
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
1286 | info->rtscts_rate = fi->rtscts_rate; |
1287 | |
1288 | for (i = 0; i < ARRAY_SIZE(bf->rates); i++) { |
1289 | bool is_40, is_sgi, is_sp, is_cck; |
1290 | int phy; |
1291 | |
1292 | if (!rates[i].count || (rates[i].idx < 0)) |
1293 | break; |
1294 | |
1295 | rix = rates[i].idx; |
1296 | info->rates[i].Tries = rates[i].count; |
1297 | |
1298 | /* |
1299 | * Handle RTS threshold for unaggregated HT frames. |
1300 | */ |
1301 | if (bf_isampdu(bf) && !bf_isaggr(bf) && |
1302 | (rates[i].flags & IEEE80211_TX_RC_MCS) && |
1303 | unlikely(rts_thresh != (u32) -1)) { |
1304 | if (!rts_thresh || (len > rts_thresh)) |
1305 | rts = true; |
1306 | } |
1307 | |
1308 | if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { |
1309 | info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; |
1310 | info->flags |= ATH9K_TXDESC_RTSENA; |
1311 | } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { |
1312 | info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; |
1313 | info->flags |= ATH9K_TXDESC_CTSENA; |
1314 | } |
1315 | |
1316 | if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) |
1317 | info->rates[i].RateFlags |= ATH9K_RATESERIES_2040; |
1318 | if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) |
1319 | info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI; |
1320 | |
1321 | is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); |
1322 | is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); |
1323 | is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); |
1324 | |
1325 | if (rates[i].flags & IEEE80211_TX_RC_MCS) { |
1326 | /* MCS rates */ |
1327 | info->rates[i].Rate = rix | 0x80; |
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
1332 | if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) |
1333 | info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; |
1334 | if (rix >= 8 && fi->dyn_smps) { |
1335 | info->rates[i].RateFlags |= |
1336 | ATH9K_RATESERIES_RTS_CTS; |
1337 | info->flags |= ATH9K_TXDESC_CTSENA; |
1338 | } |
1339 | |
			info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
								is_40, false, true);
1342 | continue; |
1343 | } |
1344 | |
1345 | /* legacy rates */ |
1346 | rate = &common->sbands[tx_info->band].bitrates[rates[i].idx]; |
1347 | if ((tx_info->band == NL80211_BAND_2GHZ) && |
1348 | !(rate->flags & IEEE80211_RATE_ERP_G)) |
1349 | phy = WLAN_RC_PHY_CCK; |
1350 | else |
1351 | phy = WLAN_RC_PHY_OFDM; |
1352 | |
1353 | info->rates[i].Rate = rate->hw_value; |
1354 | if (rate->hw_value_short) { |
1355 | if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) |
1356 | info->rates[i].Rate |= rate->hw_value_short; |
1357 | } else { |
1358 | is_sp = false; |
1359 | } |
1360 | |
1361 | if (bf->bf_state.bfs_paprd) |
1362 | info->rates[i].ChSel = ah->txchainmask; |
1363 | else |
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
1369 | |
1370 | is_cck = IS_CCK_RATE(info->rates[i].Rate); |
		info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, false,
							is_cck, false);
1373 | } |
1374 | |
1375 | /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ |
1376 | if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) |
1377 | info->flags &= ~ATH9K_TXDESC_RTSENA; |
1378 | |
1379 | /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ |
1380 | if (info->flags & ATH9K_TXDESC_RTSENA) |
1381 | info->flags &= ~ATH9K_TXDESC_CTSENA; |
1382 | } |
1383 | |
1384 | static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) |
1385 | { |
1386 | struct ieee80211_hdr *hdr; |
1387 | enum ath9k_pkt_type htype; |
1388 | __le16 fc; |
1389 | |
1390 | hdr = (struct ieee80211_hdr *)skb->data; |
1391 | fc = hdr->frame_control; |
1392 | |
1393 | if (ieee80211_is_beacon(fc)) |
1394 | htype = ATH9K_PKT_TYPE_BEACON; |
1395 | else if (ieee80211_is_probe_resp(fc)) |
1396 | htype = ATH9K_PKT_TYPE_PROBE_RESP; |
1397 | else if (ieee80211_is_atim(fc)) |
1398 | htype = ATH9K_PKT_TYPE_ATIM; |
1399 | else if (ieee80211_is_pspoll(fc)) |
1400 | htype = ATH9K_PKT_TYPE_PSPOLL; |
1401 | else |
1402 | htype = ATH9K_PKT_TYPE_NORMAL; |
1403 | |
1404 | return htype; |
1405 | } |
1406 | |
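/*
 * Fill the hardware descriptors for a chain of buffers, linking them
 * together and marking the first/middle/last subframes of an
 * aggregate. Rate and RTS/CTS decisions are made once per chain, on
 * the first buffer.
 */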
1407 | static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, |
1408 | struct ath_txq *txq, int len) |
1409 | { |
1410 | struct ath_hw *ah = sc->sc_ah; |
1411 | struct ath_buf *bf_first = NULL; |
1412 | struct ath_tx_info info; |
1413 | u32 rts_thresh = sc->hw->wiphy->rts_threshold; |
1414 | bool rts = false; |
1415 | |
1416 | memset(&info, 0, sizeof(info)); |
1417 | info.is_first = true; |
1418 | info.is_last = true; |
1419 | info.qcu = txq->axq_qnum; |
1420 | |
1421 | while (bf) { |
1422 | struct sk_buff *skb = bf->bf_mpdu; |
1423 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
1424 | struct ath_frame_info *fi = get_frame_info(skb); |
1425 | bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); |
1426 | |
1427 | info.type = get_hw_packet_type(skb); |
1428 | if (bf->bf_next) |
1429 | info.link = bf->bf_next->bf_daddr; |
1430 | else |
1431 | info.link = (sc->tx99_state) ? bf->bf_daddr : 0; |
1432 | |
1433 | if (!bf_first) { |
1434 | bf_first = bf; |
1435 | |
1436 | if (!sc->tx99_state) |
1437 | info.flags = ATH9K_TXDESC_INTREQ; |
1438 | if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) || |
1439 | txq == sc->tx.uapsdq) |
1440 | info.flags |= ATH9K_TXDESC_CLRDMASK; |
1441 | |
1442 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) |
1443 | info.flags |= ATH9K_TXDESC_NOACK; |
1444 | if (tx_info->flags & IEEE80211_TX_CTL_LDPC) |
1445 | info.flags |= ATH9K_TXDESC_LDPC; |
1446 | |
1447 | if (bf->bf_state.bfs_paprd) |
1448 | info.flags |= (u32) bf->bf_state.bfs_paprd << |
1449 | ATH9K_TXDESC_PAPRD_S; |
1450 | |
1451 | /* |
1452 | * mac80211 doesn't handle RTS threshold for HT because |
1453 | * the decision has to be taken based on AMPDU length |
1454 | * and aggregation is done entirely inside ath9k. |
1455 | * Set the RTS/CTS flag for the first subframe based |
1456 | * on the threshold. |
1457 | */ |
1458 | if (aggr && (bf == bf_first) && |
1459 | unlikely(rts_thresh != (u32) -1)) { |
1460 | /* |
1461 | * "len" is the size of the entire AMPDU. |
1462 | */ |
1463 | if (!rts_thresh || (len > rts_thresh)) |
1464 | rts = true; |
1465 | } |
1466 | |
1467 | if (!aggr) |
1468 | len = fi->framelen; |
1469 | |
			ath_buf_set_rate(sc, bf, &info, len, rts);
1471 | } |
1472 | |
1473 | info.buf_addr[0] = bf->bf_buf_addr; |
1474 | info.buf_len[0] = skb->len; |
1475 | info.pkt_len = fi->framelen; |
1476 | info.keyix = fi->keyix; |
1477 | info.keytype = fi->keytype; |
1478 | |
1479 | if (aggr) { |
1480 | if (bf == bf_first) |
1481 | info.aggr = AGGR_BUF_FIRST; |
1482 | else if (bf == bf_first->bf_lastbf) |
1483 | info.aggr = AGGR_BUF_LAST; |
1484 | else |
1485 | info.aggr = AGGR_BUF_MIDDLE; |
1486 | |
1487 | info.ndelim = bf->bf_state.ndelim; |
1488 | info.aggr_len = len; |
1489 | } |
1490 | |
1491 | if (bf == bf_first->bf_lastbf) |
1492 | bf_first = NULL; |
1493 | |
		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
1495 | bf = bf->bf_next; |
1496 | } |
1497 | } |
1498 | |
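/*
 * Form a burst of up to two non-aggregated frames. An A-MPDU
 * candidate ends the burst and is pushed back to the retry queue.
 */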
1499 | static void |
1500 | ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, |
1501 | struct ath_atx_tid *tid, struct list_head *bf_q, |
1502 | struct ath_buf *bf_first) |
1503 | { |
1504 | struct ath_buf *bf = bf_first, *bf_prev = NULL; |
1505 | int nframes = 0, ret; |
1506 | |
1507 | do { |
1508 | struct ieee80211_tx_info *tx_info; |
1509 | |
1510 | nframes++; |
		list_add_tail(&bf->list, bf_q);
1512 | if (bf_prev) |
1513 | bf_prev->bf_next = bf; |
1514 | bf_prev = bf; |
1515 | |
1516 | if (nframes >= 2) |
1517 | break; |
1518 | |
		ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1520 | if (ret < 0) |
1521 | break; |
1522 | |
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			__skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
1526 | break; |
1527 | } |
1528 | |
		ath_set_rates(tid->an->vif, tid->an->sta, bf);
1530 | } while (1); |
1531 | } |
1532 | |
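/*
 * Pull frames from a TID and queue either an aggregate or a short
 * burst to the hardware, honouring the per-queue depth limits.
 */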
1533 | static int ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, |
1534 | struct ath_atx_tid *tid) |
1535 | { |
1536 | struct ath_buf *bf = NULL; |
1537 | struct ieee80211_tx_info *tx_info; |
1538 | struct list_head bf_q; |
1539 | int aggr_len = 0, ret; |
1540 | bool aggr; |
1541 | |
	INIT_LIST_HEAD(&bf_q);

	ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1545 | if (ret < 0) |
1546 | return ret; |
1547 | |
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1549 | aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU); |
1550 | if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || |
1551 | (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) { |
		__skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
1553 | return -EBUSY; |
1554 | } |
1555 | |
	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf);

	if (list_empty(&bf_q))
1563 | return -EAGAIN; |
1564 | |
1565 | if (tid->clear_ps_filter || tid->an->no_ps_filter) { |
1566 | tid->clear_ps_filter = false; |
1567 | tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1568 | } |
1569 | |
	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1572 | return 0; |
1573 | } |
1574 | |
1575 | int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, |
1576 | u16 tid, u16 *ssn) |
1577 | { |
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1579 | struct ath_atx_tid *txtid; |
1580 | struct ath_txq *txq; |
1581 | struct ath_node *an; |
1582 | u8 density; |
1583 | |
	ath_dbg(common, XMIT, "%s called\n", __func__);
1585 | |
1586 | an = (struct ath_node *)sta->drv_priv; |
1587 | txtid = ATH_AN_2_TID(an, tid); |
1588 | txq = txtid->txq; |
1589 | |
1590 | ath_txq_lock(sc, txq); |
1591 | |
1592 | /* update ampdu factor/density, they may have changed. This may happen |
1593 | * in HT IBSS when a beacon with HT-info is received after the station |
1594 | * has already been added. |
1595 | */ |
1596 | if (sta->deflink.ht_cap.ht_supported) { |
1597 | an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + |
1598 | sta->deflink.ht_cap.ampdu_factor)) - 1; |
		density = ath9k_parse_mpdudensity(sta->deflink.ht_cap.ampdu_density);
1600 | an->mpdudensity = density; |
1601 | } |
1602 | |
1603 | txtid->active = true; |
1604 | *ssn = txtid->seq_start = txtid->seq_next; |
1605 | txtid->bar_index = -1; |
1606 | |
1607 | memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); |
1608 | txtid->baw_head = txtid->baw_tail = 0; |
1609 | |
1610 | ath_txq_unlock_complete(sc, txq); |
1611 | |
1612 | return 0; |
1613 | } |
1614 | |
1615 | void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) |
1616 | { |
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1618 | struct ath_node *an = (struct ath_node *)sta->drv_priv; |
1619 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); |
1620 | struct ath_txq *txq = txtid->txq; |
1621 | |
	ath_dbg(common, XMIT, "%s called\n", __func__);
1623 | |
1624 | ath_txq_lock(sc, txq); |
1625 | txtid->active = false; |
	ath_tx_flush_tid(sc, txtid);
1627 | ath_txq_unlock_complete(sc, txq); |
1628 | } |
1629 | |
1630 | void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, |
1631 | struct ath_node *an) |
1632 | { |
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1634 | struct ath_atx_tid *tid; |
1635 | int tidno; |
1636 | |
	ath_dbg(common, XMIT, "%s called\n", __func__);
1638 | |
1639 | for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { |
1640 | tid = ath_node_to_tid(an, tidno); |
1641 | |
		if (!skb_queue_empty(&tid->retry_q))
			ieee80211_sta_set_buffered(sta, tid->tidno, true);
	}
1646 | } |
1647 | |
1648 | void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) |
1649 | { |
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1651 | struct ath_atx_tid *tid; |
1652 | struct ath_txq *txq; |
1653 | int tidno; |
1654 | |
	ath_dbg(common, XMIT, "%s called\n", __func__);
1656 | |
1657 | for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { |
1658 | tid = ath_node_to_tid(an, tidno); |
1659 | txq = tid->txq; |
1660 | |
1661 | ath_txq_lock(sc, txq); |
1662 | tid->clear_ps_filter = true; |
		if (!skb_queue_empty(&tid->retry_q)) {
1664 | ath_tx_queue_tid(sc, tid); |
1665 | ath_txq_schedule(sc, txq); |
1666 | } |
		ath_txq_unlock_complete(sc, txq);
	}
1670 | } |
1671 | |
1672 | |
1673 | static void |
1674 | ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val) |
1675 | { |
1676 | struct ieee80211_hdr *hdr; |
1677 | u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
1678 | u16 mask_val = mask * val; |
1679 | |
1680 | hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; |
1681 | if ((hdr->frame_control & mask) != mask_val) { |
1682 | hdr->frame_control = (hdr->frame_control & ~mask) | mask_val; |
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   sizeof(*hdr), DMA_TO_DEVICE);
1685 | } |
1686 | } |
1687 | |
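/*
 * U-APSD service period delivery: pull up to @nframes buffered frames
 * from the given TIDs, set the MoreData bit as needed, mark EOSP on
 * the last frame and push the chain out on the dedicated uapsd queue.
 */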
1688 | void ath9k_release_buffered_frames(struct ieee80211_hw *hw, |
1689 | struct ieee80211_sta *sta, |
1690 | u16 tids, int nframes, |
1691 | enum ieee80211_frame_release_type reason, |
1692 | bool more_data) |
1693 | { |
1694 | struct ath_softc *sc = hw->priv; |
1695 | struct ath_node *an = (struct ath_node *)sta->drv_priv; |
1696 | struct ath_txq *txq = sc->tx.uapsdq; |
1697 | struct ieee80211_tx_info *info; |
1698 | struct list_head bf_q; |
1699 | struct ath_buf *bf_tail = NULL, *bf = NULL; |
1700 | int i, ret; |
1701 | |
1702 | INIT_LIST_HEAD(list: &bf_q); |
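	/* tids is a bitmap of TIDs; bit i selects TID i for release. */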
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);

		ath_txq_lock(sc, tid->txq);
		while (nframes > 0) {
			ret = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq,
						      tid, &bf);
			if (ret < 0)
				break;

			ath9k_set_moredata(sc, bf, true);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			if (bf_isampdu(bf))
				bf->bf_state.bf_type &= ~BUF_AGGR;
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);

			if (an->sta && skb_queue_empty(&tid->retry_q))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->txq);
	}

	if (list_empty(&bf_q))
		return;

	if (!more_data)
		ath9k_set_moredata(sc, bf_tail, false);

	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

/********************/
/* Queue Management */
/********************/

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1 << axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);

	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
			    ATH_CABQ_READY_TIME) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	rcu_read_lock();
	ath_txq_lock(sc, txq);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
	rcu_read_unlock();
}

bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (test_bit(ATH_OP_INVALID, &common->op_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (!sc->tx.txq[i].axq_depth)
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend) {
		RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
		ath_dbg(common, RESET,
			"Failed to stop TX DMA, queues=0x%03x!\n", npend);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		txq = &sc->tx.txq[i];
		ath_draintxq(sc, txq);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
}

/* For each acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_txq *queue;
	struct ath_atx_tid *tid;
	int ret;

	if (txq->mac80211_qnum < 0)
		return;

	if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
		return;

	ieee80211_txq_schedule_start(hw, txq->mac80211_qnum);
	spin_lock_bh(&sc->chan_lock);
	rcu_read_lock();

	if (sc->cur_chan->stopped)
		goto out;

	while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
		bool force;

		tid = (struct ath_atx_tid *)queue->drv_priv;

		ret = ath_tx_sched_aggr(sc, txq, tid);
		ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);

		force = !skb_queue_empty(&tid->retry_q);
		ieee80211_return_txq(hw, queue, force);
	}

out:
	rcu_read_unlock();
	spin_unlock_bh(&sc->chan_lock);
	ieee80211_txq_schedule_end(hw, txq->mac80211_qnum);
}

void ath_txq_schedule_all(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		txq = sc->tx.txq_map[i];

		spin_lock_bh(&txq->axq_lock);
		ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(sc, txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

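	/*
	 * Account queue depth per logical frame: each bf_lastbf-delimited
	 * chain on the list is one frame (or one aggregate), so hop from
	 * frame to frame while severing the bf_next links.
	 */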
	if (!internal) {
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}

	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(sc, txq->axq_qnum, queued);
}

static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;
	u8 txpower;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

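	/*
	 * mac80211 supplies txpower in dBm while ath9k tracks power in
	 * half-dBm steps, hence the factor of two; without a per-vif
	 * limit, fall back to the current channel's power.
	 */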
	if (tx_info->control.vif) {
		struct ieee80211_vif *vif = tx_info->control.vif;

		if (vif->bss_conf.txpower == INT_MIN)
			goto nonvifpower;
		txpower = 2 * vif->bss_conf.txpower;
	} else {
		struct ath_softc *sc;
nonvifpower:
		sc = hw->priv;

		txpower = sc->cur_chan->cur_txpower;
	}

	memset(fi, 0, sizeof(*fi));
	fi->txq = -1;
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->dyn_smps = sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->tx_power = txpower;

	if (!rate)
		return;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

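	/*
	 * Reduce the active chains when constraints apply: APM hardware on
	 * 5 GHz drops from three chains (0x7) to two (0x3) for lower
	 * rates, and AR9462 sends CCK on a single chain (0x2) while
	 * Bluetooth coexistence is active.
	 */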
	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
		 IS_CCK_RATE(rate))
		return 0x2;
	else
		return chainmask;
}

/*
 * Assign a descriptor (and sequence number if necessary),
 * and map the buffer for DMA. Frees the skb on error.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

void ath_assign_seq(struct ath_common *common, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp;

	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	if (!vif)
		return;

	avp = (struct ath_vif *)vif->drv_priv;

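	/*
	 * The sequence number occupies bits 4-15 of seq_ctrl
	 * (IEEE80211_SEQ_SEQ_SHIFT == 4), so adding 0x10 advances it by
	 * one while leaving the fragment number in the low bits alone.
	 */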
	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
		avp->seq_no += 0x10;

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
}

static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb);

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_node *an = NULL;
	struct ath_buf *bf;
	bool ps_resp;
	int q, ret;

	ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
		return ret;

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

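	/* Power save responses (e.g. U-APSD) use the dedicated UAPSD queue. */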
	if (ps_resp)
		txq = sc->tx.uapsdq;

	if (txctl->sta) {
		an = (struct ath_node *) sta->drv_priv;
		tid = ath_get_skb_tid(sc, an, skb);
	}

	ath_txq_lock(sc, txq);
	if (txq == sc->tx.txq_map[q]) {
		fi->txq = q;
		++txq->pending_frames;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		ath_txq_skb_done(sc, txq, skb);
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

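	/*
	 * Bound the multicast burst to roughly one DTIM period worth of
	 * airtime, split across the beacon slots.
	 */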
	max_duration =
		sc->cur_chan->beacon.beacon_interval * 1000 *
		sc->cur_chan->beacon.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while (skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_last_entry(&bf_q, struct ath_buf, list);
	ath9k_set_moredata(sc, bf, false);

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq,
			    struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);

	if (!(tx_flags & ATH_TX_ERROR)) {
		if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
			tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
		padpos = ieee80211_hdrlen(hdr->frame_control);
		padsize = padpos & 3;
		if (padsize && skb->len > padpos + padsize) {
			/*
			 * Remove MAC header padding before giving the frame
			 * back to mac80211.
			 */
			memmove(skb->data + padsize, skb->data, padpos);
			skb_pull(skb, padsize);
		}
	}

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	ath_txq_skb_done(sc, txq, skb);
	tx_info->status.status_driver_data[0] = sta;
	__skb_queue_tail(&txq->complete_q, skb);
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ieee80211_sta *sta,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;
	if (sc->tx99_state)
		goto skip_tx_complete;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq, sta);
	}
skip_tx_complete:
	/* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
{
	void *ptr = &tx_info->status;

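	/*
	 * Zero the status block while preserving the rate table at its
	 * start and status_driver_data at its end.
	 */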
	memset(ptr + sizeof(tx_info->status.rates), 0,
	       sizeof(tx_info->status) -
	       sizeof(tx_info->status.rates) -
	       sizeof(tx_info->status.status_driver_data));
}

static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	ath_clear_tx_status(tx_info);

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

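	/* The hardware counts retries; add one for the initial attempt. */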
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing this type of
		 * underrun should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_state.stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_state.stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
	int i;

	rcu_read_lock();
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
	rcu_read_unlock();
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	rcu_read_lock();
	for (;;) {
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

			if (ath9k_is_chanctx_enabled()) {
				ath_chanctx_event(sc, NULL,
						  ATH_CHANCTX_EVENT_BEACON_SENT);
			}

			ath9k_csa_update(sc);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			break;
		}

		bf = list_first_entry(fifo_list, struct ath_buf, list);
		if (bf->bf_state.stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			lastbf->bf_state.stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
	rcu_read_unlock();
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
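	/* The ring holds one hardware status descriptor (txs_len bytes)
	 * per entry. */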
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		return error;
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		error = ath_tx_edma_init(sc);

	return error;
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	int tidno, acno;

	for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
		tid = ath_node_to_tid(an, tidno);
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->active = false;
		tid->clear_ps_filter = true;
		__skb_queue_head_init(&tid->retry_q);
		INIT_LIST_HEAD(&tid->list);
		acno = TID_TO_WME_AC(tidno);
		tid->txq = sc->tx.txq_map[acno];

		if (!an->sta)
			break; /* just one multicast ath_atx_tid */
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	rcu_read_lock();

	for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
		tid = ath_node_to_tid(an, tidno);
		txq = tid->txq;

		ath_txq_lock(sc, txq);

		if (!list_empty(&tid->list))
			list_del_init(&tid->list);

		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);

		if (!an->sta)
			break; /* just one multicast ath_atx_tid */
	}

	rcu_read_unlock();
}

#ifdef CONFIG_ATH9K_TX99

int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT, "tx99 padding failed\n");
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}

#endif /* CONFIG_ATH9K_TX99 */