// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

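/* Track the next expected A-MPDU start sequence number (SSN) for a QoS data
 * frame, so a correct BAR can be sent later if the txq is flushed. The low
 * four bits of seq_ctrl hold the fragment number, so adding 0x10 advances
 * the 12-bit sequence number by one.
 */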
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

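/* Tx status reporting uses a two-phase pattern: completed frames are
 * collected on a local list under status_lock, then reported to mac80211
 * only after the lock has been dropped, so that ieee80211_tx_status_ext()
 * is never called with status_lock held.
 */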
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct ieee80211_rate_status rs = {};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
				rs.rate_idx = wcid->rate;
				status.rates = &rs;
				status.n_rates = 1;
			} else {
				status.n_rates = 0;
			}
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

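/* Merge the caller's completion flag into the frame's tx_cb. Only once both
 * MT_TX_CB_DMA_DONE and MT_TX_CB_TXS_DONE are set is the frame moved to the
 * completion list for reporting.
 */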
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

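/* Allocate a packet ID for matching hardware tx status reports against this
 * frame. Frames that will never get a usable report are tagged with one of
 * the reserved IDs (MT_PACKET_ID_NO_ACK, MT_PACKET_ID_NO_SKB or, for frames
 * going through the WED hardware offload path, MT_PACKET_ID_WED) instead of
 * being tracked in the per-wcid idr.
 */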
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
		     ieee80211_is_data(hdr->frame_control)))
			return MT_PACKET_ID_WED;

		return MT_PACKET_ID_NO_SKB;
	}

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

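/* Look up (and remove) the frame matching a tx status report. A negative
 * pktid requests a flush. While scanning, time out any entry whose DMA
 * completed more than MT_TX_STATUS_SKB_TIMEOUT ago without a TXS report.
 */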
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
					  MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

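/* Walk all wcids with pending status entries and expire stale ones; with
 * flush set, complete every outstanding entry immediately.
 */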
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

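/* Frames without an airtime estimate bypass mac80211 AQL; their in-flight
 * count is tracked in wcid->non_aql_packets instead. Drop the count on
 * completion, clamping at zero in case of a counter reset race.
 */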
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

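/* Common tx completion path: update non-AQL accounting, handle testmode
 * frames, report status directly for untracked packet IDs, and otherwise
 * mark DMA completion so the frame can be matched with its TXS report.
 */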
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		struct ieee80211_rate_status rs = {};

		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
			rs.rate_idx = wcid->rate;
			status.rates = &rs;
			status.n_rates = 1;
		}
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

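/* Hand one frame to the queue backend. For station frames without an
 * airtime estimate, bump the non-AQL counter and ask the caller to stop
 * pulling frames once MT_MAX_NON_AQL_PKT are in flight.
 */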
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

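/* Driver-facing transmit entry point: queue the frame on the wcid's pending
 * list, link the wcid into the phy's tx list if needed, and kick the tx
 * worker, which performs the actual hardware queueing.
 */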
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
		skb_set_queue_mapping(skb, MT_TXQ_BE);

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	spin_lock_bh(&wcid->tx_pending.lock);
	__skb_queue_tail(&wcid->tx_pending, skb);
	spin_unlock_bh(&wcid->tx_pending.lock);

	spin_lock_bh(&phy->tx_lock);
	if (list_empty(&wcid->tx_list))
		list_add_tail(&wcid->tx_list, &phy->tx_list);
	spin_unlock_bh(&phy->tx_lock);

	mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

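/* Release frames buffered for a station in power save. Frames are queued
 * one behind, so the final frame can be flagged as last (EOSP set,
 * more-data cleared); if nothing was buffered, signal end of service
 * period directly.
 */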
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

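/* Pull as many frames as possible from one txq onto its hardware queue.
 * The burst stops when the queue fills up, the non-AQL packet limit is
 * reached, or the txq runs dry; the hardware is kicked once at the end.
 */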
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

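/* One scheduling round for an access category: let mac80211 pick the next
 * txq (airtime fairness), send any pending BAR, and burst frames until the
 * hardware queue backs up or no txq has data left.
 */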
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

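/* Drain one wcid's software pending queue into the hardware queues. Returns
 * -1 if a target queue is full so the caller can keep the wcid on the
 * schedule list; the pending-queue lock is dropped around the actual
 * queueing.
 */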
static int
mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_sta *sta;
	struct mt76_queue *q;
	struct sk_buff *skb;
	int ret = 0;

	spin_lock(&wcid->tx_pending.lock);
	while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int qid = skb_get_queue_mapping(skb);

		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		    !ieee80211_is_data(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(skb))
			qid = MT_TXQ_PSD;

		q = phy->q_tx[qid];
		if (mt76_txq_stopped(q)) {
			ret = -1;
			break;
		}

		__skb_unlink(skb, &wcid->tx_pending);
		spin_unlock(&wcid->tx_pending.lock);

		sta = wcid_to_sta(wcid);
		spin_lock(&q->lock);
		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
		dev->queue_ops->kick(dev, q);
		spin_unlock(&q->lock);

		spin_lock(&wcid->tx_pending.lock);
	}
	spin_unlock(&wcid->tx_pending.lock);

	return ret;
}

static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
	if (list_empty(&phy->tx_list))
		return;

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&phy->tx_lock);
	while (!list_empty(&phy->tx_list)) {
		struct mt76_wcid *wcid = NULL;
		int ret;

		wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
		list_del_init(&wcid->tx_list);

		spin_unlock(&phy->tx_lock);
		ret = mt76_txq_schedule_pending_wcid(phy, wcid);
		spin_lock(&phy->tx_lock);

		if (ret) {
			if (list_empty(&wcid->tx_list))
				list_add_tail(&wcid->tx_list, &phy->tx_list);
			break;
		}
	}
	spin_unlock(&phy->tx_lock);

	rcu_read_unlock();
	local_bh_enable();
}

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	mt76_txq_schedule_pending(phy);
	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

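/* Append pad bytes of zero padding at the tail of a tx frame, taking care
 * of frag lists. Returns -ENOMEM if the tailroom cannot be expanded.
 */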
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so update the length on it as well as on the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

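/* Allocate a DMA token for a txwi. When fewer than MT76_TOKEN_FREE_THR
 * tokens remain, tx is blocked until enough tokens have been released.
 * Tokens in the WED range are counted separately so the offload path can
 * wait for them to drain.
 */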
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *t, dma_addr_t phys)
{
	int token;

	spin_lock_bh(&dev->rx_token_lock);
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);

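/* Release a tx token. Sets *wake when enough tokens have been freed to
 * unblock tx; the caller is expected to act on it outside token_lock.
 */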
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->rx_token_lock);
	t = idr_remove(&dev->rx_token, token);
	spin_unlock_bh(&dev->rx_token_lock);

	return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);