/*
 * Atheros CARL9170 driver
 *
 * 802.11 xmit & status routines
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 *    Permission to use, copy, modify, and/or distribute this software for any
 *    purpose with or without fee is hereby granted, provided that the above
 *    copyright notice and this permission notice appear in all copies.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"

static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
						unsigned int queue)
{
	if (unlikely(modparam_noht)) {
		return queue;
	} else {
		/*
		 * This is just another workaround, until
		 * someone figures out how to get QoS and
		 * AMPDU to play nicely together.
		 */

		return 2;	/* AC_BE */
	}
}

static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
					      struct sk_buff *skb)
{
	return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
}

static bool is_mem_full(struct ar9170 *ar)
{
	return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
		atomic_read(&ar->mem_free_blocks));
}

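/*
 * Account an outgoing frame in the per-queue tx statistics and stop
 * the mac80211 queues whenever a queue hits its soft limit or the
 * device memory is (nearly) exhausted. The frame itself is always
 * accepted, see the comment in the function body.
 */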
static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
{
	int queue, i;
	bool mem_full;

	atomic_inc(&ar->tx_total_queued);

	queue = skb_get_queue_mapping(skb);
	spin_lock_bh(&ar->tx_stats_lock);

	/*
	 * The driver has to accept the frame, regardless if the queue is
	 * full to the brim, or not. We have to do the queuing internally,
	 * since mac80211 assumes that a driver which can operate with
	 * aggregated frames does not reject frames for this reason.
	 */
	ar->tx_stats[queue].len++;
	ar->tx_stats[queue].count++;

	mem_full = is_mem_full(ar);
	for (i = 0; i < ar->hw->queues; i++) {
		if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
			ieee80211_stop_queue(ar->hw, i);
			ar->queue_stop_timeout[i] = jiffies;
		}
	}

	spin_unlock_bh(&ar->tx_stats_lock);
}

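/*
 * Map a queued superframe back to the ieee80211_sta it was sent to,
 * using the vif id stored in the superframe descriptor and the 802.11
 * addr1 field of the embedded frame.
 */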
/* needs rcu_read_lock */
static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
						   struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_vif *vif;
	unsigned int vif_id;

	vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
		 CARL9170_TX_SUPER_MISC_VIF_ID_S;

	if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
		return NULL;

	vif = rcu_dereference(ar->vif_priv[vif_id].vif);
	if (unlikely(!vif))
		return NULL;

	/*
	 * Normally we should use wrappers like ieee80211_get_DA to get
	 * the correct peer ieee80211_sta.
	 *
	 * But there is a problem with indirect traffic (broadcasts, or
	 * data which is designated for other stations) in station mode.
	 * The frame will be directed to the AP for distribution and not
	 * to the actual destination.
	 */

	return ieee80211_find_sta(vif, hdr->addr1);
}

static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (unlikely(!sta))
		goto out_rcu;

	sta_info = (struct carl9170_sta_info *) sta->drv_priv;
	if (atomic_dec_return(&sta_info->pending_frames) == 0)
		ieee80211_sta_block_awake(ar->hw, sta, false);

out_rcu:
	rcu_read_unlock();
}

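/*
 * Undo carl9170_tx_accounting: drop the frame from the per-queue
 * statistics, wake any mac80211 queue that has drained below its soft
 * limit (recording how long it was stopped), and complete tx_flush once
 * the very last queued frame is gone.
 */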
static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
{
	int queue;

	queue = skb_get_queue_mapping(skb);

	spin_lock_bh(&ar->tx_stats_lock);

	ar->tx_stats[queue].len--;

	if (!is_mem_full(ar)) {
		unsigned int i;
		for (i = 0; i < ar->hw->queues; i++) {
			if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
				continue;

			if (ieee80211_queue_stopped(ar->hw, i)) {
				unsigned long tmp;

				tmp = jiffies - ar->queue_stop_timeout[i];
				if (tmp > ar->max_queue_stop_timeout[i])
					ar->max_queue_stop_timeout[i] = tmp;
			}

			ieee80211_wake_queue(ar->hw, i);
		}
	}

	spin_unlock_bh(&ar->tx_stats_lock);

	if (atomic_dec_and_test(&ar->tx_total_queued))
		complete(&ar->tx_flush);
}

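/*
 * Reserve enough device memory blocks for this superframe and stamp the
 * allocated bitmap region into the descriptor as a (1-based) cookie.
 * Returns -ENOSPC if the device memory is currently exhausted.
 */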
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	unsigned int chunks;
	int cookie = -1;

	atomic_inc(&ar->mem_allocs);

	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
	if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);
		return -ENOSPC;
	}

	spin_lock_bh(&ar->mem_lock);
	cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
	spin_unlock_bh(&ar->mem_lock);

	if (unlikely(cookie < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);
		return -ENOSPC;
	}

	super = (void *) skb->data;

	/*
	 * Cookie #0 serves two special purposes:
	 *  1. The firmware might use it to generate BlockACK frames
	 *     in response to incoming BlockAckReqs.
	 *
	 *  2. Prevent double-free bugs.
	 */
	super->s.cookie = (u8) cookie + 1;
	return 0;
}

static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	int cookie;

	/* make a local copy of the cookie */
	cookie = super->s.cookie;
	/* invalidate cookie */
	super->s.cookie = 0;

	/*
	 * Do an out-of-bounds check on the cookie:
	 *
	 *  * cookie "0" is reserved and won't be assigned to any
	 *    out-going frame. Internally however, it is used to
	 *    mark no longer/un-accounted frames and serves as a
	 *    cheap way of preventing frames from being freed
	 *    twice by _accident_. NB: There is a tiny race...
	 *
	 *  * obviously, cookie number is limited by the amount
	 *    of available memory blocks, so the number can
	 *    never exceed the mem_blocks count.
	 */
	if (WARN_ON_ONCE(cookie == 0) ||
	    WARN_ON_ONCE(cookie > ar->fw.mem_blocks))
		return;

	atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
		   &ar->mem_free_blocks);

	spin_lock_bh(&ar->mem_lock);
	bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
	spin_unlock_bh(&ar->mem_lock);
}

/* Called from any context */
static void carl9170_tx_release(struct kref *ref)
{
	struct ar9170 *ar;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_tx_info *txinfo;
	struct sk_buff *skb;

	arinfo = container_of(ref, struct carl9170_tx_info, ref);
	txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
			      rate_driver_data);
	skb = container_of((void *) txinfo, struct sk_buff, cb);

	ar = arinfo->ar;
	if (WARN_ON_ONCE(!ar))
		return;

	/*
	 * This does not call ieee80211_tx_info_clear_status() because
	 * carl9170_tx_fill_rateinfo() has filled the rate information
	 * before we get to this point.
	 */
	memset_after(&txinfo->status, 0, rates);

	if (atomic_read(&ar->tx_total_queued))
		ar->tx_schedule = true;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
		if (!atomic_read(&ar->tx_ampdu_upload))
			ar->tx_ampdu_schedule = true;

		if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
			struct _carl9170_tx_superframe *super;

			super = (void *)skb->data;
			txinfo->status.ampdu_len = super->s.rix;
			txinfo->status.ampdu_ack_len = super->s.cnt;
		} else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
			   !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
			/*
			 * drop redundant tx_status reports:
			 *
			 * 1. ampdu_ack_len of the final tx_status does
			 *    include the feedback of this particular frame.
			 *
			 * 2. tx_status_irqsafe only queues up to 128
			 *    tx feedback reports and discards the rest.
			 *
			 * 3. minstrel_ht is picky, it only accepts
			 *    reports of frames with the TX_STATUS_AMPDU flag.
			 *
			 * 4. mac80211 is not particularly interested in
			 *    feedback either [CTL_REQ_TX_STATUS not set]
			 */

			ieee80211_free_txskb(ar->hw, skb);
			return;
		} else {
			/*
			 * Either the frame transmission has failed or
			 * mac80211 requested tx status.
			 */
		}
	}

	skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}

void carl9170_tx_get_skb(struct sk_buff *skb)
{
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;
	kref_get(&arinfo->ref);
}

int carl9170_tx_put_skb(struct sk_buff *skb)
{
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;

	return kref_put(&arinfo->ref, carl9170_tx_release);
}

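/*
 * Clear the block-ack window bit of one completed MPDU and advance the
 * window start (bsn) past any leading run of frames that have already
 * been accounted for, shifting the bitmap accordingly.
 */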
/* Caller must hold the tid_info->lock & rcu_read_lock */
static void carl9170_tx_shift_bm(struct ar9170 *ar,
				 struct carl9170_sta_tid *tid_info, u16 seq)
{
	u16 off;

	off = SEQ_DIFF(seq, tid_info->bsn);

	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	/*
	 * Sanity check. For each MPDU we set the bit in bitmap and
	 * clear it once we received the tx_status.
	 * But if the bit is already cleared then we've been bitten
	 * by a bug.
	 */
	WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));

	off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	if (!bitmap_empty(tid_info->bitmap, off))
		off = find_first_bit(tid_info->bitmap, off);

	tid_info->bsn += off;
	tid_info->bsn &= 0x0fff;

	bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
			   off, CARL9170_BAW_BITS);
}

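/*
 * A-MPDU tx status bookkeeping: update the per-TID block-ack window and
 * the aggregate ack counters, and convert the accumulated counters into
 * IEEE80211_TX_STAT_AMPDU feedback when the subframe that carried the
 * "immediate BA" request completes.
 */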
static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
	struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *tid_info;
	u8 tid;

	if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
	    txinfo->flags & IEEE80211_TX_CTL_INJECTED)
		return;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (unlikely(!sta))
		goto out_rcu;

	tid = ieee80211_get_tid(hdr);

	sta_info = (void *) sta->drv_priv;
	tid_info = rcu_dereference(sta_info->agg[tid]);
	if (!tid_info)
		goto out_rcu;

	spin_lock_bh(&tid_info->lock);
	if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
		carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));

	if (sta_info->stats[tid].clear) {
		sta_info->stats[tid].clear = false;
		sta_info->stats[tid].req = false;
		sta_info->stats[tid].ampdu_len = 0;
		sta_info->stats[tid].ampdu_ack_len = 0;
	}

	sta_info->stats[tid].ampdu_len++;
	if (txinfo->status.rates[0].count == 1)
		sta_info->stats[tid].ampdu_ack_len++;

	if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
		sta_info->stats[tid].req = true;

	if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
		super->s.rix = sta_info->stats[tid].ampdu_len;
		super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
		txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
		if (sta_info->stats[tid].req)
			txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		sta_info->stats[tid].clear = true;
	}
	spin_unlock_bh(&tid_info->lock);

out_rcu:
	rcu_read_unlock();
}

static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
				   struct ieee80211_tx_info *tx_info)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_bar *bar = (void *) super->frame_data;

	/*
	 * Unlike all other frames, the status report for BARs does
	 * not directly come from the hardware as it is incapable of
	 * matching a BA to a previously sent BAR.
	 * Instead the RX-path will scan for incoming BAs and set the
	 * IEEE80211_TX_STAT_ACK if it sees one that was likely
	 * caused by a BAR from us.
	 */

	if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
		struct carl9170_bar_list_entry *entry;
		int queue = skb_get_queue_mapping(skb);

		rcu_read_lock();
		list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
			if (entry->skb == skb) {
				spin_lock_bh(&ar->bar_list_lock[queue]);
				list_del_rcu(&entry->list);
				spin_unlock_bh(&ar->bar_list_lock[queue]);
				kfree_rcu(entry, head);
				goto out;
			}
		}

		WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
		     queue, bar->ra, bar->ta, bar->control,
		     bar->start_seq_num);
out:
		rcu_read_unlock();
	}
}

void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			const bool success)
{
	struct ieee80211_tx_info *txinfo;

	carl9170_tx_accounting_free(ar, skb);

	txinfo = IEEE80211_SKB_CB(skb);

	carl9170_tx_bar_status(ar, skb, txinfo);

	if (success)
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
	else
		ar->tx_ack_failures++;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		carl9170_tx_status_process_ampdu(ar, skb, txinfo);

	carl9170_tx_ps_unblock(ar, skb);
	carl9170_tx_put_skb(skb);
}

/* This function may be called from any context */
void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);

	atomic_dec(&ar->tx_total_pending);

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		atomic_dec(&ar->tx_ampdu_upload);

	if (carl9170_tx_put_skb(skb))
		tasklet_hi_schedule(&ar->usb_tasklet);
}

static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
					       struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		struct _carl9170_tx_superframe *txc = (void *) skb->data;

		if (txc->s.cookie != cookie)
			continue;

		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);

		carl9170_release_dev_space(ar, skb);
		return skb;
	}
	spin_unlock_bh(&queue->lock);

	return NULL;
}

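/*
 * Patch the retry count reported by the firmware into the rate entry it
 * actually used (rix) and invalidate all later entries, so mac80211's
 * rate control only sees attempts that really happened.
 */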
static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
	unsigned int tries, struct ieee80211_tx_info *txinfo)
{
	unsigned int i;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (txinfo->status.rates[i].idx < 0)
			break;

		if (i == rix) {
			txinfo->status.rates[i].count = tries;
			i++;
			break;
		}
	}

	for (; i < IEEE80211_TX_MAX_RATES; i++) {
		txinfo->status.rates[i].idx = -1;
		txinfo->status.rates[i].count = 0;
	}
}

static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	bool restart = false;

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);

		skb = skb_peek(&ar->tx_status[i]);

		if (!skb)
			goto next;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *) txinfo->rate_driver_data;

		if (time_is_before_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true)
			restart = true;

next:
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	if (restart) {
		/*
		 * At least one queue has been stuck for long enough.
		 * Give the device a kick and hope it gets back to
		 * work.
		 *
		 * possible reasons may include:
		 * - frames got lost/corrupted (bad connection to the device)
		 * - stalled rx processing/usb controller hiccups
		 * - firmware errors/bugs
		 * - every bug you can think of.
		 * - all bugs you can't...
		 * - ...
		 */
		carl9170_restart(ar, CARL9170_RR_STUCK_TX);
	}
}

static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
{
	struct carl9170_sta_tid *iter;
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_sta *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
		if (iter->state < CARL9170_TID_STATE_IDLE)
			continue;

		spin_lock_bh(&iter->lock);
		skb = skb_peek(&iter->queue);
		if (!skb)
			goto unlock;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *)txinfo->rate_driver_data;
		if (time_is_after_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
			goto unlock;

		sta = iter->sta;
		if (WARN_ON(!sta))
			goto unlock;

		ieee80211_stop_tx_ba_session(sta, iter->tid);
unlock:
		spin_unlock_bh(&iter->lock);
	}
	rcu_read_unlock();
}

void carl9170_tx_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_janitor.work);
	if (!IS_STARTED(ar))
		return;

	ar->tx_janitor_last_run = jiffies;

	carl9170_check_queue_stop_timeout(ar);
	carl9170_tx_ampdu_timeout(ar);

	if (!atomic_read(&ar->tx_total_queued))
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

static void __carl9170_tx_process_status(struct ar9170 *ar,
	const uint8_t cookie, const uint8_t info)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	unsigned int r, t, q;
	bool success = true;

	q = ar9170_qmap(info & CARL9170_TX_STATUS_QUEUE);

	skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
	if (!skb) {
		/*
		 * We have lost the race to another thread.
		 */
		return;
	}

	txinfo = IEEE80211_SKB_CB(skb);

	if (!(info & CARL9170_TX_STATUS_SUCCESS))
		success = false;

	r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
	t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;

	carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
	carl9170_tx_status(ar, skb, success);
}

void carl9170_tx_process_status(struct ar9170 *ar,
				const struct carl9170_rsp *cmd)
{
	unsigned int i;

	for (i = 0; i < cmd->hdr.ext; i++) {
		if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
			print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
					     (void *) cmd, cmd->hdr.len + 4);
			break;
		}

		__carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
					     cmd->_tx_status[i].info);
	}
}

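/*
 * Derive the PHY rate code, the transmit power (in 0.5 dBm steps, capped
 * at the configured power level) and the tx chain mask for a given
 * mac80211 tx rate from the calibration data stored in the EEPROM.
 */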
static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
	unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
{
	struct ieee80211_rate *rate = NULL;
	u8 *txpower;
	unsigned int idx;

	idx = txrate->idx;
	*tpc = 0;
	*phyrate = 0;

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			/* +1 dBm for HT40 */
			*tpc += 2;

			if (info->band == NL80211_BAND_2GHZ)
				txpower = ar->power_2G_ht40;
			else
				txpower = ar->power_5G_ht40;
		} else {
			if (info->band == NL80211_BAND_2GHZ)
				txpower = ar->power_2G_ht20;
			else
				txpower = ar->power_5G_ht20;
		}

		*phyrate = txrate->idx;
		*tpc += txpower[idx & 7];
	} else {
		if (info->band == NL80211_BAND_2GHZ) {
			if (idx < 4)
				txpower = ar->power_2G_cck;
			else
				txpower = ar->power_2G_ofdm;
		} else {
			txpower = ar->power_5G_leg;
			idx += 4;
		}

		rate = &__carl9170_ratetable[idx];
		*tpc += txpower[(rate->hw_value & 0x30) >> 4];
		*phyrate = rate->hw_value & 0xf;
	}

	if (ar->eeprom.tx_mask == 1) {
		*chains = AR9170_TX_PHY_TXCHAIN_1;
	} else {
		if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
		    rate && rate->bitrate >= 360)
			*chains = AR9170_TX_PHY_TXCHAIN_1;
		else
			*chains = AR9170_TX_PHY_TXCHAIN_2;
	}

	*tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2);
}

static __le32 carl9170_tx_physet(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
	unsigned int power = 0, chains = 0, phyrate = 0;
	__le32 tmp;

	tmp = cpu_to_le32(0);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
			AR9170_TX_PHY_BW_S);
	/* this works because 40 MHz is 2 and dup is 3 */
	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
			AR9170_TX_PHY_BW_S);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);

		/* heavy clip control */
		tmp |= cpu_to_le32((txrate->idx & 0x7) <<
			AR9170_TX_PHY_TX_HEAVY_CLIP_S);

		tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/*
		 * green field preamble does not work.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
		 */
	} else {
		if (info->band == NL80211_BAND_2GHZ) {
			if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
			else
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
		} else {
			tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
		}

		/*
		 * short preamble seems to be broken too.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
		 */
	}
	carl9170_tx_rate_tpc_chains(ar, info, txrate,
				    &phyrate, &power, &chains);

	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
	return tmp;
}

static bool carl9170_tx_rts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate,
				  bool ampdu, bool multi)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
		if (ampdu)
			break;
		fallthrough;

	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
			break;
		fallthrough;

	case CARL9170_ERP_RTS:
		if (likely(!multi))
			return true;
		break;

	default:
		break;
	}

	return false;
}

static bool carl9170_tx_cts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
			break;
		fallthrough;

	case CARL9170_ERP_CTS:
		return true;

	default:
		break;
	}

	return false;
}

static void carl9170_tx_get_rates(struct ar9170 *ar,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;

	BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
	BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);

	info = IEEE80211_SKB_CB(skb);

	ieee80211_get_tx_rates(vif, sta, skb,
			       info->control.rates,
			       IEEE80211_TX_MAX_RATES);
}

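/*
 * Translate the rateset picked by mac80211's rate control (sinfo) into
 * the superframe descriptor: the first rate goes into the hardware frame
 * header (phy_control/mac_control), the fallback rates into the
 * firmware's retry rate array, together with per-rate retry counts,
 * ERP protection and A-MPDU flags.
 */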
static void carl9170_tx_apply_rateset(struct ar9170 *ar,
				      struct ieee80211_tx_info *sinfo,
				      struct sk_buff *skb)
{
	struct ieee80211_tx_rate *txrate;
	struct ieee80211_tx_info *info;
	struct _carl9170_tx_superframe *txc = (void *) skb->data;
	int i;
	bool ampdu;
	bool no_ack;

	info = IEEE80211_SKB_CB(skb);
	ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
	no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);

	/* Set the rate control probe flag for all (sub-) frames.
	 * This is because the TX_STATS_AMPDU flag is only set on
	 * the last frame, so it has to be inherited.
	 */
	info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	/* NOTE: For the first rate, the ERP & AMPDU flags are directly
	 * taken from mac_control. For all fallback rates, the firmware
	 * updates the mac_control flags from the rate info field.
	 */
	for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
		__le32 phy_set;

		txrate = &sinfo->control.rates[i];
		if (txrate->idx < 0)
			break;

		phy_set = carl9170_tx_physet(ar, info, txrate);
		if (i == 0) {
			__le16 mac_tmp = cpu_to_le16(0);

			/* first rate - part of the hw's frame header */
			txc->f.phy_control = phy_set;

			if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
				mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);

			if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
				mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
			else if (carl9170_tx_cts_check(ar, txrate))
				mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);

			txc->f.mac_control |= mac_tmp;
		} else {
			/* fallback rates are stored in the firmware's
			 * retry rate set array.
			 */
			txc->s.rr[i - 1] = phy_set;
		}

		SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
			txrate->count);

		if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
				CARL9170_TX_SUPER_RI_ERP_PROT_S);
		else if (carl9170_tx_cts_check(ar, txrate))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
				CARL9170_TX_SUPER_RI_ERP_PROT_S);

		if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
			txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
	}
}

static int carl9170_tx_prepare(struct ar9170 *ar,
			       struct ieee80211_sta *sta,
			       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct _carl9170_tx_superframe *txc;
	struct carl9170_vif_info *cvif;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;
	unsigned int hw_queue;
	__le16 mac_tmp;
	u16 len;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
	BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
		     CARL9170_TX_SUPERDESC_LEN);

	BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
		     AR9170_TX_HWDESC_LEN);

	BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
		((CARL9170_TX_SUPER_MISC_VIF_ID >>
		 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));

	hw_queue = ar9170_qmap(carl9170_get_queue(ar, skb));

	hdr = (void *)skb->data;
	info = IEEE80211_SKB_CB(skb);
	len = skb->len;

	/*
	 * Note: If the frame was sent through a monitor interface,
	 * the ieee80211_vif pointer can be NULL.
	 */
	if (likely(info->control.vif))
		cvif = (void *) info->control.vif->drv_priv;
	else
		cvif = NULL;

	txc = skb_push(skb, sizeof(*txc));
	memset(txc, 0, sizeof(*txc));

	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);

	if (likely(cvif))
		SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);

	if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;

	if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;

	mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
			      AR9170_TX_MAC_BACKOFF);
	mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
			       AR9170_TX_MAC_QOS);

	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

	if (info->control.hw_key) {
		len += info->control.hw_key->icv_len;

		switch (info->control.hw_key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
			break;
		default:
			WARN_ON(1);
			goto err_out;
		}
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		unsigned int density, factor;

		if (unlikely(!sta || !cvif))
			goto err_out;

		factor = min_t(unsigned int, 1u,
			       sta->deflink.ht_cap.ampdu_factor);
		density = sta->deflink.ht_cap.ampdu_density;

		if (density) {
			/*
			 * Watch out!
			 *
			 * Otus uses slightly different density values than
			 * those from the 802.11n spec.
			 */
			density = max_t(unsigned int, density + 1, 7u);
		}

		SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
			txc->s.ampdu_settings, density);

		SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
			txc->s.ampdu_settings, factor);
	}

	txc->s.len = cpu_to_le16(skb->len);
	txc->f.length = cpu_to_le16(len + FCS_LEN);
	txc->f.mac_control = mac_tmp;

	arinfo = (void *)info->rate_driver_data;
	arinfo->timeout = jiffies;
	arinfo->ar = ar;
	kref_init(&arinfo->ref);
	return 0;

err_out:
	skb_pull(skb, sizeof(*txc));
	return -EINVAL;
}

static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;

	super = (void *) skb->data;
	super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
}

static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	int tmp;

	super = (void *) skb->data;

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
		CARL9170_TX_SUPER_AMPDU_DENSITY_S;

	/*
	 * If you haven't noticed, carl9170_tx_prepare has already filled
	 * in all ampdu spacing & factor parameters.
	 * Now it's time to check whether the settings have to be updated
	 * by the firmware, or if everything is still the same.
	 *
	 * There's no sane way to handle different density values with
	 * this hardware, so we may as well just do the compare in the
	 * driver.
	 */

	if (tmp != ar->current_density) {
		ar->current_density = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
	}

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
		CARL9170_TX_SUPER_AMPDU_FACTOR_S;

	if (tmp != ar->current_factor) {
		ar->current_factor = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
	}
}

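/*
 * A-MPDU scheduler: walk the active TIDs, pull frames in strict sequence
 * order from each TID's reorder queue (limited by the aggregate size and
 * the block-ack window), apply the rateset of the first subframe to the
 * whole batch, mark the last subframe with the "immediate BA" bit and
 * splice the batch onto the per-AC tx_pending queue for upload.
 */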
static void carl9170_tx_ampdu(struct ar9170 *ar)
{
	struct sk_buff_head agg;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb, *first;
	struct ieee80211_tx_info *tx_info_first;
	unsigned int i = 0, done_ampdus = 0;
	u16 seq, queue, tmpssn;

	atomic_inc(&ar->tx_ampdu_scheduler);
	ar->tx_ampdu_schedule = false;

	if (atomic_read(&ar->tx_ampdu_upload))
		return;

	if (!ar->tx_ampdu_list_len)
		return;

	__skb_queue_head_init(&agg);

	rcu_read_lock();
	tid_info = rcu_dereference(ar->tx_ampdu_iter);
	if (WARN_ON_ONCE(!tid_info)) {
		rcu_read_unlock();
		return;
	}

retry:
	list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
		i++;

		if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
			continue;

		queue = TID_TO_WME_AC(tid_info->tid);

		spin_lock_bh(&tid_info->lock);
		if (tid_info->state != CARL9170_TID_STATE_XMIT)
			goto processed;

		tid_info->counter++;
		first = skb_peek(&tid_info->queue);
		tmpssn = carl9170_get_seq(first);
		seq = tid_info->snx;

		if (unlikely(tmpssn != seq)) {
			tid_info->state = CARL9170_TID_STATE_IDLE;

			goto processed;
		}

		tx_info_first = NULL;
		while ((skb = skb_peek(&tid_info->queue))) {
			/* strict 0, 1, ..., n - 1, n frame sequence order */
			if (unlikely(carl9170_get_seq(skb) != seq))
				break;

			/* don't upload more than AMPDU FACTOR allows. */
			if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
			    (tid_info->max - 1)))
				break;

			if (!tx_info_first) {
				carl9170_tx_get_rates(ar, tid_info->vif,
						      tid_info->sta, first);
				tx_info_first = IEEE80211_SKB_CB(first);
			}

			carl9170_tx_apply_rateset(ar, tx_info_first, skb);

			atomic_inc(&ar->tx_ampdu_upload);
			tid_info->snx = seq = SEQ_NEXT(seq);
			__skb_unlink(skb, &tid_info->queue);

			__skb_queue_tail(&agg, skb);

			if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
				break;
		}

		if (skb_queue_empty(&tid_info->queue) ||
		    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
		    tid_info->snx) {
			/* stop TID, if A-MPDU frames are still missing,
			 * or whenever the queue is empty.
			 */
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		done_ampdus++;

processed:
		spin_unlock_bh(&tid_info->lock);

		if (skb_queue_empty(&agg))
			continue;

		/* apply ampdu spacing & factor settings */
		carl9170_set_ampdu_params(ar, skb_peek(&agg));

		/* set aggregation push bit */
		carl9170_set_immba(ar, skb_peek_tail(&agg));

		spin_lock_bh(&ar->tx_pending[queue].lock);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_bh(&ar->tx_pending[queue].lock);
		ar->tx_schedule = true;
	}
	if ((done_ampdus++ == 0) && (i++ == 0))
		goto retry;

	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();
}

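/*
 * Take the frame at the head of a pending queue, but only if device
 * memory could be reserved for it; otherwise leave it queued and return
 * NULL so the caller stops draining this queue for now.
 */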
static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
					    struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	spin_lock_bh(&queue->lock);
	skb = skb_peek(queue);
	if (unlikely(!skb))
		goto err_unlock;

	if (carl9170_alloc_dev_space(ar, skb))
		goto err_unlock;

	__skb_unlink(skb, queue);
	spin_unlock_bh(&queue->lock);

	info = IEEE80211_SKB_CB(skb);
	arinfo = (void *) info->rate_driver_data;

	arinfo->timeout = jiffies;
	return skb;

err_unlock:
	spin_unlock_bh(&queue->lock);
	return NULL;
}

void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	uint8_t q = 0;

	ar->tx_dropped++;

	super = (void *)skb->data;
	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
		ar9170_qmap(carl9170_get_queue(ar, skb)));
	__carl9170_tx_process_status(ar, super->s.cookie, q);
}

static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;
	struct ieee80211_tx_info *tx_info;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (!sta)
		goto out_rcu;

	sta_info = (void *) sta->drv_priv;
	tx_info = IEEE80211_SKB_CB(skb);

	if (unlikely(sta_info->sleeping) &&
	    !(tx_info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
		rcu_read_unlock();

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			atomic_dec(&ar->tx_ampdu_upload);

		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		carl9170_release_dev_space(ar, skb);
		carl9170_tx_status(ar, skb, false);
		return true;
	}

out_rcu:
	rcu_read_unlock();
	return false;
}

static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_bar *bar = (void *) super->frame_data;

	if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
	    skb->len >= sizeof(struct ieee80211_bar)) {
		struct carl9170_bar_list_entry *entry;
		unsigned int queue = skb_get_queue_mapping(skb);

		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
		if (!WARN_ON_ONCE(!entry)) {
			entry->skb = skb;
			spin_lock_bh(&ar->bar_list_lock[queue]);
			list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
			spin_unlock_bh(&ar->bar_list_lock[queue]);
		}
	}
}

static void carl9170_tx(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned int i, q;
	bool schedule_garbagecollector = false;

	ar->tx_schedule = false;

	if (unlikely(!IS_STARTED(ar)))
		return;

	carl9170_usb_handle_tx_err(ar);

	for (i = 0; i < ar->hw->queues; i++) {
		while (!skb_queue_empty(&ar->tx_pending[i])) {
			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
			if (unlikely(!skb))
				break;

			if (unlikely(carl9170_tx_ps_drop(ar, skb)))
				continue;

			carl9170_bar_check(ar, skb);

			atomic_inc(&ar->tx_total_pending);

			q = __carl9170_get_queue(ar, i);
			/*
			 * NB: tx_status[i] vs. tx_status[q],
			 * TODO: Move into pick_skb or alloc_dev_space.
			 */
			skb_queue_tail(&ar->tx_status[q], skb);

			/*
			 * increase ref count to "2".
			 * Ref counting is the easiest way to solve the
			 * race between the urb's completion routine:
			 *	carl9170_tx_callback
			 * and wlan tx status functions:
			 *	carl9170_tx_status/janitor.
			 */
			carl9170_tx_get_skb(skb);

			carl9170_usb_tx(ar, skb);
			schedule_garbagecollector = true;
		}
	}

	if (!schedule_garbagecollector)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

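/*
 * Queue an outgoing MPDU for aggregation: insert it in sequence order
 * into the TID's reorder queue (dropping it if it falls outside the
 * block-ack window) and report back whether the A-MPDU scheduler should
 * be kicked because the queue head is now the next expected sequence
 * number.
 */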
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
	struct ieee80211_sta *sta, struct sk_buff *skb,
	struct ieee80211_tx_info *txinfo)
{
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *agg;
	struct sk_buff *iter;
	u16 tid, seq, qseq, off;
	bool run = false;

	tid = carl9170_get_tid(skb);
	seq = carl9170_get_seq(skb);
	sta_info = (void *) sta->drv_priv;

	rcu_read_lock();
	agg = rcu_dereference(sta_info->agg[tid]);

	if (!agg)
		goto err_unlock_rcu;

	spin_lock_bh(&agg->lock);
	if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
		goto err_unlock;

	/* check if sequence is within the BA window */
	if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	off = SEQ_DIFF(seq, agg->bsn);
	if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
		goto err_unlock;

	if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
		__skb_queue_tail(&agg->queue, skb);
		agg->hsn = seq;
		goto queued;
	}

	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = carl9170_get_seq(iter);

		if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
			__skb_queue_after(&agg->queue, iter, skb);
			goto queued;
		}
	}

	__skb_queue_head(&agg->queue, skb);
queued:

	if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
		if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
			agg->state = CARL9170_TID_STATE_XMIT;
			run = true;
		}
	}

	spin_unlock_bh(&agg->lock);
	rcu_read_unlock();

	return run;

err_unlock:
	spin_unlock_bh(&agg->lock);

err_unlock_rcu:
	rcu_read_unlock();
	txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
	carl9170_tx_status(ar, skb, false);
	ar->tx_dropped++;
	return false;
}

void carl9170_op_tx(struct ieee80211_hw *hw,
		    struct ieee80211_tx_control *control,
		    struct sk_buff *skb)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_tx_info *info;
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_vif *vif;
	bool run;

	if (unlikely(!IS_STARTED(ar)))
		goto err_free;

	info = IEEE80211_SKB_CB(skb);
	vif = info->control.vif;

	if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
		goto err_free;

	carl9170_tx_accounting(ar, skb);
	/*
	 * from now on, one has to use carl9170_tx_status to free
	 * all resources which are associated with the frame.
	 */

	if (sta) {
		struct carl9170_sta_info *stai = (void *) sta->drv_priv;
		atomic_inc(&stai->pending_frames);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		/* to static code analyzers and reviewers:
		 * mac80211 guarantees that a valid "sta"
		 * reference is present, if a frame is to
		 * be part of an ampdu. Hence any extra
		 * sta == NULL checks are redundant in this
		 * special case.
		 */
		run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
		if (run)
			carl9170_tx_ampdu(ar);

	} else {
		unsigned int queue = skb_get_queue_mapping(skb);

		carl9170_tx_get_rates(ar, vif, sta, skb);
		carl9170_tx_apply_rateset(ar, info, skb);
		skb_queue_tail(&ar->tx_pending[queue], skb);
	}

	carl9170_tx(ar);
	return;

err_free:
	ar->tx_dropped++;
	ieee80211_free_txskb(ar->hw, skb);
}

void carl9170_tx_scheduler(struct ar9170 *ar)
{

	if (ar->tx_ampdu_schedule)
		carl9170_tx_ampdu(ar);

	if (ar->tx_schedule)
		carl9170_tx(ar);
}

/* caller has to take rcu_read_lock */
static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	int i = 1;

	/* The AR9170 hardware has no fancy beacon queue or some
	 * other scheduling mechanism. So, the driver has to make
	 * do by setting the two beacon timers (pretbtt and tbtt)
	 * once and then swapping the beacon address in the HW's
	 * register file each time the pretbtt fires.
	 */

	cvif = rcu_dereference(ar->beacon_iter);
	if (ar->vifs > 0 && cvif) {
		do {
			list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
							 list) {
				if (cvif->active && cvif->enable_beacon)
					goto out;
			}
		} while (ar->beacon_enabled && i--);

		/* no entry found in list */
		return NULL;
	}

out:
	RCU_INIT_POINTER(ar->beacon_iter, cvif);
	return cvif;
}

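/*
 * Build the PHY related beacon register values (HT1 plus either HT2 or
 * the legacy PLCP word) for the given beacon frame, based on the rate
 * mac80211 selected for it. Returns true if the beacon is sent on an HT
 * (MCS) rate, so the caller knows which register to program.
 */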
static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
				      u32 *ht1, u32 *plcp)
{
	struct ieee80211_tx_info *txinfo;
	struct ieee80211_tx_rate *rate;
	unsigned int power, chains;
	bool ht_rate;

	txinfo = IEEE80211_SKB_CB(skb);
	rate = &txinfo->control.rates[0];
	ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS);
	carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);

	*ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
	if (chains == AR9170_TX_PHY_TXCHAIN_2)
		*ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
	SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7);
	SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power);
	SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains);

	if (ht_rate) {
		*ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			*plcp |= AR9170_MAC_BCN_HT2_SGI;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			*ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
			*plcp |= AR9170_MAC_BCN_HT2_BW40;
		} else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
			*ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
			*plcp |= AR9170_MAC_BCN_HT2_BW40;
		}

		SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
	} else {
		if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M)
			*plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
		else
			*plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
	}

	return ht_rate;
}

int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
{
	struct sk_buff *skb = NULL;
	struct carl9170_vif_info *cvif;
	__le32 *data, *old = NULL;
	u32 word, ht1, plcp, off, addr, len;
	int i = 0, err = 0;
	bool ht_rate;

	rcu_read_lock();
	cvif = carl9170_pick_beaconing_vif(ar);
	if (!cvif)
		goto out_unlock;

	skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
				       NULL, NULL, 0);

	if (!skb) {
		err = -ENOMEM;
		goto err_free;
	}

	spin_lock_bh(&ar->beacon_lock);
	data = (__le32 *)skb->data;
	if (cvif->beacon)
		old = (__le32 *)cvif->beacon->data;

	off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
	addr = ar->fw.beacon_addr + off;
	len = roundup(skb->len + FCS_LEN, 4);

	if ((off + len) > ar->fw.beacon_max_len) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "beacon does not "
				  "fit into device memory!\n");
		}
		err = -EINVAL;
		goto err_unlock;
	}

	if (len > AR9170_MAC_BCN_LENGTH_MAX) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "no support for beacons "
				  "bigger than %d (yours:%d).\n",
				  AR9170_MAC_BCN_LENGTH_MAX, len);
		}

		err = -EMSGSIZE;
		goto err_unlock;
	}

	ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);

	carl9170_async_regwrite_begin(ar);
	carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
	if (ht_rate)
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
	else
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);

	for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
		/*
		 * XXX: This accesses beyond skb data for up
		 * to the last 3 bytes!!
		 */

		if (old && (data[i] == old[i]))
			continue;

		word = le32_to_cpu(data[i]);
		carl9170_async_regwrite(addr + 4 * i, word);
	}
	carl9170_async_regwrite_finish();

	dev_kfree_skb_any(cvif->beacon);
	cvif->beacon = NULL;

	err = carl9170_async_regwrite_result();
	if (!err)
		cvif->beacon = skb;
	spin_unlock_bh(&ar->beacon_lock);
	if (err)
		goto err_free;

	if (submit) {
		err = carl9170_bcn_ctrl(ar, cvif->id,
					CARL9170_BCN_CTRL_CAB_TRIGGER,
					addr, skb->len + FCS_LEN);

		if (err)
			goto err_free;
	}
out_unlock:
	rcu_read_unlock();
	return 0;

err_unlock:
	spin_unlock_bh(&ar->beacon_lock);

err_free:
	rcu_read_unlock();
	dev_kfree_skb_any(skb);
	return err;
}