// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
8
#include "mac.h"

#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/bitfield.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
28
29/*********/
30/* Rates */
31/*********/
32
33static struct ieee80211_rate ath10k_rates[] = {
34 { .bitrate = 10,
35 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
36 { .bitrate = 20,
37 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
38 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
39 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
40 { .bitrate = 55,
41 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
42 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
43 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
44 { .bitrate = 110,
45 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
46 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
47 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
48
49 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
50 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
51 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
52 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
53 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
54 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
55 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
56 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
57};
58
59static struct ieee80211_rate ath10k_rates_rev2[] = {
60 { .bitrate = 10,
61 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
62 { .bitrate = 20,
63 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
64 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
65 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
66 { .bitrate = 55,
67 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
68 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
69 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
70 { .bitrate = 110,
71 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
72 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
73 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
74
75 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
76 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
77 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
78 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
79 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
80 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
81 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
82 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
83};
84
85static const struct cfg80211_sar_freq_ranges ath10k_sar_freq_ranges[] = {
86 {.start_freq = 2402, .end_freq = 2494 },
87 {.start_freq = 5170, .end_freq = 5875 },
88};
89
static const struct cfg80211_sar_capa ath10k_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(ath10k_sar_freq_ranges),
	.freq_ranges = &ath10k_sar_freq_ranges[0],
};
95
96#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
97
98#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
99#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
100 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
101#define ath10k_g_rates (ath10k_rates + 0)
102#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
103
104#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
105#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
106
107#define ath10k_wmi_legacy_rates ath10k_rates
108
109static bool ath10k_mac_bitrate_is_cck(int bitrate)
110{
111 switch (bitrate) {
112 case 10:
113 case 20:
114 case 55:
115 case 110:
116 return true;
117 }
118
119 return false;
120}
121
122static u8 ath10k_mac_bitrate_to_rate(int bitrate)
123{
124 return DIV_ROUND_UP(bitrate, 5) |
125 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
126}
127
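/* Map a hardware rate value back to an index into the band's bitrate table.
 * The cck flag restricts the lookup to CCK (or non-CCK) entries; both long
 * and short preamble hardware values are matched.
 */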
128u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
129 u8 hw_rate, bool cck)
130{
131 const struct ieee80211_rate *rate;
132 int i;
133
	for (i = 0; i < sband->n_bitrates; i++) {
		rate = &sband->bitrates[i];

		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
			continue;

		if (rate->hw_value == hw_rate)
			return i;
		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
			 rate->hw_value_short == hw_rate)
			return i;
	}
146
147 return 0;
148}
149
150u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
151 u32 bitrate)
152{
153 int i;
154
155 for (i = 0; i < sband->n_bitrates; i++)
156 if (sband->bitrates[i].bitrate == bitrate)
157 return i;
158
159 return 0;
160}
161
162static int ath10k_mac_get_rate_hw_value(int bitrate)
163{
164 int i;
165 u8 hw_value_prefix = 0;
166
167 if (ath10k_mac_bitrate_is_cck(bitrate))
168 hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
169
170 for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
171 if (ath10k_rates[i].bitrate == bitrate)
172 return hw_value_prefix | ath10k_rates[i].hw_value;
173 }
174
175 return -EINVAL;
176}
177
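/* Return a bitmask of the MCS indices supported for the given spatial
 * stream, derived from the 2-bit per-NSS field of the VHT MCS map
 * (MCS 0-7, 0-8 or 0-9).
 */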
178static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
179{
180 switch ((mcs_map >> (2 * nss)) & 0x3) {
181 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
182 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
183 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
184 }
185 return 0;
186}
187
188static u32
189ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
190{
191 int nss;
192
193 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
194 if (ht_mcs_mask[nss])
195 return nss + 1;
196
197 return 1;
198}
199
200static u32
201ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
202{
203 int nss;
204
205 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
206 if (vht_mcs_mask[nss])
207 return nss + 1;
208
209 return 1;
210}
211
212int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
213{
214 enum wmi_host_platform_type platform_type;
215 int ret;
216
217 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
218 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
219 else
220 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
221
	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);

	if (ret && ret != -EOPNOTSUPP) {
		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
		return ret;
227 }
228
229 return 0;
230}
231
232/**********/
233/* Crypto */
234/**********/
235
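/* Translate a mac80211 key into a WMI vdev install key command and send it
 * to the firmware. For DISABLE_KEY the cipher is replaced with
 * WMI_CIPHER_NONE and no key data is sent.
 */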
236static int ath10k_send_key(struct ath10k_vif *arvif,
237 struct ieee80211_key_conf *key,
238 enum set_key_cmd cmd,
239 const u8 *macaddr, u32 flags)
240{
241 struct ath10k *ar = arvif->ar;
242 struct wmi_vdev_install_key_arg arg = {
243 .vdev_id = arvif->vdev_id,
244 .key_idx = key->keyidx,
245 .key_len = key->keylen,
246 .key_data = key->key,
247 .key_flags = flags,
248 .macaddr = macaddr,
249 };
250
251 lockdep_assert_held(&arvif->ar->conf_mutex);
252
253 switch (key->cipher) {
254 case WLAN_CIPHER_SUITE_CCMP:
255 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
256 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
257 break;
258 case WLAN_CIPHER_SUITE_TKIP:
259 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP];
260 arg.key_txmic_len = 8;
261 arg.key_rxmic_len = 8;
262 break;
263 case WLAN_CIPHER_SUITE_WEP40:
264 case WLAN_CIPHER_SUITE_WEP104:
265 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP];
266 break;
267 case WLAN_CIPHER_SUITE_CCMP_256:
268 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
269 break;
270 case WLAN_CIPHER_SUITE_GCMP:
271 case WLAN_CIPHER_SUITE_GCMP_256:
272 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM];
273 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
274 break;
275 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
276 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
277 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
278 case WLAN_CIPHER_SUITE_AES_CMAC:
279 WARN_ON(1);
280 return -EINVAL;
281 default:
282 ath10k_warn(ar, fmt: "cipher %d is not supported\n", key->cipher);
283 return -EOPNOTSUPP;
284 }
285
286 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
287 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
288
289 if (cmd == DISABLE_KEY) {
290 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
291 arg.key_data = NULL;
292 }
293
294 return ath10k_wmi_vdev_install_key(ar: arvif->ar, arg: &arg);
295}
296
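/* Send the key to the firmware and wait up to three seconds for the
 * install_key_done completion. Returns 1 without sending anything when
 * hardware crypto is disabled for this vif (nohwcrypt).
 */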
297static int ath10k_install_key(struct ath10k_vif *arvif,
298 struct ieee80211_key_conf *key,
299 enum set_key_cmd cmd,
300 const u8 *macaddr, u32 flags)
301{
302 struct ath10k *ar = arvif->ar;
303 int ret;
304 unsigned long time_left;
305
306 lockdep_assert_held(&ar->conf_mutex);
307
308 reinit_completion(x: &ar->install_key_done);
309
310 if (arvif->nohwcrypt)
311 return 1;
312
313 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
314 if (ret)
315 return ret;
316
317 time_left = wait_for_completion_timeout(x: &ar->install_key_done, timeout: 3 * HZ);
318 if (time_left == 0)
319 return -ETIMEDOUT;
320
321 return 0;
322}
323
324static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
325 const u8 *addr)
326{
327 struct ath10k *ar = arvif->ar;
328 struct ath10k_peer *peer;
329 int ret;
330 int i;
331 u32 flags;
332
333 lockdep_assert_held(&ar->conf_mutex);
334
335 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
336 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
337 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
338 return -EINVAL;
339
340 spin_lock_bh(lock: &ar->data_lock);
341 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr);
342 spin_unlock_bh(lock: &ar->data_lock);
343
344 if (!peer)
345 return -ENOENT;
346
347 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
348 if (arvif->wep_keys[i] == NULL)
349 continue;
350
351 switch (arvif->vif->type) {
352 case NL80211_IFTYPE_AP:
353 flags = WMI_KEY_PAIRWISE;
354
355 if (arvif->def_wep_key_idx == i)
356 flags |= WMI_KEY_TX_USAGE;
357
358 ret = ath10k_install_key(arvif, key: arvif->wep_keys[i],
359 cmd: SET_KEY, macaddr: addr, flags);
360 if (ret < 0)
361 return ret;
362 break;
363 case NL80211_IFTYPE_ADHOC:
364 ret = ath10k_install_key(arvif, key: arvif->wep_keys[i],
365 cmd: SET_KEY, macaddr: addr,
366 WMI_KEY_PAIRWISE);
367 if (ret < 0)
368 return ret;
369
370 ret = ath10k_install_key(arvif, key: arvif->wep_keys[i],
371 cmd: SET_KEY, macaddr: addr, WMI_KEY_GROUP);
372 if (ret < 0)
373 return ret;
374 break;
375 default:
376 WARN_ON(1);
377 return -EINVAL;
378 }
379
380 spin_lock_bh(lock: &ar->data_lock);
381 peer->keys[i] = arvif->wep_keys[i];
382 spin_unlock_bh(lock: &ar->data_lock);
383 }
384
	/* In some cases (notably with static WEP IBSS with multiple keys)
	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
	 * effective so use that.
	 *
	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
	 */
393 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
394 return 0;
395
396 if (arvif->def_wep_key_idx == -1)
397 return 0;
398
	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}
408
409 return 0;
410}
411
412static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
413 const u8 *addr)
414{
415 struct ath10k *ar = arvif->ar;
416 struct ath10k_peer *peer;
417 int first_errno = 0;
418 int ret;
419 int i;
420 u32 flags = 0;
421
422 lockdep_assert_held(&ar->conf_mutex);
423
424 spin_lock_bh(lock: &ar->data_lock);
425 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr);
426 spin_unlock_bh(lock: &ar->data_lock);
427
428 if (!peer)
429 return -ENOENT;
430
431 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
432 if (peer->keys[i] == NULL)
433 continue;
434
435 /* key flags are not required to delete the key */
436 ret = ath10k_install_key(arvif, key: peer->keys[i],
437 cmd: DISABLE_KEY, macaddr: addr, flags);
438 if (ret < 0 && first_errno == 0)
439 first_errno = ret;
440
441 if (ret < 0)
442 ath10k_warn(ar, fmt: "failed to remove peer wep key %d: %d\n",
443 i, ret);
444
445 spin_lock_bh(lock: &ar->data_lock);
446 peer->keys[i] = NULL;
447 spin_unlock_bh(lock: &ar->data_lock);
448 }
449
450 return first_errno;
451}
452
453bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
454 u8 keyidx)
455{
456 struct ath10k_peer *peer;
457 int i;
458
459 lockdep_assert_held(&ar->data_lock);
460
461 /* We don't know which vdev this peer belongs to,
462 * since WMI doesn't give us that information.
463 *
464 * FIXME: multi-bss needs to be handled.
465 */
466 peer = ath10k_peer_find(ar, vdev_id: 0, addr);
467 if (!peer)
468 return false;
469
470 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
471 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
472 return true;
473 }
474
475 return false;
476}
477
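/* Remove every installation of the given key across all peers. Peer
 * addresses are collected one at a time under data_lock because the actual
 * key removal may sleep, so the list walk is restarted for each match.
 */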
478static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
479 struct ieee80211_key_conf *key)
480{
481 struct ath10k *ar = arvif->ar;
482 struct ath10k_peer *peer;
483 u8 addr[ETH_ALEN];
484 int first_errno = 0;
485 int ret;
486 int i;
487 u32 flags = 0;
488
489 lockdep_assert_held(&ar->conf_mutex);
490
	for (;;) {
		/* since ath10k_install_key() can sleep we can't hold
		 * data_lock all the time, so we try to remove the keys
		 * incrementally
		 */
495 spin_lock_bh(lock: &ar->data_lock);
496 i = 0;
497 list_for_each_entry(peer, &ar->peers, list) {
498 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
499 if (peer->keys[i] == key) {
500 ether_addr_copy(dst: addr, src: peer->addr);
501 peer->keys[i] = NULL;
502 break;
503 }
504 }
505
506 if (i < ARRAY_SIZE(peer->keys))
507 break;
508 }
509 spin_unlock_bh(lock: &ar->data_lock);
510
511 if (i == ARRAY_SIZE(peer->keys))
512 break;
513 /* key flags are not required to delete the key */
514 ret = ath10k_install_key(arvif, key, cmd: DISABLE_KEY, macaddr: addr, flags);
515 if (ret < 0 && first_errno == 0)
516 first_errno = ret;
517
518 if (ret)
519 ath10k_warn(ar, fmt: "failed to remove key for %pM: %d\n",
520 addr, ret);
521 }
522
523 return first_errno;
524}
525
526static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
527 struct ieee80211_key_conf *key)
528{
529 struct ath10k *ar = arvif->ar;
530 struct ath10k_peer *peer;
531 int ret;
532
533 lockdep_assert_held(&ar->conf_mutex);
534
535 list_for_each_entry(peer, &ar->peers, list) {
536 if (ether_addr_equal(addr1: peer->addr, addr2: arvif->vif->addr))
537 continue;
538
539 if (ether_addr_equal(addr1: peer->addr, addr2: arvif->bssid))
540 continue;
541
542 if (peer->keys[key->keyidx] == key)
543 continue;
544
545 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
546 arvif->vdev_id, key->keyidx);
547
548 ret = ath10k_install_peer_wep_keys(arvif, addr: peer->addr);
549 if (ret) {
550 ath10k_warn(ar, fmt: "failed to update wep keys on vdev %i for peer %pM: %d\n",
551 arvif->vdev_id, peer->addr, ret);
552 return ret;
553 }
554 }
555
556 return 0;
557}
558
559/*********************/
560/* General utilities */
561/*********************/
562
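/* Map a cfg80211 channel definition (band and width) to the corresponding
 * WMI phy mode. Unsupported combinations trigger a WARN and return
 * MODE_UNKNOWN.
 */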
563static inline enum wmi_phy_mode
564chan_to_phymode(const struct cfg80211_chan_def *chandef)
565{
566 enum wmi_phy_mode phymode = MODE_UNKNOWN;
567
568 switch (chandef->chan->band) {
569 case NL80211_BAND_2GHZ:
570 switch (chandef->width) {
571 case NL80211_CHAN_WIDTH_20_NOHT:
572 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
573 phymode = MODE_11B;
574 else
575 phymode = MODE_11G;
576 break;
577 case NL80211_CHAN_WIDTH_20:
578 phymode = MODE_11NG_HT20;
579 break;
580 case NL80211_CHAN_WIDTH_40:
581 phymode = MODE_11NG_HT40;
582 break;
583 default:
584 phymode = MODE_UNKNOWN;
585 break;
586 }
587 break;
588 case NL80211_BAND_5GHZ:
589 switch (chandef->width) {
590 case NL80211_CHAN_WIDTH_20_NOHT:
591 phymode = MODE_11A;
592 break;
593 case NL80211_CHAN_WIDTH_20:
594 phymode = MODE_11NA_HT20;
595 break;
596 case NL80211_CHAN_WIDTH_40:
597 phymode = MODE_11NA_HT40;
598 break;
599 case NL80211_CHAN_WIDTH_80:
600 phymode = MODE_11AC_VHT80;
601 break;
602 case NL80211_CHAN_WIDTH_160:
603 phymode = MODE_11AC_VHT160;
604 break;
605 case NL80211_CHAN_WIDTH_80P80:
606 phymode = MODE_11AC_VHT80_80;
607 break;
608 default:
609 phymode = MODE_UNKNOWN;
610 break;
611 }
612 break;
613 default:
614 break;
615 }
616
617 WARN_ON(phymode == MODE_UNKNOWN);
618 return phymode;
619}
620
621static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
622{
623/*
624 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
625 * 0 for no restriction
626 * 1 for 1/4 us
627 * 2 for 1/2 us
628 * 3 for 1 us
629 * 4 for 2 us
630 * 5 for 4 us
631 * 6 for 8 us
632 * 7 for 16 us
633 */
634 switch (mpdudensity) {
635 case 0:
636 return 0;
637 case 1:
638 case 2:
639 case 3:
640 /* Our lower layer calculations limit our precision to
641 * 1 microsecond
642 */
643 return 1;
644 case 4:
645 return 2;
646 case 5:
647 return 4;
648 case 6:
649 return 8;
650 case 7:
651 return 16;
652 default:
653 return 0;
654 }
655}
656
657int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
658 struct cfg80211_chan_def *def)
659{
660 struct ieee80211_chanctx_conf *conf;
661
662 rcu_read_lock();
663 conf = rcu_dereference(vif->bss_conf.chanctx_conf);
664 if (!conf) {
665 rcu_read_unlock();
666 return -ENOENT;
667 }
668
669 *def = conf->def;
670 rcu_read_unlock();
671
672 return 0;
673}
674
675static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
676 struct ieee80211_chanctx_conf *conf,
677 void *data)
678{
679 int *num = data;
680
681 (*num)++;
682}
683
684static int ath10k_mac_num_chanctxs(struct ath10k *ar)
685{
686 int num = 0;
687
688 ieee80211_iter_chan_contexts_atomic(hw: ar->hw,
689 iter: ath10k_mac_num_chanctxs_iter,
690 iter_data: &num);
691
692 return num;
693}
694
695static void
696ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
697 struct ieee80211_chanctx_conf *conf,
698 void *data)
699{
700 struct cfg80211_chan_def **def = data;
701
702 *def = &conf->def;
703}
704
705static void ath10k_wait_for_peer_delete_done(struct ath10k *ar, u32 vdev_id,
706 const u8 *addr)
707{
708 unsigned long time_left;
709 int ret;
710
711 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
712 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
713 if (ret) {
			ath10k_warn(ar, "failed to wait for peer deleted: %d\n", ret);
715 return;
716 }
717
718 time_left = wait_for_completion_timeout(x: &ar->peer_delete_done,
719 timeout: 5 * HZ);
720 if (!time_left)
721 ath10k_warn(ar, fmt: "Timeout in receiving peer delete response\n");
722 }
723}
724
725static int ath10k_peer_create(struct ath10k *ar,
726 struct ieee80211_vif *vif,
727 struct ieee80211_sta *sta,
728 u32 vdev_id,
729 const u8 *addr,
730 enum wmi_peer_type peer_type)
731{
732 struct ath10k_peer *peer;
733 int ret;
734
735 lockdep_assert_held(&ar->conf_mutex);
736
737 /* Each vdev consumes a peer entry as well. */
738 if (ar->num_peers + list_count_nodes(head: &ar->arvifs) >= ar->max_num_peers)
739 return -ENOBUFS;
740
741 ret = ath10k_wmi_peer_create(ar, vdev_id, peer_addr: addr, peer_type);
742 if (ret) {
743 ath10k_warn(ar, fmt: "failed to create wmi peer %pM on vdev %i: %i\n",
744 addr, vdev_id, ret);
745 return ret;
746 }
747
748 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
749 if (ret) {
750 ath10k_warn(ar, fmt: "failed to wait for created wmi peer %pM on vdev %i: %i\n",
751 addr, vdev_id, ret);
752 return ret;
753 }
754
755 spin_lock_bh(lock: &ar->data_lock);
756
757 peer = ath10k_peer_find(ar, vdev_id, addr);
758 if (!peer) {
759 spin_unlock_bh(lock: &ar->data_lock);
760 ath10k_warn(ar, fmt: "failed to find peer %pM on vdev %i after creation\n",
761 addr, vdev_id);
762 ath10k_wait_for_peer_delete_done(ar, vdev_id, addr);
763 return -ENOENT;
764 }
765
766 peer->vif = vif;
767 peer->sta = sta;
768
769 spin_unlock_bh(lock: &ar->data_lock);
770
771 ar->num_peers++;
772
773 return 0;
774}
775
776static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
777{
778 struct ath10k *ar = arvif->ar;
779 u32 param;
780 int ret;
781
782 param = ar->wmi.pdev_param->sta_kickout_th;
783 ret = ath10k_wmi_pdev_set_param(ar, id: param,
784 ATH10K_KICKOUT_THRESHOLD);
785 if (ret) {
786 ath10k_warn(ar, fmt: "failed to set kickout threshold on vdev %i: %d\n",
787 arvif->vdev_id, ret);
788 return ret;
789 }
790
791 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
792 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: param,
793 ATH10K_KEEPALIVE_MIN_IDLE);
794 if (ret) {
795 ath10k_warn(ar, fmt: "failed to set keepalive minimum idle time on vdev %i: %d\n",
796 arvif->vdev_id, ret);
797 return ret;
798 }
799
800 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
801 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: param,
802 ATH10K_KEEPALIVE_MAX_IDLE);
803 if (ret) {
804 ath10k_warn(ar, fmt: "failed to set keepalive maximum idle time on vdev %i: %d\n",
805 arvif->vdev_id, ret);
806 return ret;
807 }
808
809 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
810 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: param,
811 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
812 if (ret) {
813 ath10k_warn(ar, fmt: "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
814 arvif->vdev_id, ret);
815 return ret;
816 }
817
818 return 0;
819}
820
821static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
822{
823 struct ath10k *ar = arvif->ar;
824 u32 vdev_param;
825
826 vdev_param = ar->wmi.vdev_param->rts_threshold;
827 return ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param, param_value: value);
828}
829
830static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
831{
832 int ret;
833
834 lockdep_assert_held(&ar->conf_mutex);
835
836 ret = ath10k_wmi_peer_delete(ar, vdev_id, peer_addr: addr);
837 if (ret)
838 return ret;
839
840 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
841 if (ret)
842 return ret;
843
844 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
845 unsigned long time_left;
846
847 time_left = wait_for_completion_timeout
848 (x: &ar->peer_delete_done, timeout: 5 * HZ);
849
850 if (!time_left) {
851 ath10k_warn(ar, fmt: "Timeout in receiving peer delete response\n");
852 return -ETIMEDOUT;
853 }
854 }
855
856 ar->num_peers--;
857
858 return 0;
859}
860
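/* Drop all peer_map references to this peer (warning about any stale
 * entries), unlink it from the peer list and free it.
 */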
861static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
862{
863 int peer_id, i;
864
865 lockdep_assert_held(&ar->conf_mutex);
866
867 for_each_set_bit(peer_id, peer->peer_ids,
868 ATH10K_MAX_NUM_PEER_IDS) {
869 ar->peer_map[peer_id] = NULL;
870 }
871
872 /* Double check that peer is properly un-referenced from
873 * the peer_map
874 */
875 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
876 if (ar->peer_map[i] == peer) {
877 ath10k_warn(ar, fmt: "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
878 peer->addr, peer, i);
879 ar->peer_map[i] = NULL;
880 }
881 }
882
883 list_del(entry: &peer->list);
884 kfree(objp: peer);
885 ar->num_peers--;
886}
887
888static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
889{
890 struct ath10k_peer *peer, *tmp;
891
892 lockdep_assert_held(&ar->conf_mutex);
893
894 spin_lock_bh(lock: &ar->data_lock);
895 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
896 if (peer->vdev_id != vdev_id)
897 continue;
898
899 ath10k_warn(ar, fmt: "removing stale peer %pM from vdev_id %d\n",
900 peer->addr, vdev_id);
901
902 ath10k_peer_map_cleanup(ar, peer);
903 }
904 spin_unlock_bh(lock: &ar->data_lock);
905}
906
907static void ath10k_peer_cleanup_all(struct ath10k *ar)
908{
909 struct ath10k_peer *peer, *tmp;
910 int i;
911
912 lockdep_assert_held(&ar->conf_mutex);
913
914 spin_lock_bh(lock: &ar->data_lock);
915 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
916 list_del(entry: &peer->list);
917 kfree(objp: peer);
918 }
919
920 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
921 ar->peer_map[i] = NULL;
922
923 spin_unlock_bh(lock: &ar->data_lock);
924
925 ar->num_peers = 0;
926 ar->num_stations = 0;
927}
928
929static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
930 struct ieee80211_sta *sta,
931 enum wmi_tdls_peer_state state)
932{
933 int ret;
934 struct wmi_tdls_peer_update_cmd_arg arg = {};
935 struct wmi_tdls_peer_capab_arg cap = {};
936 struct wmi_channel_arg chan_arg = {};
937
938 lockdep_assert_held(&ar->conf_mutex);
939
940 arg.vdev_id = vdev_id;
941 arg.peer_state = state;
942 ether_addr_copy(dst: arg.addr, src: sta->addr);
943
944 cap.peer_max_sp = sta->max_sp;
945 cap.peer_uapsd_queues = sta->uapsd_queues;
946
947 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
948 !sta->tdls_initiator)
949 cap.is_peer_responder = 1;
950
951 ret = ath10k_wmi_tdls_peer_update(ar, arg: &arg, cap: &cap, chan: &chan_arg);
952 if (ret) {
953 ath10k_warn(ar, fmt: "failed to update tdls peer %pM on vdev %i: %i\n",
954 arg.addr, vdev_id, ret);
955 return ret;
956 }
957
958 return 0;
959}
960
961/************************/
962/* Interface management */
963/************************/
964
965void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
966{
967 struct ath10k *ar = arvif->ar;
968
969 lockdep_assert_held(&ar->data_lock);
970
971 if (!arvif->beacon)
972 return;
973
974 if (!arvif->beacon_buf)
975 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
976 arvif->beacon->len, DMA_TO_DEVICE);
977
978 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
979 arvif->beacon_state != ATH10K_BEACON_SENT))
980 return;
981
982 dev_kfree_skb_any(skb: arvif->beacon);
983
984 arvif->beacon = NULL;
985 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
986}
987
988static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
989{
990 struct ath10k *ar = arvif->ar;
991
992 lockdep_assert_held(&ar->data_lock);
993
994 ath10k_mac_vif_beacon_free(arvif);
995
996 if (arvif->beacon_buf) {
997 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
998 kfree(objp: arvif->beacon_buf);
999 else
1000 dma_free_coherent(dev: ar->dev, IEEE80211_MAX_FRAME_LEN,
1001 cpu_addr: arvif->beacon_buf,
1002 dma_handle: arvif->beacon_paddr);
1003 arvif->beacon_buf = NULL;
1004 }
1005}
1006
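/* Wait for the firmware's response to a vdev start/restart/stop request and
 * return the status it reported, or -ETIMEDOUT/-ESHUTDOWN on failure.
 */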
1007static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
1008{
1009 unsigned long time_left;
1010
1011 lockdep_assert_held(&ar->conf_mutex);
1012
1013 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1014 return -ESHUTDOWN;
1015
1016 time_left = wait_for_completion_timeout(x: &ar->vdev_setup_done,
1017 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
1018 if (time_left == 0)
1019 return -ETIMEDOUT;
1020
1021 return ar->last_wmi_vdev_start_status;
1022}
1023
1024static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
1025{
1026 struct cfg80211_chan_def *chandef = NULL;
1027 struct ieee80211_channel *channel = NULL;
1028 struct wmi_vdev_start_request_arg arg = {};
1029 int ret = 0;
1030
1031 lockdep_assert_held(&ar->conf_mutex);
1032
1033 ieee80211_iter_chan_contexts_atomic(hw: ar->hw,
1034 iter: ath10k_mac_get_any_chandef_iter,
1035 iter_data: &chandef);
1036 if (WARN_ON_ONCE(!chandef))
1037 return -ENOENT;
1038
1039 channel = chandef->chan;
1040
1041 arg.vdev_id = vdev_id;
1042 arg.channel.freq = channel->center_freq;
1043 arg.channel.band_center_freq1 = chandef->center_freq1;
1044 arg.channel.band_center_freq2 = chandef->center_freq2;
1045
	/* TODO: set this up dynamically. What about the case where we
	 * don't have any vifs?
	 */
1049 arg.channel.mode = chan_to_phymode(chandef);
1050 arg.channel.chan_radar =
1051 !!(channel->flags & IEEE80211_CHAN_RADAR);
1052
1053 arg.channel.min_power = 0;
1054 arg.channel.max_power = channel->max_power * 2;
1055 arg.channel.max_reg_power = channel->max_reg_power * 2;
1056 arg.channel.max_antenna_gain = channel->max_antenna_gain;
1057
1058 reinit_completion(x: &ar->vdev_setup_done);
1059 reinit_completion(x: &ar->vdev_delete_done);
1060
1061 ret = ath10k_wmi_vdev_start(ar, arg: &arg);
1062 if (ret) {
1063 ath10k_warn(ar, fmt: "failed to request monitor vdev %i start: %d\n",
1064 vdev_id, ret);
1065 return ret;
1066 }
1067
1068 ret = ath10k_vdev_setup_sync(ar);
1069 if (ret) {
1070 ath10k_warn(ar, fmt: "failed to synchronize setup for monitor vdev %i start: %d\n",
1071 vdev_id, ret);
1072 return ret;
1073 }
1074
1075 ret = ath10k_wmi_vdev_up(ar, vdev_id, aid: 0, bssid: ar->mac_addr);
1076 if (ret) {
1077 ath10k_warn(ar, fmt: "failed to put up monitor vdev %i: %d\n",
1078 vdev_id, ret);
1079 goto vdev_stop;
1080 }
1081
1082 ar->monitor_vdev_id = vdev_id;
1083
1084 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1085 ar->monitor_vdev_id);
1086 return 0;
1087
1088vdev_stop:
1089 ret = ath10k_wmi_vdev_stop(ar, vdev_id: ar->monitor_vdev_id);
1090 if (ret)
1091 ath10k_warn(ar, fmt: "failed to stop monitor vdev %i after start failure: %d\n",
1092 ar->monitor_vdev_id, ret);
1093
1094 return ret;
1095}
1096
1097static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1098{
1099 int ret = 0;
1100
1101 lockdep_assert_held(&ar->conf_mutex);
1102
1103 ret = ath10k_wmi_vdev_down(ar, vdev_id: ar->monitor_vdev_id);
1104 if (ret)
1105 ath10k_warn(ar, fmt: "failed to put down monitor vdev %i: %d\n",
1106 ar->monitor_vdev_id, ret);
1107
1108 reinit_completion(x: &ar->vdev_setup_done);
1109 reinit_completion(x: &ar->vdev_delete_done);
1110
1111 ret = ath10k_wmi_vdev_stop(ar, vdev_id: ar->monitor_vdev_id);
1112 if (ret)
1113 ath10k_warn(ar, fmt: "failed to request monitor vdev %i stop: %d\n",
1114 ar->monitor_vdev_id, ret);
1115
1116 ret = ath10k_vdev_setup_sync(ar);
1117 if (ret)
1118 ath10k_warn(ar, fmt: "failed to synchronize monitor vdev %i stop: %d\n",
1119 ar->monitor_vdev_id, ret);
1120
1121 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1122 ar->monitor_vdev_id);
1123 return ret;
1124}
1125
1126static int ath10k_monitor_vdev_create(struct ath10k *ar)
1127{
1128 int bit, ret = 0;
1129
1130 lockdep_assert_held(&ar->conf_mutex);
1131
1132 if (ar->free_vdev_map == 0) {
1133 ath10k_warn(ar, fmt: "failed to find free vdev id for monitor vdev\n");
1134 return -ENOMEM;
1135 }
1136
1137 bit = __ffs64(word: ar->free_vdev_map);
1138
1139 ar->monitor_vdev_id = bit;
1140
1141 ret = ath10k_wmi_vdev_create(ar, vdev_id: ar->monitor_vdev_id,
1142 type: WMI_VDEV_TYPE_MONITOR,
1143 subtype: 0, macaddr: ar->mac_addr);
1144 if (ret) {
1145 ath10k_warn(ar, fmt: "failed to request monitor vdev %i creation: %d\n",
1146 ar->monitor_vdev_id, ret);
1147 return ret;
1148 }
1149
1150 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1151 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1152 ar->monitor_vdev_id);
1153
1154 return 0;
1155}
1156
1157static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1158{
1159 int ret = 0;
1160
1161 lockdep_assert_held(&ar->conf_mutex);
1162
1163 ret = ath10k_wmi_vdev_delete(ar, vdev_id: ar->monitor_vdev_id);
1164 if (ret) {
1165 ath10k_warn(ar, fmt: "failed to request wmi monitor vdev %i removal: %d\n",
1166 ar->monitor_vdev_id, ret);
1167 return ret;
1168 }
1169
1170 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1171
1172 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1173 ar->monitor_vdev_id);
1174 return ret;
1175}
1176
1177static int ath10k_monitor_start(struct ath10k *ar)
1178{
1179 int ret;
1180
1181 lockdep_assert_held(&ar->conf_mutex);
1182
1183 ret = ath10k_monitor_vdev_create(ar);
1184 if (ret) {
1185 ath10k_warn(ar, fmt: "failed to create monitor vdev: %d\n", ret);
1186 return ret;
1187 }
1188
1189 ret = ath10k_monitor_vdev_start(ar, vdev_id: ar->monitor_vdev_id);
1190 if (ret) {
1191 ath10k_warn(ar, fmt: "failed to start monitor vdev: %d\n", ret);
1192 ath10k_monitor_vdev_delete(ar);
1193 return ret;
1194 }
1195
1196 ar->monitor_started = true;
1197 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1198
1199 return 0;
1200}
1201
1202static int ath10k_monitor_stop(struct ath10k *ar)
1203{
1204 int ret;
1205
1206 lockdep_assert_held(&ar->conf_mutex);
1207
1208 ret = ath10k_monitor_vdev_stop(ar);
1209 if (ret) {
1210 ath10k_warn(ar, fmt: "failed to stop monitor vdev: %d\n", ret);
1211 return ret;
1212 }
1213
1214 ret = ath10k_monitor_vdev_delete(ar);
1215 if (ret) {
1216 ath10k_warn(ar, fmt: "failed to delete monitor vdev: %d\n", ret);
1217 return ret;
1218 }
1219
1220 ar->monitor_started = false;
1221 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1222
1223 return 0;
1224}
1225
1226static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1227{
1228 int num_ctx;
1229
1230 /* At least one chanctx is required to derive a channel to start
1231 * monitor vdev on.
1232 */
1233 num_ctx = ath10k_mac_num_chanctxs(ar);
1234 if (num_ctx == 0)
1235 return false;
1236
1237 /* If there's already an existing special monitor interface then don't
1238 * bother creating another monitor vdev.
1239 */
1240 if (ar->monitor_arvif)
1241 return false;
1242
1243 return ar->monitor ||
1244 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
1245 ar->running_fw->fw_file.fw_features) &&
1246 (ar->filter_flags & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) ||
1247 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1248}
1249
1250static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1251{
1252 int num_ctx;
1253
1254 num_ctx = ath10k_mac_num_chanctxs(ar);
1255
1256 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1257 * shouldn't allow this but make sure to prevent handling the following
1258 * case anyway since multi-channel DFS hasn't been tested at all.
1259 */
1260 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1261 return false;
1262
1263 return true;
1264}
1265
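/* Reconcile the monitor vdev with the current state: start it when needed
 * (monitor mode, promiscuous-style filter flags or CAC) and allowed, stop it
 * when it is no longer needed.
 */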
1266static int ath10k_monitor_recalc(struct ath10k *ar)
1267{
1268 bool needed;
1269 bool allowed;
1270 int ret;
1271
1272 lockdep_assert_held(&ar->conf_mutex);
1273
1274 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1275 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1276
1277 ath10k_dbg(ar, ATH10K_DBG_MAC,
1278 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1279 ar->monitor_started, needed, allowed);
1280
1281 if (WARN_ON(needed && !allowed)) {
1282 if (ar->monitor_started) {
1283 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1284
1285 ret = ath10k_monitor_stop(ar);
1286 if (ret)
1287 ath10k_warn(ar, fmt: "failed to stop disallowed monitor: %d\n",
1288 ret);
1289 /* not serious */
1290 }
1291
1292 return -EPERM;
1293 }
1294
1295 if (needed == ar->monitor_started)
1296 return 0;
1297
1298 if (needed)
1299 return ath10k_monitor_start(ar);
1300 else
1301 return ath10k_monitor_stop(ar);
1302}
1303
1304static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
1305{
1306 struct ath10k *ar = arvif->ar;
1307
1308 lockdep_assert_held(&ar->conf_mutex);
1309
1310 if (!arvif->is_started) {
1311 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
1312 return false;
1313 }
1314
1315 return true;
1316}
1317
1318static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
1319{
1320 struct ath10k *ar = arvif->ar;
1321 u32 vdev_param;
1322
1323 lockdep_assert_held(&ar->conf_mutex);
1324
1325 vdev_param = ar->wmi.vdev_param->protection_mode;
1326
1327 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
1328 arvif->vdev_id, arvif->use_cts_prot);
1329
1330 return ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
1331 param_value: arvif->use_cts_prot ? 1 : 0);
1332}
1333
1334static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1335{
1336 struct ath10k *ar = arvif->ar;
1337 u32 vdev_param, rts_cts = 0;
1338
1339 lockdep_assert_held(&ar->conf_mutex);
1340
1341 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1342
1343 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1344
1345 if (arvif->num_legacy_stations > 0)
1346 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1347 WMI_RTSCTS_PROFILE);
1348 else
1349 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1350 WMI_RTSCTS_PROFILE);
1351
1352 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
1353 arvif->vdev_id, rts_cts);
1354
1355 return ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
1356 param_value: rts_cts);
1357}
1358
1359static int ath10k_start_cac(struct ath10k *ar)
1360{
1361 int ret;
1362
1363 lockdep_assert_held(&ar->conf_mutex);
1364
1365 set_bit(nr: ATH10K_CAC_RUNNING, addr: &ar->dev_flags);
1366
1367 ret = ath10k_monitor_recalc(ar);
1368 if (ret) {
1369 ath10k_warn(ar, fmt: "failed to start monitor (cac): %d\n", ret);
1370 clear_bit(nr: ATH10K_CAC_RUNNING, addr: &ar->dev_flags);
1371 return ret;
1372 }
1373
1374 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1375 ar->monitor_vdev_id);
1376
1377 return 0;
1378}
1379
1380static int ath10k_stop_cac(struct ath10k *ar)
1381{
1382 lockdep_assert_held(&ar->conf_mutex);
1383
1384 /* CAC is not running - do nothing */
1385 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1386 return 0;
1387
1388 clear_bit(nr: ATH10K_CAC_RUNNING, addr: &ar->dev_flags);
1389 ath10k_monitor_stop(ar);
1390
1391 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1392
1393 return 0;
1394}
1395
1396static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1397 struct ieee80211_chanctx_conf *conf,
1398 void *data)
1399{
1400 bool *ret = data;
1401
1402 if (!*ret && conf->radar_enabled)
1403 *ret = true;
1404}
1405
1406static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1407{
1408 bool has_radar = false;
1409
1410 ieee80211_iter_chan_contexts_atomic(hw: ar->hw,
1411 iter: ath10k_mac_has_radar_iter,
1412 iter_data: &has_radar);
1413
1414 return has_radar;
1415}
1416
1417static void ath10k_recalc_radar_detection(struct ath10k *ar)
1418{
1419 int ret;
1420
1421 lockdep_assert_held(&ar->conf_mutex);
1422
1423 ath10k_stop_cac(ar);
1424
1425 if (!ath10k_mac_has_radar_enabled(ar))
1426 return;
1427
1428 if (ar->num_started_vdevs > 0)
1429 return;
1430
1431 ret = ath10k_start_cac(ar);
1432 if (ret) {
		/*
		 * It is not possible to start CAC on the current channel, so
		 * starting radiation is not allowed. Mark this channel
		 * DFS_UNAVAILABLE by indicating that radar was detected.
		 */
1438 ath10k_warn(ar, fmt: "failed to start CAC: %d\n", ret);
1439 ieee80211_radar_detected(hw: ar->hw);
1440 }
1441}
1442
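/* Stop a started vdev, wait for the firmware's response and update the
 * started-vdev count and radar detection (CAC) state accordingly.
 */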
1443static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1444{
1445 struct ath10k *ar = arvif->ar;
1446 int ret;
1447
1448 lockdep_assert_held(&ar->conf_mutex);
1449
1450 reinit_completion(x: &ar->vdev_setup_done);
1451 reinit_completion(x: &ar->vdev_delete_done);
1452
1453 ret = ath10k_wmi_vdev_stop(ar, vdev_id: arvif->vdev_id);
1454 if (ret) {
1455 ath10k_warn(ar, fmt: "failed to stop WMI vdev %i: %d\n",
1456 arvif->vdev_id, ret);
1457 return ret;
1458 }
1459
1460 ret = ath10k_vdev_setup_sync(ar);
1461 if (ret) {
1462 ath10k_warn(ar, fmt: "failed to synchronize setup for vdev %i: %d\n",
1463 arvif->vdev_id, ret);
1464 return ret;
1465 }
1466
1467 WARN_ON(ar->num_started_vdevs == 0);
1468
1469 if (ar->num_started_vdevs != 0) {
1470 ar->num_started_vdevs--;
1471 ath10k_recalc_radar_detection(ar);
1472 }
1473
1474 return ret;
1475}
1476
1477static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1478 const struct cfg80211_chan_def *chandef,
1479 bool restart)
1480{
1481 struct ath10k *ar = arvif->ar;
1482 struct wmi_vdev_start_request_arg arg = {};
1483 int ret = 0;
1484
1485 lockdep_assert_held(&ar->conf_mutex);
1486
1487 reinit_completion(x: &ar->vdev_setup_done);
1488 reinit_completion(x: &ar->vdev_delete_done);
1489
1490 arg.vdev_id = arvif->vdev_id;
1491 arg.dtim_period = arvif->dtim_period;
1492 arg.bcn_intval = arvif->beacon_interval;
1493
1494 arg.channel.freq = chandef->chan->center_freq;
1495 arg.channel.band_center_freq1 = chandef->center_freq1;
1496 arg.channel.band_center_freq2 = chandef->center_freq2;
1497 arg.channel.mode = chan_to_phymode(chandef);
1498
1499 arg.channel.min_power = 0;
1500 arg.channel.max_power = chandef->chan->max_power * 2;
1501 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1502 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
1503
1504 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1505 arg.ssid = arvif->u.ap.ssid;
1506 arg.ssid_len = arvif->u.ap.ssid_len;
1507 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1508
1509 /* For now allow DFS for AP mode */
1510 arg.channel.chan_radar =
1511 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1512 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1513 arg.ssid = arvif->vif->cfg.ssid;
1514 arg.ssid_len = arvif->vif->cfg.ssid_len;
1515 }
1516
1517 ath10k_dbg(ar, ATH10K_DBG_MAC,
1518 "mac vdev %d start center_freq %d phymode %s\n",
1519 arg.vdev_id, arg.channel.freq,
1520 ath10k_wmi_phymode_str(arg.channel.mode));
1521
1522 if (restart)
1523 ret = ath10k_wmi_vdev_restart(ar, arg: &arg);
1524 else
1525 ret = ath10k_wmi_vdev_start(ar, arg: &arg);
1526
1527 if (ret) {
1528 ath10k_warn(ar, fmt: "failed to start WMI vdev %i: %d\n",
1529 arg.vdev_id, ret);
1530 return ret;
1531 }
1532
1533 ret = ath10k_vdev_setup_sync(ar);
1534 if (ret) {
1535 ath10k_warn(ar,
1536 fmt: "failed to synchronize setup for vdev %i restart %d: %d\n",
1537 arg.vdev_id, restart, ret);
1538 return ret;
1539 }
1540
1541 ar->num_started_vdevs++;
1542 ath10k_recalc_radar_detection(ar);
1543
1544 return ret;
1545}
1546
1547static int ath10k_vdev_start(struct ath10k_vif *arvif,
1548 const struct cfg80211_chan_def *def)
1549{
1550 return ath10k_vdev_start_restart(arvif, chandef: def, restart: false);
1551}
1552
1553static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1554 const struct cfg80211_chan_def *def)
1555{
1556 return ath10k_vdev_start_restart(arvif, chandef: def, restart: true);
1557}
1558
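/* For P2P GO interfaces, locate the P2P vendor IE in the beacon and pass it
 * to the firmware so it can be inserted into offloaded beacons.
 */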
1559static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1560 struct sk_buff *bcn)
1561{
1562 struct ath10k *ar = arvif->ar;
1563 struct ieee80211_mgmt *mgmt;
1564 const u8 *p2p_ie;
1565 int ret;
1566
1567 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1568 return 0;
1569
1570 mgmt = (void *)bcn->data;
1571 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1572 ies: mgmt->u.beacon.variable,
1573 len: bcn->len - (mgmt->u.beacon.variable -
1574 bcn->data));
1575 if (!p2p_ie)
1576 return -ENOENT;
1577
1578 ret = ath10k_wmi_p2p_go_bcn_ie(ar, vdev_id: arvif->vdev_id, p2p_ie);
1579 if (ret) {
1580 ath10k_warn(ar, fmt: "failed to submit p2p go bcn ie for vdev %i: %d\n",
1581 arvif->vdev_id, ret);
1582 return ret;
1583 }
1584
1585 return 0;
1586}
1587
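/* Remove the first vendor IE matching oui/oui_type from the skb, searching
 * from ie_offset onwards, and trim the skb accordingly.
 */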
1588static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1589 u8 oui_type, size_t ie_offset)
1590{
1591 size_t len;
1592 const u8 *next;
1593 const u8 *end;
1594 u8 *ie;
1595
1596 if (WARN_ON(skb->len < ie_offset))
1597 return -EINVAL;
1598
1599 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1600 ies: skb->data + ie_offset,
1601 len: skb->len - ie_offset);
1602 if (!ie)
1603 return -ENOENT;
1604
1605 len = ie[1] + 2;
1606 end = skb->data + skb->len;
1607 next = ie + len;
1608
1609 if (WARN_ON(next > end))
1610 return -EINVAL;
1611
1612 memmove(ie, next, end - next);
1613 skb_trim(skb, len: skb->len - len);
1614
1615 return 0;
1616}
1617
1618static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1619{
1620 struct ath10k *ar = arvif->ar;
1621 struct ieee80211_hw *hw = ar->hw;
1622 struct ieee80211_vif *vif = arvif->vif;
1623 struct ieee80211_mutable_offsets offs = {};
1624 struct sk_buff *bcn;
1625 int ret;
1626
1627 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1628 return 0;
1629
1630 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1631 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1632 return 0;
1633
1634 bcn = ieee80211_beacon_get_template(hw, vif, offs: &offs, link_id: 0);
1635 if (!bcn) {
1636 ath10k_warn(ar, fmt: "failed to get beacon template from mac80211\n");
1637 return -EPERM;
1638 }
1639
1640 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1641 if (ret) {
1642 ath10k_warn(ar, fmt: "failed to setup p2p go bcn ie: %d\n", ret);
1643 kfree_skb(skb: bcn);
1644 return ret;
1645 }
1646
1647 /* P2P IE is inserted by firmware automatically (as configured above)
1648 * so remove it from the base beacon template to avoid duplicate P2P
1649 * IEs in beacon frames.
1650 */
1651 ath10k_mac_remove_vendor_ie(skb: bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1652 offsetof(struct ieee80211_mgmt,
1653 u.beacon.variable));
1654
1655 ret = ath10k_wmi_bcn_tmpl(ar, vdev_id: arvif->vdev_id, tim_ie_offset: offs.tim_offset, bcn, prb_caps: 0,
1656 prb_erp: 0, NULL, prb_ies_len: 0);
1657 kfree_skb(skb: bcn);
1658
1659 if (ret) {
1660 ath10k_warn(ar, fmt: "failed to submit beacon template command: %d\n",
1661 ret);
1662 return ret;
1663 }
1664
1665 return 0;
1666}
1667
1668static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1669{
1670 struct ath10k *ar = arvif->ar;
1671 struct ieee80211_hw *hw = ar->hw;
1672 struct ieee80211_vif *vif = arvif->vif;
1673 struct sk_buff *prb;
1674 int ret;
1675
1676 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1677 return 0;
1678
1679 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1680 return 0;
1681
1682 /* For mesh, probe response and beacon share the same template */
1683 if (ieee80211_vif_is_mesh(vif))
1684 return 0;
1685
1686 prb = ieee80211_proberesp_get(hw, vif);
1687 if (!prb) {
1688 ath10k_warn(ar, fmt: "failed to get probe resp template from mac80211\n");
1689 return -EPERM;
1690 }
1691
1692 ret = ath10k_wmi_prb_tmpl(ar, vdev_id: arvif->vdev_id, prb);
1693 kfree_skb(skb: prb);
1694
1695 if (ret) {
1696 ath10k_warn(ar, fmt: "failed to submit probe resp template command: %d\n",
1697 ret);
1698 return ret;
1699 }
1700
1701 return 0;
1702}
1703
1704static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1705{
1706 struct ath10k *ar = arvif->ar;
1707 struct cfg80211_chan_def def;
1708 int ret;
1709
	/* When the vdev is originally started during assign_vif_chanctx()
	 * some information is missing, notably the SSID. Firmware revisions
	 * with beacon offloading require the SSID to be provided during vdev
	 * (re)start to handle hidden SSID properly.
	 *
	 * Vdev restart must be done after the vdev has been both started and
	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
	 * deliver the vdev restart response event, causing timeouts during
	 * vdev syncing in ath10k.
	 *
	 * Note: the vdev down/up and template reinstallation could be skipped
	 * since only wmi-tlv firmware is known to have beacon offload and
	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
	 * response delivery. It's probably more robust to keep it as is.
	 */
1725 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1726 return 0;
1727
1728 if (WARN_ON(!arvif->is_started))
1729 return -EINVAL;
1730
1731 if (WARN_ON(!arvif->is_up))
1732 return -EINVAL;
1733
1734 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1735 return -EINVAL;
1736
1737 ret = ath10k_wmi_vdev_down(ar, vdev_id: arvif->vdev_id);
1738 if (ret) {
1739 ath10k_warn(ar, fmt: "failed to bring down ap vdev %i: %d\n",
1740 arvif->vdev_id, ret);
1741 return ret;
1742 }
1743
	/* Vdev down resets the beacon & probe response templates. Reinstall
	 * them, otherwise the firmware will crash upon vdev up.
	 */
1747
1748 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1749 if (ret) {
1750 ath10k_warn(ar, fmt: "failed to update beacon template: %d\n", ret);
1751 return ret;
1752 }
1753
1754 ret = ath10k_mac_setup_prb_tmpl(arvif);
1755 if (ret) {
1756 ath10k_warn(ar, fmt: "failed to update presp template: %d\n", ret);
1757 return ret;
1758 }
1759
1760 ret = ath10k_vdev_restart(arvif, def: &def);
1761 if (ret) {
1762 ath10k_warn(ar, fmt: "failed to restart ap vdev %i: %d\n",
1763 arvif->vdev_id, ret);
1764 return ret;
1765 }
1766
1767 ret = ath10k_wmi_vdev_up(ar: arvif->ar, vdev_id: arvif->vdev_id, aid: arvif->aid,
1768 bssid: arvif->bssid);
1769 if (ret) {
1770 ath10k_warn(ar, fmt: "failed to bring up ap vdev %i: %d\n",
1771 arvif->vdev_id, ret);
1772 return ret;
1773 }
1774
1775 return 0;
1776}
1777
1778static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1779 struct ieee80211_bss_conf *info)
1780{
1781 struct ath10k *ar = arvif->ar;
1782 int ret = 0;
1783
1784 lockdep_assert_held(&arvif->ar->conf_mutex);
1785
1786 if (!info->enable_beacon) {
1787 ret = ath10k_wmi_vdev_down(ar, vdev_id: arvif->vdev_id);
1788 if (ret)
1789 ath10k_warn(ar, fmt: "failed to down vdev_id %i: %d\n",
1790 arvif->vdev_id, ret);
1791
1792 arvif->is_up = false;
1793
1794 spin_lock_bh(lock: &arvif->ar->data_lock);
1795 ath10k_mac_vif_beacon_free(arvif);
1796 spin_unlock_bh(lock: &arvif->ar->data_lock);
1797
1798 return;
1799 }
1800
1801 arvif->tx_seq_no = 0x1000;
1802
1803 arvif->aid = 0;
1804 ether_addr_copy(dst: arvif->bssid, src: info->bssid);
1805
1806 ret = ath10k_wmi_vdev_up(ar: arvif->ar, vdev_id: arvif->vdev_id, aid: arvif->aid,
1807 bssid: arvif->bssid);
1808 if (ret) {
1809 ath10k_warn(ar, fmt: "failed to bring up vdev %d: %i\n",
1810 arvif->vdev_id, ret);
1811 return;
1812 }
1813
1814 arvif->is_up = true;
1815
1816 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1817 if (ret) {
1818 ath10k_warn(ar, fmt: "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1819 arvif->vdev_id, ret);
1820 return;
1821 }
1822
1823 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1824}
1825
1826static void ath10k_control_ibss(struct ath10k_vif *arvif,
1827 struct ieee80211_vif *vif)
1828{
1829 struct ath10k *ar = arvif->ar;
1830 u32 vdev_param;
1831 int ret = 0;
1832
1833 lockdep_assert_held(&arvif->ar->conf_mutex);
1834
1835 if (!vif->cfg.ibss_joined) {
1836 if (is_zero_ether_addr(addr: arvif->bssid))
1837 return;
1838
1839 eth_zero_addr(addr: arvif->bssid);
1840
1841 return;
1842 }
1843
1844 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1845 ret = ath10k_wmi_vdev_set_param(ar: arvif->ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
1846 ATH10K_DEFAULT_ATIM);
1847 if (ret)
1848 ath10k_warn(ar, fmt: "failed to set IBSS ATIM for vdev %d: %d\n",
1849 arvif->vdev_id, ret);
1850}
1851
1852static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1853{
1854 struct ath10k *ar = arvif->ar;
1855 u32 param;
1856 u32 value;
1857 int ret;
1858
1859 lockdep_assert_held(&arvif->ar->conf_mutex);
1860
1861 if (arvif->u.sta.uapsd)
1862 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1863 else
1864 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1865
1866 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1867 ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id: arvif->vdev_id, param_id: param, value);
1868 if (ret) {
1869 ath10k_warn(ar, fmt: "failed to submit ps wake threshold %u on vdev %i: %d\n",
1870 value, arvif->vdev_id, ret);
1871 return ret;
1872 }
1873
1874 return 0;
1875}
1876
1877static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1878{
1879 struct ath10k *ar = arvif->ar;
1880 u32 param;
1881 u32 value;
1882 int ret;
1883
1884 lockdep_assert_held(&arvif->ar->conf_mutex);
1885
1886 if (arvif->u.sta.uapsd)
1887 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1888 else
1889 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1890
1891 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1892 ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id: arvif->vdev_id,
1893 param_id: param, value);
1894 if (ret) {
1895 ath10k_warn(ar, fmt: "failed to submit ps poll count %u on vdev %i: %d\n",
1896 value, arvif->vdev_id, ret);
1897 return ret;
1898 }
1899
1900 return 0;
1901}
1902
1903static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1904{
1905 struct ath10k_vif *arvif;
1906 int num = 0;
1907
1908 lockdep_assert_held(&ar->conf_mutex);
1909
1910 list_for_each_entry(arvif, &ar->arvifs, list)
1911 if (arvif->is_started)
1912 num++;
1913
1914 return num;
1915}
1916
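/* Program STA powersave for the vdev: PS is force-enabled while the vdev is
 * not started, and the inactivity timeout is taken from dynamic_ps_timeout
 * (falling back to the beacon interval when that is zero).
 */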
1917static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1918{
1919 struct ath10k *ar = arvif->ar;
1920 struct ieee80211_vif *vif = arvif->vif;
1921 struct ieee80211_conf *conf = &ar->hw->conf;
1922 enum wmi_sta_powersave_param param;
1923 enum wmi_sta_ps_mode psmode;
1924 int ret;
1925 int ps_timeout;
1926 bool enable_ps;
1927
1928 lockdep_assert_held(&arvif->ar->conf_mutex);
1929
1930 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1931 return 0;
1932
1933 enable_ps = arvif->ps;
1934
1935 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1936 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1937 ar->running_fw->fw_file.fw_features)) {
1938 ath10k_warn(ar, fmt: "refusing to enable ps on vdev %i: not supported by fw\n",
1939 arvif->vdev_id);
1940 enable_ps = false;
1941 }
1942
1943 if (!arvif->is_started) {
1944 /* mac80211 can update vif powersave state while disconnected.
1945 * Firmware doesn't behave nicely and consumes more power than
1946 * necessary if PS is disabled on a non-started vdev. Hence
1947 * force-enable PS for non-running vdevs.
1948 */
1949 psmode = WMI_STA_PS_MODE_ENABLED;
1950 } else if (enable_ps) {
1951 psmode = WMI_STA_PS_MODE_ENABLED;
1952 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1953
1954 ps_timeout = conf->dynamic_ps_timeout;
1955 if (ps_timeout == 0) {
1956 /* Firmware doesn't like 0 */
1957 ps_timeout = ieee80211_tu_to_usec(
1958 tu: vif->bss_conf.beacon_int) / 1000;
1959 }
1960
1961 ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id: arvif->vdev_id, param_id: param,
1962 value: ps_timeout);
1963 if (ret) {
1964 ath10k_warn(ar, fmt: "failed to set inactivity time for vdev %d: %i\n",
1965 arvif->vdev_id, ret);
1966 return ret;
1967 }
1968 } else {
1969 psmode = WMI_STA_PS_MODE_DISABLED;
1970 }
1971
1972 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1973 arvif->vdev_id, psmode ? "enable" : "disable");
1974
1975 ret = ath10k_wmi_set_psmode(ar, vdev_id: arvif->vdev_id, psmode);
1976 if (ret) {
1977 ath10k_warn(ar, fmt: "failed to set PS Mode %d for vdev %d: %d\n",
1978 psmode, arvif->vdev_id, ret);
1979 return ret;
1980 }
1981
1982 return 0;
1983}
1984
1985static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1986{
1987 struct ath10k *ar = arvif->ar;
1988 struct wmi_sta_keepalive_arg arg = {};
1989 int ret;
1990
1991 lockdep_assert_held(&arvif->ar->conf_mutex);
1992
1993 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1994 return 0;
1995
1996 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1997 return 0;
1998
1999 /* Some firmware revisions have a bug and ignore the `enabled` field.
2000 * Instead use the interval to disable the keepalive.
2001 */
2002 arg.vdev_id = arvif->vdev_id;
2003 arg.enabled = 1;
2004 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
2005 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
2006
2007 ret = ath10k_wmi_sta_keepalive(ar, arg: &arg);
2008 if (ret) {
2009 ath10k_warn(ar, fmt: "failed to submit keepalive on vdev %i: %d\n",
2010 arvif->vdev_id, ret);
2011 return ret;
2012 }
2013
2014 return 0;
2015}
2016
2017static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
2018{
2019 struct ath10k *ar = arvif->ar;
2020 struct ieee80211_vif *vif = arvif->vif;
2021 int ret;
2022
2023 lockdep_assert_held(&arvif->ar->conf_mutex);
2024
2025 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
2026 return;
2027
2028 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
2029 return;
2030
2031 if (!vif->bss_conf.csa_active)
2032 return;
2033
2034 if (!arvif->is_up)
2035 return;
2036
2037 if (!ieee80211_beacon_cntdwn_is_complete(vif, link_id: 0)) {
2038 ieee80211_beacon_update_cntdwn(vif, link_id: 0);
2039
2040 ret = ath10k_mac_setup_bcn_tmpl(arvif);
2041 if (ret)
2042 ath10k_warn(ar, fmt: "failed to update bcn tmpl during csa: %d\n",
2043 ret);
2044
2045 ret = ath10k_mac_setup_prb_tmpl(arvif);
2046 if (ret)
2047 ath10k_warn(ar, fmt: "failed to update prb tmpl during csa: %d\n",
2048 ret);
2049 } else {
2050 ieee80211_csa_finish(vif, link_id: 0);
2051 }
2052}
2053
2054static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
2055{
2056 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2057 ap_csa_work);
2058 struct ath10k *ar = arvif->ar;
2059
2060 mutex_lock(&ar->conf_mutex);
2061 ath10k_mac_vif_ap_csa_count_down(arvif);
2062 mutex_unlock(lock: &ar->conf_mutex);
2063}
2064
2065static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
2066 struct ieee80211_vif *vif)
2067{
2068 struct sk_buff *skb = data;
2069 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2070 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2071
2072 if (vif->type != NL80211_IFTYPE_STATION)
2073 return;
2074
2075 	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
2076 return;
2077
2078 	cancel_delayed_work(&arvif->connection_loss_work);
2079}
2080
2081void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
2082{
2083 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2084 						   ATH10K_ITER_NORMAL_FLAGS,
2085 						   ath10k_mac_handle_beacon_iter,
2086 						   skb);
2087}
2088
2089static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
2090 struct ieee80211_vif *vif)
2091{
2092 u32 *vdev_id = data;
2093 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2094 struct ath10k *ar = arvif->ar;
2095 struct ieee80211_hw *hw = ar->hw;
2096
2097 if (arvif->vdev_id != *vdev_id)
2098 return;
2099
2100 if (!arvif->is_up)
2101 return;
2102
2103 ieee80211_beacon_loss(vif);
2104
2105 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
2106 * (done by mac80211) succeeds but beacons do not resume then it
2107 * doesn't make sense to continue operation. Queue connection loss work
2108 * which can be cancelled when beacon is received.
2109 */
2110 	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
2111 ATH10K_CONNECTION_LOSS_HZ);
2112}
2113
2114void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2115{
2116 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2117 						   ATH10K_ITER_NORMAL_FLAGS,
2118 						   ath10k_mac_handle_beacon_miss_iter,
2119 						   &vdev_id);
2120}
2121
2122static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2123{
2124 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2125 connection_loss_work.work);
2126 struct ieee80211_vif *vif = arvif->vif;
2127
2128 if (!arvif->is_up)
2129 return;
2130
2131 ieee80211_connection_loss(vif);
2132}
2133
2134/**********************/
2135/* Station management */
2136/**********************/
2137
2138static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2139 struct ieee80211_vif *vif)
2140{
2141 /* Some firmware revisions have unstable STA powersave when listen
2142 * interval is set too high (e.g. 5). The symptoms are firmware doesn't
2143 * generate NullFunc frames properly even if buffered frames have been
2144 * indicated in Beacon TIM. Firmware would seldom wake up to pull
2145 * buffered frames. Often pinging the device from AP would simply fail.
2146 *
2147 * As a workaround set it to 1.
2148 */
2149 if (vif->type == NL80211_IFTYPE_STATION)
2150 return 1;
2151
2152 return ar->hw->conf.listen_interval;
2153}
2154
2155static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2156 struct ieee80211_vif *vif,
2157 struct ieee80211_sta *sta,
2158 struct wmi_peer_assoc_complete_arg *arg)
2159{
2160 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2161 u32 aid;
2162
2163 lockdep_assert_held(&ar->conf_mutex);
2164
2165 if (vif->type == NL80211_IFTYPE_STATION)
2166 aid = vif->cfg.aid;
2167 else
2168 aid = sta->aid;
2169
2170 	ether_addr_copy(arg->addr, sta->addr);
2171 arg->vdev_id = arvif->vdev_id;
2172 arg->peer_aid = aid;
2173 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2174 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2175 arg->peer_num_spatial_streams = 1;
2176 arg->peer_caps = vif->bss_conf.assoc_capability;
2177}
2178
2179static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2180 struct ieee80211_vif *vif,
2181 struct ieee80211_sta *sta,
2182 struct wmi_peer_assoc_complete_arg *arg)
2183{
2184 struct ieee80211_bss_conf *info = &vif->bss_conf;
2185 struct cfg80211_chan_def def;
2186 struct cfg80211_bss *bss;
2187 const u8 *rsnie = NULL;
2188 const u8 *wpaie = NULL;
2189
2190 lockdep_assert_held(&ar->conf_mutex);
2191
2192 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2193 return;
2194
2195 	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
2196 			       vif->cfg.ssid_len ? vif->cfg.ssid : NULL,
2197 			       vif->cfg.ssid_len,
2198 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2199 if (bss) {
2200 const struct cfg80211_bss_ies *ies;
2201
2202 rcu_read_lock();
2203 		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2204
2205 ies = rcu_dereference(bss->ies);
2206
2207 		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2208 						WLAN_OUI_TYPE_MICROSOFT_WPA,
2209 						ies->data,
2210 						ies->len);
2211 		rcu_read_unlock();
2212 		cfg80211_put_bss(ar->hw->wiphy, bss);
2213 }
2214
2215 	/* FIXME: is keying this off the RSN IE/WPA IE actually correct? */
2216 if (rsnie || wpaie) {
2217 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2218 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2219 }
2220
2221 if (wpaie) {
2222 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2223 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2224 }
2225
2226 if (sta->mfp &&
2227 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2228 ar->running_fw->fw_file.fw_features)) {
2229 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2230 }
2231}
2232
2233static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2234 struct ieee80211_vif *vif,
2235 struct ieee80211_sta *sta,
2236 struct wmi_peer_assoc_complete_arg *arg)
2237{
2238 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2239 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2240 struct cfg80211_chan_def def;
2241 const struct ieee80211_supported_band *sband;
2242 const struct ieee80211_rate *rates;
2243 enum nl80211_band band;
2244 u32 ratemask;
2245 u8 rate;
2246 int i;
2247
2248 lockdep_assert_held(&ar->conf_mutex);
2249
2250 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2251 return;
2252
2253 band = def.chan->band;
2254 sband = ar->hw->wiphy->bands[band];
2255 ratemask = sta->deflink.supp_rates[band];
2256 ratemask &= arvif->bitrate_mask.control[band].legacy;
2257 rates = sband->bitrates;
2258
2259 rateset->num_rates = 0;
2260
2261 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2262 if (!(ratemask & 1))
2263 continue;
2264
2265 		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2266 rateset->rates[rateset->num_rates] = rate;
2267 rateset->num_rates++;
2268 }
2269}
2270
2271static bool
2272ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2273{
2274 int nss;
2275
2276 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2277 if (ht_mcs_mask[nss])
2278 return false;
2279
2280 return true;
2281}
2282
2283static bool
2284ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2285{
2286 int nss;
2287
2288 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2289 if (vht_mcs_mask[nss])
2290 return false;
2291
2292 return true;
2293}
2294
2295static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2296 struct ieee80211_vif *vif,
2297 struct ieee80211_sta *sta,
2298 struct wmi_peer_assoc_complete_arg *arg)
2299{
2300 const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
2301 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2302 struct cfg80211_chan_def def;
2303 enum nl80211_band band;
2304 const u8 *ht_mcs_mask;
2305 const u16 *vht_mcs_mask;
2306 int i, n;
2307 u8 max_nss;
2308 u32 stbc;
2309
2310 lockdep_assert_held(&ar->conf_mutex);
2311
2312 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2313 return;
2314
2315 if (!ht_cap->ht_supported)
2316 return;
2317
2318 band = def.chan->band;
2319 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2320 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2321
2322 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2323 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2324 return;
2325
2326 arg->peer_flags |= ar->wmi.peer_flags->ht;
2327 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2328 ht_cap->ampdu_factor)) - 1;
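	/* For illustration: with IEEE80211_HT_MAX_AMPDU_FACTOR (13) and an
	 * ampdu_factor of 3 this evaluates to 2^16 - 1 = 65535 octets, the
	 * largest HT A-MPDU length.
	 */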
2329
2330 arg->peer_mpdu_density =
2331 		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2332
2333 arg->peer_ht_caps = ht_cap->cap;
2334 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2335
2336 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2337 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2338
2339 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
2340 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2341 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2342 }
2343
2344 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2345 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2346 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2347
2348 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2349 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2350 }
2351
2352 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2353 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2354 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2355 }
2356
2357 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2358 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2359 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2360 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2361 arg->peer_rate_caps |= stbc;
2362 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2363 }
2364
2365 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2366 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2367 else if (ht_cap->mcs.rx_mask[1])
2368 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2369
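	/* Note (for reference): bit i of mcs.rx_mask[] corresponds to HT MCS
	 * index i, and MCS 0-7 use one spatial stream, 8-15 two, and so on,
	 * which is why max_nss below is derived as i / 8 + 1.
	 */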
2370 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2371 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2372 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2373 max_nss = (i / 8) + 1;
2374 arg->peer_ht_rates.rates[n++] = i;
2375 }
2376
2377 /*
2378 * This is a workaround for HT-enabled STAs which break the spec
2379 * and have no HT capabilities RX mask (no HT RX MCS map).
2380 *
2381 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2382 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2383 *
2384 * Firmware asserts if such situation occurs.
2385 */
2386 if (n == 0) {
2387 arg->peer_ht_rates.num_rates = 8;
2388 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2389 arg->peer_ht_rates.rates[i] = i;
2390 } else {
2391 arg->peer_ht_rates.num_rates = n;
2392 arg->peer_num_spatial_streams = min(sta->deflink.rx_nss,
2393 max_nss);
2394 }
2395
2396 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2397 arg->addr,
2398 arg->peer_ht_rates.num_rates,
2399 arg->peer_num_spatial_streams);
2400}
2401
2402static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2403 struct ath10k_vif *arvif,
2404 struct ieee80211_sta *sta)
2405{
2406 u32 uapsd = 0;
2407 u32 max_sp = 0;
2408 int ret = 0;
2409
2410 lockdep_assert_held(&ar->conf_mutex);
2411
2412 if (sta->wme && sta->uapsd_queues) {
2413 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2414 sta->uapsd_queues, sta->max_sp);
2415
2416 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2417 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2418 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2419 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2420 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2421 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2422 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2423 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2424 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2425 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2426 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2427 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2428
2429 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2430 max_sp = sta->max_sp;
2431
2432 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2433 						 sta->addr,
2434 						 WMI_AP_PS_PEER_PARAM_UAPSD,
2435 						 uapsd);
2436 		if (ret) {
2437 			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2438 arvif->vdev_id, ret);
2439 return ret;
2440 }
2441
2442 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2443 						 sta->addr,
2444 						 WMI_AP_PS_PEER_PARAM_MAX_SP,
2445 						 max_sp);
2446 		if (ret) {
2447 			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2448 arvif->vdev_id, ret);
2449 return ret;
2450 }
2451
2452 /* TODO setup this based on STA listen interval and
2453 * beacon interval. Currently we don't know
2454 * sta->listen_interval - mac80211 patch required.
2455 * Currently use 10 seconds
2456 */
2457 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2458 						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2459 						 10);
2460 		if (ret) {
2461 			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2462 arvif->vdev_id, ret);
2463 return ret;
2464 }
2465 }
2466
2467 return 0;
2468}
2469
2470static u16
2471ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2472 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2473{
2474 int idx_limit;
2475 int nss;
2476 u16 mcs_map;
2477 u16 mcs;
2478
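	/* Note (for reference): the VHT MCS map packs 2 bits per spatial
	 * stream: 0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported.
	 * The loop below clamps each 2-bit field against the user's mask.
	 */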
2479 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2480 		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2481 vht_mcs_limit[nss];
2482
2483 if (mcs_map)
2484 			idx_limit = fls(mcs_map) - 1;
2485 else
2486 idx_limit = -1;
2487
2488 switch (idx_limit) {
2489 case 0:
2490 case 1:
2491 case 2:
2492 case 3:
2493 case 4:
2494 case 5:
2495 case 6:
2496 default:
2497 /* see ath10k_mac_can_set_bitrate_mask() */
2498 WARN_ON(1);
2499 fallthrough;
2500 case -1:
2501 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2502 break;
2503 case 7:
2504 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2505 break;
2506 case 8:
2507 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2508 break;
2509 case 9:
2510 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2511 break;
2512 }
2513
2514 tx_mcs_set &= ~(0x3 << (nss * 2));
2515 tx_mcs_set |= mcs << (nss * 2);
2516 }
2517
2518 return tx_mcs_set;
2519}
2520
2521static u32 get_160mhz_nss_from_maxrate(int rate)
2522{
2523 u32 nss;
2524
2525 switch (rate) {
2526 case 780:
2527 nss = 1;
2528 break;
2529 case 1560:
2530 nss = 2;
2531 break;
2532 case 2106:
2533 		nss = 3; /* MCS 9 is not supported in this case per spec */
2534 break;
2535 case 3120:
2536 nss = 4;
2537 break;
2538 default:
2539 nss = 1;
2540 }
2541
2542 return nss;
2543}
2544
2545static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2546 struct ieee80211_vif *vif,
2547 struct ieee80211_sta *sta,
2548 struct wmi_peer_assoc_complete_arg *arg)
2549{
2550 const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
2551 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2552 struct ath10k_hw_params *hw = &ar->hw_params;
2553 struct cfg80211_chan_def def;
2554 enum nl80211_band band;
2555 const u16 *vht_mcs_mask;
2556 u8 ampdu_factor;
2557 u8 max_nss, vht_mcs;
2558 int i;
2559
2560 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2561 return;
2562
2563 if (!vht_cap->vht_supported)
2564 return;
2565
2566 band = def.chan->band;
2567 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2568
2569 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2570 return;
2571
2572 arg->peer_flags |= ar->wmi.peer_flags->vht;
2573
2574 if (def.chan->band == NL80211_BAND_2GHZ)
2575 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2576
2577 arg->peer_vht_caps = vht_cap->cap;
2578
2579 ampdu_factor = (vht_cap->cap &
2580 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2581 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2582
2583 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2584 * zero in VHT IE. Using it would result in degraded throughput.
2585 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2586 * it if VHT max_mpdu is smaller.
2587 */
2588 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2589 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2590 ampdu_factor)) - 1);
2591
2592 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
2593 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2594
2595 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
2596 arg->peer_flags |= ar->wmi.peer_flags->bw160;
2597
2598 /* Calculate peer NSS capability from VHT capabilities if STA
2599 * supports VHT.
2600 */
2601 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
2602 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
2603 (2 * i) & 3;
2604
2605 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
2606 vht_mcs_mask[i])
2607 max_nss = i + 1;
2608 }
2609 arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
2610 arg->peer_vht_rates.rx_max_rate =
2611 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2612 arg->peer_vht_rates.rx_mcs_set =
2613 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2614 arg->peer_vht_rates.tx_max_rate =
2615 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2616 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2617 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2618
2619 	/* Configure the bandwidth-NSS mapping in firmware to match the
2620 	 * chip's TX chain configuration for 160 MHz bandwidth.
2621 	 */
2622 if (arg->peer_phymode == MODE_11AC_VHT160 ||
2623 arg->peer_phymode == MODE_11AC_VHT80_80) {
2624 u32 rx_nss;
2625 u32 max_rate;
2626
2627 max_rate = arg->peer_vht_rates.rx_max_rate;
2628 		rx_nss = get_160mhz_nss_from_maxrate(max_rate);
2629
2630 if (rx_nss == 0)
2631 rx_nss = arg->peer_num_spatial_streams;
2632 else
2633 rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
2634
2635 max_rate = hw->vht160_mcs_tx_highest;
2636 rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
2637
2638 arg->peer_bw_rxnss_override =
2639 FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
2640 FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
2641
2642 if (arg->peer_phymode == MODE_11AC_VHT80_80) {
2643 arg->peer_bw_rxnss_override |=
2644 FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1));
2645 }
2646 }
2647 ath10k_dbg(ar, ATH10K_DBG_MAC,
2648 "mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n",
2649 sta->addr, arg->peer_max_mpdu,
2650 arg->peer_flags, arg->peer_bw_rxnss_override);
2651}
2652
2653static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2654 struct ieee80211_vif *vif,
2655 struct ieee80211_sta *sta,
2656 struct wmi_peer_assoc_complete_arg *arg)
2657{
2658 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2659
2660 switch (arvif->vdev_type) {
2661 case WMI_VDEV_TYPE_AP:
2662 if (sta->wme)
2663 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2664
2665 if (sta->wme && sta->uapsd_queues) {
2666 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2667 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2668 }
2669 break;
2670 case WMI_VDEV_TYPE_STA:
2671 if (sta->wme)
2672 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2673 break;
2674 case WMI_VDEV_TYPE_IBSS:
2675 if (sta->wme)
2676 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2677 break;
2678 default:
2679 break;
2680 }
2681
2682 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2683 sta->addr, !!(arg->peer_flags &
2684 arvif->ar->wmi.peer_flags->qos));
2685}
2686
2687static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2688{
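	/* Shifting the 2 GHz rate bitmap right by ATH10K_MAC_FIRST_OFDM_RATE_IDX
	 * drops the CCK rates, so this is non-zero only if the station
	 * supports at least one OFDM rate.
	 */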
2689 return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
2690 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2691}
2692
2693static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
2694 struct ieee80211_sta *sta)
2695{
2696 struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
2697
2698 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
2699 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
2700 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
2701 return MODE_11AC_VHT160;
2702 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
2703 return MODE_11AC_VHT80_80;
2704 default:
2705 /* not sure if this is a valid case? */
2706 return MODE_11AC_VHT160;
2707 }
2708 }
2709
2710 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
2711 return MODE_11AC_VHT80;
2712
2713 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
2714 return MODE_11AC_VHT40;
2715
2716 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
2717 return MODE_11AC_VHT20;
2718
2719 return MODE_UNKNOWN;
2720}
2721
2722static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2723 struct ieee80211_vif *vif,
2724 struct ieee80211_sta *sta,
2725 struct wmi_peer_assoc_complete_arg *arg)
2726{
2727 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2728 struct cfg80211_chan_def def;
2729 enum nl80211_band band;
2730 const u8 *ht_mcs_mask;
2731 const u16 *vht_mcs_mask;
2732 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2733
2734 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2735 return;
2736
2737 band = def.chan->band;
2738 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2739 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2740
2741 switch (band) {
2742 case NL80211_BAND_2GHZ:
2743 if (sta->deflink.vht_cap.vht_supported &&
2744 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2745 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
2746 phymode = MODE_11AC_VHT40;
2747 else
2748 phymode = MODE_11AC_VHT20;
2749 } else if (sta->deflink.ht_cap.ht_supported &&
2750 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2751 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
2752 phymode = MODE_11NG_HT40;
2753 else
2754 phymode = MODE_11NG_HT20;
2755 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2756 phymode = MODE_11G;
2757 } else {
2758 phymode = MODE_11B;
2759 }
2760
2761 break;
2762 case NL80211_BAND_5GHZ:
2763 /*
2764 * Check VHT first.
2765 */
2766 if (sta->deflink.vht_cap.vht_supported &&
2767 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2768 phymode = ath10k_mac_get_phymode_vht(ar, sta);
2769 } else if (sta->deflink.ht_cap.ht_supported &&
2770 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2771 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
2772 phymode = MODE_11NA_HT40;
2773 else
2774 phymode = MODE_11NA_HT20;
2775 } else {
2776 phymode = MODE_11A;
2777 }
2778
2779 break;
2780 default:
2781 break;
2782 }
2783
2784 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2785 sta->addr, ath10k_wmi_phymode_str(phymode));
2786
2787 arg->peer_phymode = phymode;
2788 WARN_ON(phymode == MODE_UNKNOWN);
2789}
2790
2791static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2792 struct ieee80211_vif *vif,
2793 struct ieee80211_sta *sta,
2794 struct wmi_peer_assoc_complete_arg *arg)
2795{
2796 lockdep_assert_held(&ar->conf_mutex);
2797
2798 memset(arg, 0, sizeof(*arg));
2799
2800 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2801 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2802 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2803 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2804 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2805 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2806 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2807
2808 return 0;
2809}
2810
2811static const u32 ath10k_smps_map[] = {
2812 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2813 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2814 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2815 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2816};
2817
2818static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2819 const u8 *addr,
2820 const struct ieee80211_sta_ht_cap *ht_cap)
2821{
2822 int smps;
2823
2824 if (!ht_cap->ht_supported)
2825 return 0;
2826
2827 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2828 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2829
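	/* The 2-bit SM power save field maps directly onto ath10k_smps_map
	 * above, e.g. 0 (static) -> WMI_PEER_SMPS_STATIC and 3 (disabled) ->
	 * WMI_PEER_SMPS_PS_NONE.
	 */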
2830 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2831 return -EINVAL;
2832
2833 	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2834 					 ar->wmi.peer_param->smps_state,
2835 					 ath10k_smps_map[smps]);
2836}
2837
2838static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2839 struct ieee80211_vif *vif,
2840 struct ieee80211_sta_vht_cap vht_cap)
2841{
2842 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2843 int ret;
2844 u32 param;
2845 u32 value;
2846
2847 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2848 return 0;
2849
2850 if (!(ar->vht_cap_info &
2851 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2852 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2853 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2854 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2855 return 0;
2856
2857 param = ar->wmi.vdev_param->txbf;
2858 value = 0;
2859
2860 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2861 return 0;
2862
2863 /* The following logic is correct. If a remote STA advertises support
2864 * for being a beamformer then we should enable us being a beamformee.
2865 */
2866
2867 if (ar->vht_cap_info &
2868 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2869 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2870 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2871 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2872
2873 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2874 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2875 }
2876
2877 if (ar->vht_cap_info &
2878 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2879 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2880 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2881 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2882
2883 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2884 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2885 }
2886
2887 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2888 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2889
2890 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2891 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2892
2893 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2894 	if (ret) {
2895 		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2896 value, ret);
2897 return ret;
2898 }
2899
2900 return 0;
2901}
2902
2903static bool ath10k_mac_is_connected(struct ath10k *ar)
2904{
2905 struct ath10k_vif *arvif;
2906
2907 list_for_each_entry(arvif, &ar->arvifs, list) {
2908 if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
2909 return true;
2910 }
2911
2912 return false;
2913}
2914
2915static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
2916{
2917 int ret;
2918 u32 param;
2919 int tx_power_2g, tx_power_5g;
2920 bool connected;
2921
2922 lockdep_assert_held(&ar->conf_mutex);
2923
2924 /* ath10k internally uses unit of 0.5 dBm so multiply by 2 */
2925 tx_power_2g = txpower * 2;
2926 tx_power_5g = txpower * 2;
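	/* For example, a requested txpower of 20 dBm is passed to the
	 * firmware as 40 in these half-dBm units.
	 */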
2927
2928 connected = ath10k_mac_is_connected(ar);
2929
2930 if (connected && ar->tx_power_2g_limit)
2931 if (tx_power_2g > ar->tx_power_2g_limit)
2932 tx_power_2g = ar->tx_power_2g_limit;
2933
2934 if (connected && ar->tx_power_5g_limit)
2935 if (tx_power_5g > ar->tx_power_5g_limit)
2936 tx_power_5g = ar->tx_power_5g_limit;
2937
2938 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower 2g: %d, 5g: %d\n",
2939 tx_power_2g, tx_power_5g);
2940
2941 param = ar->wmi.pdev_param->txpower_limit2g;
2942 	ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_2g);
2943 	if (ret) {
2944 		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
2945 tx_power_2g, ret);
2946 return ret;
2947 }
2948
2949 param = ar->wmi.pdev_param->txpower_limit5g;
2950 	ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_5g);
2951 	if (ret) {
2952 		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
2953 tx_power_5g, ret);
2954 return ret;
2955 }
2956
2957 return 0;
2958}
2959
2960static int ath10k_mac_txpower_recalc(struct ath10k *ar)
2961{
2962 struct ath10k_vif *arvif;
2963 int ret, txpower = -1;
2964
2965 lockdep_assert_held(&ar->conf_mutex);
2966
2967 list_for_each_entry(arvif, &ar->arvifs, list) {
2968 /* txpower not initialized yet? */
2969 if (arvif->txpower == INT_MIN)
2970 continue;
2971
2972 if (txpower == -1)
2973 txpower = arvif->txpower;
2974 else
2975 txpower = min(txpower, arvif->txpower);
2976 }
2977
2978 if (txpower == -1)
2979 return 0;
2980
2981 ret = ath10k_mac_txpower_setup(ar, txpower);
2982 if (ret) {
2983 		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
2984 txpower, ret);
2985 return ret;
2986 }
2987
2988 return 0;
2989}
2990
2991static int ath10k_mac_set_sar_power(struct ath10k *ar)
2992{
2993 if (!ar->hw_params.dynamic_sar_support)
2994 return -EOPNOTSUPP;
2995
2996 if (!ath10k_mac_is_connected(ar))
2997 return 0;
2998
2999 /* if connected, then arvif->txpower must be valid */
3000 return ath10k_mac_txpower_recalc(ar);
3001}
3002
3003static int ath10k_mac_set_sar_specs(struct ieee80211_hw *hw,
3004 const struct cfg80211_sar_specs *sar)
3005{
3006 const struct cfg80211_sar_sub_specs *sub_specs;
3007 struct ath10k *ar = hw->priv;
3008 u32 i;
3009 int ret;
3010
3011 mutex_lock(&ar->conf_mutex);
3012
3013 if (!ar->hw_params.dynamic_sar_support) {
3014 ret = -EOPNOTSUPP;
3015 goto err;
3016 }
3017
3018 if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
3019 sar->num_sub_specs == 0) {
3020 ret = -EINVAL;
3021 goto err;
3022 }
3023
3024 sub_specs = sar->sub_specs;
3025
3026 	/* 0 dBm is not a practical value for ath10k, so a limit of 0 is
3027 	 * treated as "no SAR limitation".
3028 	 */
3029 ar->tx_power_2g_limit = 0;
3030 ar->tx_power_5g_limit = 0;
3031
3032 	/* Note: the SAR power limits arrive in 0.25 dBm units, while ath10k
3033 	 * uses 0.5 dBm units, hence the division by 2 below.
3034 	 */
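	/* e.g. a sub-spec power of 80 (20 dBm in 0.25 dBm steps) becomes a
	 * limit of 40 in ath10k's 0.5 dBm units.
	 */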
3035 for (i = 0; i < sar->num_sub_specs; i++) {
3036 if (sub_specs->freq_range_index == 0)
3037 ar->tx_power_2g_limit = sub_specs->power / 2;
3038 else if (sub_specs->freq_range_index == 1)
3039 ar->tx_power_5g_limit = sub_specs->power / 2;
3040
3041 sub_specs++;
3042 }
3043
3044 ret = ath10k_mac_set_sar_power(ar);
3045 if (ret) {
3046 		ath10k_warn(ar, "failed to set sar power: %d", ret);
3047 goto err;
3048 }
3049
3050err:
3051 	mutex_unlock(&ar->conf_mutex);
3052 return ret;
3053}
3054
3055/* can be called only in mac80211 callbacks due to `key_count` usage */
3056static void ath10k_bss_assoc(struct ieee80211_hw *hw,
3057 struct ieee80211_vif *vif,
3058 struct ieee80211_bss_conf *bss_conf)
3059{
3060 struct ath10k *ar = hw->priv;
3061 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3062 struct ieee80211_sta_ht_cap ht_cap;
3063 struct ieee80211_sta_vht_cap vht_cap;
3064 struct wmi_peer_assoc_complete_arg peer_arg;
3065 struct ieee80211_sta *ap_sta;
3066 int ret;
3067
3068 lockdep_assert_held(&ar->conf_mutex);
3069
3070 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
3071 arvif->vdev_id, arvif->bssid, arvif->aid);
3072
3073 rcu_read_lock();
3074
3075 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
3076 	if (!ap_sta) {
3077 		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
3078 bss_conf->bssid, arvif->vdev_id);
3079 rcu_read_unlock();
3080 return;
3081 }
3082
3083 /* ap_sta must be accessed only within rcu section which must be left
3084 * before calling ath10k_setup_peer_smps() which might sleep.
3085 */
3086 ht_cap = ap_sta->deflink.ht_cap;
3087 vht_cap = ap_sta->deflink.vht_cap;
3088
3089 	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
3090 	if (ret) {
3091 		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
3092 bss_conf->bssid, arvif->vdev_id, ret);
3093 rcu_read_unlock();
3094 return;
3095 }
3096
3097 rcu_read_unlock();
3098
3099 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
3100 	if (ret) {
3101 		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
3102 bss_conf->bssid, arvif->vdev_id, ret);
3103 return;
3104 }
3105
3106 	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
3107 	if (ret) {
3108 		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
3109 arvif->vdev_id, ret);
3110 return;
3111 }
3112
3113 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
3114 if (ret) {
3115 		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
3116 arvif->vdev_id, bss_conf->bssid, ret);
3117 return;
3118 }
3119
3120 ath10k_dbg(ar, ATH10K_DBG_MAC,
3121 "mac vdev %d up (associated) bssid %pM aid %d\n",
3122 arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
3123
3124 WARN_ON(arvif->is_up);
3125
3126 arvif->aid = vif->cfg.aid;
3127 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
3128 
3129 	ret = ath10k_wmi_pdev_set_param(ar,
3130 					ar->wmi.pdev_param->peer_stats_info_enable, 1);
3131 	if (ret)
3132 		ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
3133 
3134 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
3135 	if (ret) {
3136 		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
3137 arvif->vdev_id, ret);
3138 return;
3139 }
3140
3141 arvif->is_up = true;
3142
3143 ath10k_mac_set_sar_power(ar);
3144
3145 /* Workaround: Some firmware revisions (tested with qca6174
3146 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
3147 * poked with peer param command.
3148 */
3149 	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
3150 					ar->wmi.peer_param->dummy_var, 1);
3151 	if (ret) {
3152 		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
3153 arvif->bssid, arvif->vdev_id, ret);
3154 return;
3155 }
3156}
3157
3158static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
3159 struct ieee80211_vif *vif)
3160{
3161 struct ath10k *ar = hw->priv;
3162 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3163 struct ieee80211_sta_vht_cap vht_cap = {};
3164 int ret;
3165
3166 lockdep_assert_held(&ar->conf_mutex);
3167
3168 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
3169 arvif->vdev_id, arvif->bssid);
3170
3171 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
3172 	if (ret)
3173 		ath10k_warn(ar, "failed to down vdev %i: %d\n",
3174 arvif->vdev_id, ret);
3175
3176 arvif->def_wep_key_idx = -1;
3177
3178 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
3179 if (ret) {
3180 		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
3181 arvif->vdev_id, ret);
3182 return;
3183 }
3184
3185 arvif->is_up = false;
3186
3187 ath10k_mac_txpower_recalc(ar);
3188
3189 	cancel_delayed_work_sync(&arvif->connection_loss_work);
3190}
3191
3192static int ath10k_new_peer_tid_config(struct ath10k *ar,
3193 struct ieee80211_sta *sta,
3194 struct ath10k_vif *arvif)
3195{
3196 struct wmi_per_peer_per_tid_cfg_arg arg = {};
3197 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3198 bool config_apply;
3199 int ret, i;
3200
3201 for (i = 0; i < ATH10K_TID_MAX; i++) {
3202 config_apply = false;
3203 if (arvif->retry_long[i] || arvif->ampdu[i] ||
3204 arvif->rate_ctrl[i] || arvif->rtscts[i]) {
3205 config_apply = true;
3206 arg.tid = i;
3207 arg.vdev_id = arvif->vdev_id;
3208 arg.retry_count = arvif->retry_long[i];
3209 arg.aggr_control = arvif->ampdu[i];
3210 arg.rate_ctrl = arvif->rate_ctrl[i];
3211 arg.rcode_flags = arvif->rate_code[i];
3212
3213 if (arvif->rtscts[i])
3214 arg.ext_tid_cfg_bitmap =
3215 WMI_EXT_TID_RTS_CTS_CONFIG;
3216 else
3217 arg.ext_tid_cfg_bitmap = 0;
3218
3219 arg.rtscts_ctrl = arvif->rtscts[i];
3220 }
3221
3222 if (arvif->noack[i]) {
3223 arg.ack_policy = arvif->noack[i];
3224 arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
3225 arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
3226 config_apply = true;
3227 }
3228
3229 		/* Assign the default value (-1) to a newly connected station.
3230 		 * This marks per-station TID configuration that has not yet
3231 		 * been set for this station.
3232 		 */
3233 arsta->retry_long[i] = -1;
3234 arsta->noack[i] = -1;
3235 arsta->ampdu[i] = -1;
3236
3237 if (!config_apply)
3238 continue;
3239
3240 		ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
3241 
3242 		ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
3243 		if (ret) {
3244 			ath10k_warn(ar, "failed to set per tid retry/aggr config for sta %pM: %d\n",
3245 sta->addr, ret);
3246 return ret;
3247 }
3248
3249 memset(&arg, 0, sizeof(arg));
3250 }
3251
3252 return 0;
3253}
3254
3255static int ath10k_station_assoc(struct ath10k *ar,
3256 struct ieee80211_vif *vif,
3257 struct ieee80211_sta *sta,
3258 bool reassoc)
3259{
3260 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3261 struct wmi_peer_assoc_complete_arg peer_arg;
3262 int ret = 0;
3263
3264 lockdep_assert_held(&ar->conf_mutex);
3265
3266 	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
3267 	if (ret) {
3268 		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
3269 sta->addr, arvif->vdev_id, ret);
3270 return ret;
3271 }
3272
3273 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
3274 	if (ret) {
3275 		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
3276 sta->addr, arvif->vdev_id, ret);
3277 return ret;
3278 }
3279
3280 /* Re-assoc is run only to update supported rates for given station. It
3281 * doesn't make much sense to reconfigure the peer completely.
3282 */
3283 if (!reassoc) {
3284 		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
3285 					     &sta->deflink.ht_cap);
3286 		if (ret) {
3287 			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
3288 arvif->vdev_id, ret);
3289 return ret;
3290 }
3291
3292 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
3293 if (ret) {
3294 			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
3295 sta->addr, arvif->vdev_id, ret);
3296 return ret;
3297 }
3298
3299 if (!sta->wme) {
3300 arvif->num_legacy_stations++;
3301 ret = ath10k_recalc_rtscts_prot(arvif);
3302 if (ret) {
3303 				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3304 arvif->vdev_id, ret);
3305 return ret;
3306 }
3307 }
3308
3309 /* Plumb cached keys only for static WEP */
3310 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) {
3311 			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
3312 			if (ret) {
3313 				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
3314 arvif->vdev_id, ret);
3315 return ret;
3316 }
3317 }
3318 }
3319
3320 if (!test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map))
3321 return ret;
3322
3323 return ath10k_new_peer_tid_config(ar, sta, arvif);
3324}
3325
3326static int ath10k_station_disassoc(struct ath10k *ar,
3327 struct ieee80211_vif *vif,
3328 struct ieee80211_sta *sta)
3329{
3330 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3331 int ret = 0;
3332
3333 lockdep_assert_held(&ar->conf_mutex);
3334
3335 if (!sta->wme) {
3336 arvif->num_legacy_stations--;
3337 ret = ath10k_recalc_rtscts_prot(arvif);
3338 if (ret) {
3339 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3340 arvif->vdev_id, ret);
3341 return ret;
3342 }
3343 }
3344
3345 	ret = ath10k_clear_peer_keys(arvif, sta->addr);
3346 	if (ret) {
3347 		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
3348 arvif->vdev_id, ret);
3349 return ret;
3350 }
3351
3352 return ret;
3353}
3354
3355/**************/
3356/* Regulatory */
3357/**************/
3358
3359static int ath10k_update_channel_list(struct ath10k *ar)
3360{
3361 struct ieee80211_hw *hw = ar->hw;
3362 struct ieee80211_supported_band **bands;
3363 enum nl80211_band band;
3364 struct ieee80211_channel *channel;
3365 struct wmi_scan_chan_list_arg arg = {0};
3366 struct wmi_channel_arg *ch;
3367 bool passive;
3368 int len;
3369 int ret;
3370 int i;
3371
3372 lockdep_assert_held(&ar->conf_mutex);
3373
3374 bands = hw->wiphy->bands;
3375 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3376 if (!bands[band])
3377 continue;
3378
3379 for (i = 0; i < bands[band]->n_channels; i++) {
3380 if (bands[band]->channels[i].flags &
3381 IEEE80211_CHAN_DISABLED)
3382 continue;
3383
3384 arg.n_channels++;
3385 }
3386 }
3387
3388 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
3389 	arg.channels = kzalloc(len, GFP_KERNEL);
3390 if (!arg.channels)
3391 return -ENOMEM;
3392
3393 ch = arg.channels;
3394 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3395 if (!bands[band])
3396 continue;
3397
3398 for (i = 0; i < bands[band]->n_channels; i++) {
3399 channel = &bands[band]->channels[i];
3400
3401 if (channel->flags & IEEE80211_CHAN_DISABLED)
3402 continue;
3403
3404 ch->allow_ht = true;
3405
3406 /* FIXME: when should we really allow VHT? */
3407 ch->allow_vht = true;
3408
3409 ch->allow_ibss =
3410 !(channel->flags & IEEE80211_CHAN_NO_IR);
3411
3412 ch->ht40plus =
3413 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
3414
3415 ch->chan_radar =
3416 !!(channel->flags & IEEE80211_CHAN_RADAR);
3417
3418 passive = channel->flags & IEEE80211_CHAN_NO_IR;
3419 ch->passive = passive;
3420
3421 /* the firmware is ignoring the "radar" flag of the
3422 * channel and is scanning actively using Probe Requests
3423 * on "Radar detection"/DFS channels which are not
3424 * marked as "available"
3425 */
3426 ch->passive |= ch->chan_radar;
3427
3428 ch->freq = channel->center_freq;
3429 ch->band_center_freq1 = channel->center_freq;
3430 ch->min_power = 0;
3431 ch->max_power = channel->max_power * 2;
3432 ch->max_reg_power = channel->max_reg_power * 2;
3433 ch->max_antenna_gain = channel->max_antenna_gain;
3434 ch->reg_class_id = 0; /* FIXME */
3435
3436 /* FIXME: why use only legacy modes, why not any
3437 * HT/VHT modes? Would that even make any
3438 * difference?
3439 */
3440 if (channel->band == NL80211_BAND_2GHZ)
3441 ch->mode = MODE_11G;
3442 else
3443 ch->mode = MODE_11A;
3444
3445 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
3446 continue;
3447
3448 ath10k_dbg(ar, ATH10K_DBG_WMI,
3449 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
3450 ch - arg.channels, arg.n_channels,
3451 ch->freq, ch->max_power, ch->max_reg_power,
3452 ch->max_antenna_gain, ch->mode);
3453
3454 ch++;
3455 }
3456 }
3457
3458 	ret = ath10k_wmi_scan_chan_list(ar, &arg);
3459 	kfree(arg.channels);
3460
3461 return ret;
3462}
3463
3464static enum wmi_dfs_region
3465ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3466{
3467 switch (dfs_region) {
3468 case NL80211_DFS_UNSET:
3469 return WMI_UNINIT_DFS_DOMAIN;
3470 case NL80211_DFS_FCC:
3471 return WMI_FCC_DFS_DOMAIN;
3472 case NL80211_DFS_ETSI:
3473 return WMI_ETSI_DFS_DOMAIN;
3474 case NL80211_DFS_JP:
3475 return WMI_MKK4_DFS_DOMAIN;
3476 }
3477 return WMI_UNINIT_DFS_DOMAIN;
3478}
3479
3480static void ath10k_regd_update(struct ath10k *ar)
3481{
3482 struct reg_dmn_pair_mapping *regpair;
3483 int ret;
3484 enum wmi_dfs_region wmi_dfs_reg;
3485 enum nl80211_dfs_regions nl_dfs_reg;
3486
3487 lockdep_assert_held(&ar->conf_mutex);
3488
3489 ret = ath10k_update_channel_list(ar);
3490 if (ret)
3491 		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3492
3493 regpair = ar->ath_common.regulatory.regpair;
3494
3495 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3496 nl_dfs_reg = ar->dfs_detector->region;
3497 		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3498 } else {
3499 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3500 }
3501
3502 /* Target allows setting up per-band regdomain but ath_common provides
3503 * a combined one only
3504 */
3505 	ret = ath10k_wmi_pdev_set_regdomain(ar,
3506 					    regpair->reg_domain,
3507 					    regpair->reg_domain, /* 2ghz */
3508 					    regpair->reg_domain, /* 5ghz */
3509 					    regpair->reg_2ghz_ctl,
3510 					    regpair->reg_5ghz_ctl,
3511 					    wmi_dfs_reg);
3512 	if (ret)
3513 		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3514}
3515
3516static void ath10k_mac_update_channel_list(struct ath10k *ar,
3517 struct ieee80211_supported_band *band)
3518{
3519 int i;
3520
3521 if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
3522 for (i = 0; i < band->n_channels; i++) {
3523 if (band->channels[i].center_freq < ar->low_5ghz_chan ||
3524 band->channels[i].center_freq > ar->high_5ghz_chan)
3525 band->channels[i].flags |=
3526 IEEE80211_CHAN_DISABLED;
3527 }
3528 }
3529}
3530
3531static void ath10k_reg_notifier(struct wiphy *wiphy,
3532 struct regulatory_request *request)
3533{
3534 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3535 struct ath10k *ar = hw->priv;
3536 bool result;
3537
3538 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3539
3540 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3541 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3542 request->dfs_region);
3543 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3544 request->dfs_region);
3545 if (!result)
3546 			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3547 request->dfs_region);
3548 }
3549
3550 mutex_lock(&ar->conf_mutex);
3551 if (ar->state == ATH10K_STATE_ON)
3552 ath10k_regd_update(ar);
3553 	mutex_unlock(&ar->conf_mutex);
3554 
3555 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
3556 		ath10k_mac_update_channel_list(ar,
3557 					       ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
3558}
3559
3560static void ath10k_stop_radar_confirmation(struct ath10k *ar)
3561{
3562 	spin_lock_bh(&ar->data_lock);
3563 	ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED;
3564 	spin_unlock_bh(&ar->data_lock);
3565 
3566 	cancel_work_sync(&ar->radar_confirmation_work);
3567}
3568
3569/***************/
3570/* TX handlers */
3571/***************/
3572
3573enum ath10k_mac_tx_path {
3574 ATH10K_MAC_TX_HTT,
3575 ATH10K_MAC_TX_HTT_MGMT,
3576 ATH10K_MAC_TX_WMI_MGMT,
3577 ATH10K_MAC_TX_UNKNOWN,
3578};
3579
3580void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3581{
3582 lockdep_assert_held(&ar->htt.tx_lock);
3583
3584 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3585 ar->tx_paused |= BIT(reason);
3586 	ieee80211_stop_queues(ar->hw);
3587}
3588
3589static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3590 struct ieee80211_vif *vif)
3591{
3592 struct ath10k *ar = data;
3593 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3594
3595 if (arvif->tx_paused)
3596 return;
3597
3598 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3599}
3600
3601void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3602{
3603 lockdep_assert_held(&ar->htt.tx_lock);
3604
3605 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3606 ar->tx_paused &= ~BIT(reason);
3607
3608 if (ar->tx_paused)
3609 return;
3610
3611 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3612 						   ATH10K_ITER_RESUME_FLAGS,
3613 						   ath10k_mac_tx_unlock_iter,
3614 						   ar);
3615 
3616 	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3617}
3618
3619void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3620{
3621 struct ath10k *ar = arvif->ar;
3622
3623 lockdep_assert_held(&ar->htt.tx_lock);
3624
3625 WARN_ON(reason >= BITS_PER_LONG);
3626 arvif->tx_paused |= BIT(reason);
3627 	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3628}
3629
3630void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3631{
3632 struct ath10k *ar = arvif->ar;
3633
3634 lockdep_assert_held(&ar->htt.tx_lock);
3635
3636 WARN_ON(reason >= BITS_PER_LONG);
3637 arvif->tx_paused &= ~BIT(reason);
3638
3639 if (ar->tx_paused)
3640 return;
3641
3642 if (arvif->tx_paused)
3643 return;
3644
3645 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3646}
3647
3648static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3649 enum wmi_tlv_tx_pause_id pause_id,
3650 enum wmi_tlv_tx_pause_action action)
3651{
3652 struct ath10k *ar = arvif->ar;
3653
3654 lockdep_assert_held(&ar->htt.tx_lock);
3655
3656 switch (action) {
3657 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3658 		ath10k_mac_vif_tx_lock(arvif, pause_id);
3659 break;
3660 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3661 		ath10k_mac_vif_tx_unlock(arvif, pause_id);
3662 break;
3663 default:
3664 ath10k_dbg(ar, ATH10K_DBG_BOOT,
3665 "received unknown tx pause action %d on vdev %i, ignoring\n",
3666 action, arvif->vdev_id);
3667 break;
3668 }
3669}
3670
3671struct ath10k_mac_tx_pause {
3672 u32 vdev_id;
3673 enum wmi_tlv_tx_pause_id pause_id;
3674 enum wmi_tlv_tx_pause_action action;
3675};
3676
3677static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3678 struct ieee80211_vif *vif)
3679{
3680 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3681 struct ath10k_mac_tx_pause *arg = data;
3682
3683 if (arvif->vdev_id != arg->vdev_id)
3684 return;
3685
3686 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3687}
3688
3689void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3690 enum wmi_tlv_tx_pause_id pause_id,
3691 enum wmi_tlv_tx_pause_action action)
3692{
3693 struct ath10k_mac_tx_pause arg = {
3694 .vdev_id = vdev_id,
3695 .pause_id = pause_id,
3696 .action = action,
3697 };
3698
3699 	spin_lock_bh(&ar->htt.tx_lock);
3700 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3701 						   ATH10K_ITER_RESUME_FLAGS,
3702 						   ath10k_mac_handle_tx_pause_iter,
3703 						   &arg);
3704 	spin_unlock_bh(&ar->htt.tx_lock);
3705}
3706
3707static enum ath10k_hw_txrx_mode
3708ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3709 struct ieee80211_vif *vif,
3710 struct ieee80211_sta *sta,
3711 struct sk_buff *skb)
3712{
3713 const struct ieee80211_hdr *hdr = (void *)skb->data;
3714 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
3715 __le16 fc = hdr->frame_control;
3716
3717 if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
3718 return ATH10K_HW_TXRX_ETHERNET;
3719
3720 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3721 return ATH10K_HW_TXRX_RAW;
3722
3723 if (ieee80211_is_mgmt(fc))
3724 return ATH10K_HW_TXRX_MGMT;
3725
3726 /* Workaround:
3727 *
3728 * NullFunc frames are mostly used to ping if a client or AP are still
3729 * reachable and responsive. This implies tx status reports must be
3730 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3731 * come to a conclusion that the other end disappeared and tear down
3732 * BSS connection or it can never disconnect from BSS/client (which is
3733 * the case).
3734 *
3735 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3736 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
3737 * which seems to deliver correct tx reports for NullFunc frames. The
3738 * downside of using it is it ignores client powersave state so it can
3739 * end up disconnecting sleeping clients in AP mode. It should fix STA
3740 	 * mode though, because APs don't sleep.
3741 */
3742 if (ar->htt.target_version_major < 3 &&
3743 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3744 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3745 ar->running_fw->fw_file.fw_features))
3746 return ATH10K_HW_TXRX_MGMT;
3747
3748 /* Workaround:
3749 *
3750 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3751 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3752 * to work with Ethernet txmode so use it.
3753 *
3754 * FIXME: Check if raw mode works with TDLS.
3755 */
3756 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3757 return ATH10K_HW_TXRX_ETHERNET;
3758
3759 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) ||
3760 skb_cb->flags & ATH10K_SKB_F_RAW_TX)
3761 return ATH10K_HW_TXRX_RAW;
3762
3763 return ATH10K_HW_TXRX_NATIVE_WIFI;
3764}
3765
3766static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3767 struct sk_buff *skb)
3768{
3769 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3770 const struct ieee80211_hdr *hdr = (void *)skb->data;
3771 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3772 IEEE80211_TX_CTL_INJECTED;
3773
3774 	if (!ieee80211_has_protected(hdr->frame_control))
3775 return false;
3776
3777 if ((info->flags & mask) == mask)
3778 return false;
3779
3780 if (vif)
3781 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
3782
3783 return true;
3784}
3785
3786/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3787 * Control in the header.
3788 */
3789static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3790{
3791 struct ieee80211_hdr *hdr = (void *)skb->data;
3792 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3793 u8 *qos_ctl;
3794
3795 	if (!ieee80211_is_data_qos(hdr->frame_control))
3796 return;
3797
3798 qos_ctl = ieee80211_get_qos_ctl(hdr);
3799 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3800 skb->data, (void *)qos_ctl - (void *)skb->data);
3801 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
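	/* Illustrative: IEEE80211_QOS_CTL_LEN is 2, so e.g. a 26 byte QoS
	 * data header becomes a 24 byte non-QoS header after the
	 * memmove/skb_pull above.
	 */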
3802
3803 /* Some firmware revisions don't handle sending QoS NullFunc well.
3804 * These frames are mainly used for CQM purposes so it doesn't really
3805 * matter whether QoS NullFunc or NullFunc are sent.
3806 */
3807 hdr = (void *)skb->data;
3808 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3809 cb->flags &= ~ATH10K_SKB_F_QOS;
3810
3811 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3812}
3813
3814static void ath10k_tx_h_8023(struct sk_buff *skb)
3815{
3816 struct ieee80211_hdr *hdr;
3817 struct rfc1042_hdr *rfc1042;
3818 struct ethhdr *eth;
3819 size_t hdrlen;
3820 u8 da[ETH_ALEN];
3821 u8 sa[ETH_ALEN];
3822 __be16 type;
3823
3824 hdr = (void *)skb->data;
3825 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
3826 	rfc1042 = (void *)skb->data + hdrlen;
3827 
3828 	ether_addr_copy(da, ieee80211_get_DA(hdr));
3829 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
3830 type = rfc1042->snap_type;
3831
3832 	skb_pull(skb, hdrlen + sizeof(*rfc1042));
3833 	skb_push(skb, sizeof(*eth));
3834 
3835 	eth = (void *)skb->data;
3836 	ether_addr_copy(eth->h_dest, da);
3837 	ether_addr_copy(eth->h_source, sa);
3838 eth->h_proto = type;
3839}
3840
3841static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3842 struct ieee80211_vif *vif,
3843 struct sk_buff *skb)
3844{
3845 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3846 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3847
3848 	/* This is the case only for P2P_GO */
3849 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3850 return;
3851
3852 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3853 		spin_lock_bh(&ar->data_lock);
3854 		if (arvif->u.ap.noa_data)
3855 			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3856 					      GFP_ATOMIC))
3857 				skb_put_data(skb, arvif->u.ap.noa_data,
3858 					     arvif->u.ap.noa_len);
3859 		spin_unlock_bh(&ar->data_lock);
3860 }
3861}
3862
3863static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3864 struct ieee80211_vif *vif,
3865 struct ieee80211_txq *txq,
3866 struct ieee80211_sta *sta,
3867 struct sk_buff *skb, u16 airtime)
3868{
3869 struct ieee80211_hdr *hdr = (void *)skb->data;
3870 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3871 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3872 	bool is_data = ieee80211_is_data(hdr->frame_control) ||
3873 		       ieee80211_is_data_qos(hdr->frame_control);
3874 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3875 struct ath10k_sta *arsta;
3876 u8 tid, *qos_ctl;
3877 bool noack = false;
3878
3879 cb->flags = 0;
3880
3881 if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
3882 cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */
3883 goto finish_cb_fill;
3884 }
3885
3886 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3887 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3888
3889 	if (ieee80211_is_mgmt(hdr->frame_control))
3890 cb->flags |= ATH10K_SKB_F_MGMT;
3891
3892 	if (ieee80211_is_data_qos(hdr->frame_control)) {
3893 cb->flags |= ATH10K_SKB_F_QOS;
3894 qos_ctl = ieee80211_get_qos_ctl(hdr);
3895 tid = (*qos_ctl) & IEEE80211_QOS_CTL_TID_MASK;
3896
3897 if (arvif->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
3898 noack = true;
3899
3900 if (sta) {
3901 arsta = (struct ath10k_sta *)sta->drv_priv;
3902
3903 if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
3904 noack = true;
3905
3906 if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_ACK)
3907 noack = false;
3908 }
3909
3910 if (noack)
3911 cb->flags |= ATH10K_SKB_F_NOACK_TID;
3912 }
3913
3914 /* Data frames encrypted in software will be posted to firmware
3915 * with tx encap mode set to RAW. Ex: Multicast traffic generated
3916 * for a specific VLAN group will always be encrypted in software.
3917 */
3918 	if (is_data && ieee80211_has_protected(hdr->frame_control) &&
3919 !info->control.hw_key) {
3920 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3921 cb->flags |= ATH10K_SKB_F_RAW_TX;
3922 }
3923
3924finish_cb_fill:
3925 cb->vif = vif;
3926 cb->txq = txq;
3927 cb->airtime_est = airtime;
3928 if (sta) {
3929 arsta = (struct ath10k_sta *)sta->drv_priv;
3930 		spin_lock_bh(&ar->data_lock);
3931 		cb->ucast_cipher = arsta->ucast_cipher;
3932 		spin_unlock_bh(&ar->data_lock);
3933 }
3934}
3935
3936bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3937{
3938 /* FIXME: Not really sure since when the behaviour changed. At some
3939 * point new firmware stopped requiring creation of peer entries for
3940 * offchannel tx (and actually creating them causes issues with wmi-htc
3941 * tx credit replenishment and reliability). Assuming it's at least 3.4
3942 * because that's when the `freq` was introduced to TX_FRM HTT command.
3943 */
3944 return (ar->htt.target_version_major >= 3 &&
3945 ar->htt.target_version_minor >= 4 &&
3946 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3947}
3948
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;

	if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
		return -ENOSPC;
	}

	skb_queue_tail(q, skb);
	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);

	return 0;
}
3963
3964static enum ath10k_mac_tx_path
3965ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3966 struct sk_buff *skb,
3967 enum ath10k_hw_txrx_mode txmode)
3968{
3969 switch (txmode) {
3970 case ATH10K_HW_TXRX_RAW:
3971 case ATH10K_HW_TXRX_NATIVE_WIFI:
3972 case ATH10K_HW_TXRX_ETHERNET:
3973 return ATH10K_MAC_TX_HTT;
3974 case ATH10K_HW_TXRX_MGMT:
3975 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3976 ar->running_fw->fw_file.fw_features) ||
3977 test_bit(WMI_SERVICE_MGMT_TX_WMI,
3978 ar->wmi.svc_map))
3979 return ATH10K_MAC_TX_WMI_MGMT;
3980 else if (ar->htt.target_version_major >= 3)
3981 return ATH10K_MAC_TX_HTT;
3982 else
3983 return ATH10K_MAC_TX_HTT_MGMT;
3984 }
3985
3986 return ATH10K_MAC_TX_UNKNOWN;
3987}
3988
static int ath10k_mac_tx_submit(struct ath10k *ar,
				enum ath10k_hw_txrx_mode txmode,
				enum ath10k_mac_tx_path txpath,
				struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret = -EINVAL;

	switch (txpath) {
	case ATH10K_MAC_TX_HTT:
		ret = ath10k_htt_tx(htt, txmode, skb);
		break;
	case ATH10K_MAC_TX_HTT_MGMT:
		ret = ath10k_htt_mgmt_tx(htt, skb);
		break;
	case ATH10K_MAC_TX_WMI_MGMT:
		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
		break;
	case ATH10K_MAC_TX_UNKNOWN:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
			    ret);
		ieee80211_free_txskb(ar->hw, skb);
	}

	return ret;
}
4021
/* This function consumes the sk_buff regardless of return value as far as
 * caller is concerned so no freeing is necessary afterwards.
 */
static int ath10k_mac_tx(struct ath10k *ar,
			 struct ieee80211_vif *vif,
			 enum ath10k_hw_txrx_mode txmode,
			 enum ath10k_mac_tx_path txpath,
			 struct sk_buff *skb, bool noque_offchan)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	int ret;

	/* We should disable CCK RATE due to P2P */
	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");

	switch (txmode) {
	case ATH10K_HW_TXRX_MGMT:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		ath10k_tx_h_nwifi(hw, skb);
		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
		ath10k_tx_h_seq_no(vif, skb);
		break;
	case ATH10K_HW_TXRX_ETHERNET:
		/* Convert 802.11->802.3 header only if the frame was earlier
		 * encapsulated to 802.11 by mac80211. Otherwise pass it as is.
		 */
		if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
			ath10k_tx_h_8023(skb);
		break;
	case ATH10K_HW_TXRX_RAW:
		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
		    !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
			WARN_ON_ONCE(1);
			ieee80211_free_txskb(hw, skb);
			return -EOPNOTSUPP;
		}
	}

	if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		if (!ath10k_mac_tx_frm_has_freq(ar)) {
			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
				   skb, skb->len);

			skb_queue_tail(&ar->offchan_tx_queue, skb);
			ieee80211_queue_work(hw, &ar->offchan_tx_work);
			return 0;
		}
	}

	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
		return ret;
	}

	return 0;
}
4082
void ath10k_offchan_tx_purge(struct ath10k *ar)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_dequeue(&ar->offchan_tx_queue);
		if (!skb)
			break;

		ieee80211_free_txskb(ar->hw, skb);
	}
}
4095
4096void ath10k_offchan_tx_work(struct work_struct *work)
4097{
4098 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
4099 struct ath10k_peer *peer;
4100 struct ath10k_vif *arvif;
4101 enum ath10k_hw_txrx_mode txmode;
4102 enum ath10k_mac_tx_path txpath;
4103 struct ieee80211_hdr *hdr;
4104 struct ieee80211_vif *vif;
4105 struct ieee80211_sta *sta;
4106 struct sk_buff *skb;
4107 const u8 *peer_addr;
4108 int vdev_id;
4109 int ret;
4110 unsigned long time_left;
4111 bool tmp_peer_created = false;
4112
4113 /* FW requirement: We must create a peer before FW will send out
4114 * an offchannel frame. Otherwise the frame will be stuck and
4115 * never transmitted. We delete the peer upon tx completion.
4116 * It is unlikely that a peer for offchannel tx will already be
4117 * present. However it may be in some rare cases so account for that.
4118 * Otherwise we might remove a legitimate peer and break stuff.
4119 */
4120
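	/* A rough sketch of the per-frame flow implemented below (purely
	 * illustrative, using only helpers already present in this file):
	 *
	 *	ath10k_peer_create(ar, NULL, NULL, vdev_id, peer_addr, ...);
	 *	ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
	 *	wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
	 *	ath10k_peer_delete(ar, vdev_id, peer_addr);
	 *
	 * where the final delete happens only if the peer was created
	 * temporarily above.
	 */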
	for (;;) {
		skb = skb_dequeue(&ar->offchan_tx_queue);
		if (!skb)
			break;

		mutex_lock(&ar->conf_mutex);

		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
			   skb, skb->len);

		hdr = (struct ieee80211_hdr *)skb->data;
		peer_addr = ieee80211_get_DA(hdr);

		spin_lock_bh(&ar->data_lock);
		vdev_id = ar->scan.vdev_id;
		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
		spin_unlock_bh(&ar->data_lock);

		if (peer) {
			ath10k_warn(ar, "peer %pM on vdev %d already present\n",
				    peer_addr, vdev_id);
		} else {
			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
						 peer_addr,
						 WMI_PEER_TYPE_DEFAULT);
			if (ret)
				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
					    peer_addr, vdev_id, ret);
			tmp_peer_created = (ret == 0);
		}

		spin_lock_bh(&ar->data_lock);
		reinit_completion(&ar->offchan_tx_completed);
		ar->offchan_tx_skb = skb;
		spin_unlock_bh(&ar->data_lock);

		/* It's safe to access vif and sta - conf_mutex guarantees that
		 * sta_state() and remove_interface() are locked exclusively
		 * out wrt to this offchannel worker.
		 */
		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif) {
			vif = arvif->vif;
			sta = ieee80211_find_sta(vif, peer_addr);
		} else {
			vif = NULL;
			sta = NULL;
		}

		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);

		ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
		if (ret) {
			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
				    ret);
			/* not serious */
		}

		time_left =
		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
		if (time_left == 0)
			ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
				    skb, skb->len);

		if (!peer && tmp_peer_created) {
			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
			if (ret)
				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
					    peer_addr, vdev_id, ret);
		}

		mutex_unlock(&ar->conf_mutex);
	}
}
4196
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
		if (!skb)
			break;

		ieee80211_free_txskb(ar->hw, skb);
	}
}
4209
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	for (;;) {
		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
		if (!skb)
			break;

		if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
			     ar->running_fw->fw_file.fw_features)) {
			paddr = dma_map_single(ar->dev, skb->data,
					       skb->len, DMA_TO_DEVICE);
			if (dma_mapping_error(ar->dev, paddr)) {
				ieee80211_free_txskb(ar->hw, skb);
				continue;
			}
			ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
			if (ret) {
				ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
					    ret);
				/* remove this msdu from idr tracking */
				ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);

				dma_unmap_single(ar->dev, paddr, skb->len,
						 DMA_TO_DEVICE);
				ieee80211_free_txskb(ar->hw, skb);
			}
		} else {
			ret = ath10k_wmi_mgmt_tx(ar, skb);
			if (ret) {
				ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
					    ret);
				ieee80211_free_txskb(ar->hw, skb);
			}
		}
	}
}
4251
static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
{
	struct ath10k_txq *artxq;

	if (!txq)
		return;

	artxq = (void *)txq->drv_priv;
	INIT_LIST_HEAD(&artxq->list);
}

static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
{
	struct ath10k_skb_cb *cb;
	struct sk_buff *msdu;
	int msdu_id;

	if (!txq)
		return;

	spin_lock_bh(&ar->htt.tx_lock);
	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
		cb = ATH10K_SKB_CB(msdu);
		if (cb->txq == txq)
			cb->txq = NULL;
	}
	spin_unlock_bh(&ar->htt.tx_lock);
}
4280
4281struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
4282 u16 peer_id,
4283 u8 tid)
4284{
4285 struct ath10k_peer *peer;
4286
4287 lockdep_assert_held(&ar->data_lock);
4288
4289 peer = ar->peer_map[peer_id];
4290 if (!peer)
4291 return NULL;
4292
4293 if (peer->removed)
4294 return NULL;
4295
4296 if (peer->sta)
4297 return peer->sta->txq[tid];
4298 else if (peer->vif)
4299 return peer->vif->txq;
4300 else
4301 return NULL;
4302}
4303
4304static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
4305 struct ieee80211_txq *txq)
4306{
4307 struct ath10k *ar = hw->priv;
4308 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4309
4310 /* No need to get locks */
4311 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
4312 return true;
4313
4314 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
4315 return true;
4316
4317 if (artxq->num_fw_queued < artxq->num_push_allowed)
4318 return true;
4319
4320 return false;
4321}
4322
4323/* Return estimated airtime in microsecond, which is calculated using last
4324 * reported TX rate. This is just a rough estimation because host driver has no
4325 * knowledge of the actual transmit rate, retries or aggregation. If actual
4326 * airtime can be reported by firmware, then delta between estimated and actual
4327 * airtime can be adjusted from deficit.
4328 */
4329#define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */
4330#define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */
static u16 ath10k_mac_update_airtime(struct ath10k *ar,
				     struct ieee80211_txq *txq,
				     struct sk_buff *skb)
{
	struct ath10k_sta *arsta;
	u32 pktlen;
	u16 airtime = 0;

	if (!txq || !txq->sta)
		return airtime;

	if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
		return airtime;

	spin_lock_bh(&ar->data_lock);
	arsta = (struct ath10k_sta *)txq->sta->drv_priv;

	pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */
	if (arsta->last_tx_bitrate) {
		/* airtime in us, last_tx_bitrate in 100kbps */
		airtime = (pktlen * 8 * (1000 / 100))
				/ arsta->last_tx_bitrate;
		/* overhead for media access time and IFS */
		airtime += IEEE80211_ATF_OVERHEAD_IFS;
	} else {
		/* This is mostly for throttling excessive BC/MC frames, and
		 * the airtime/rate doesn't need to be exact. Airtime of BC/MC
		 * frames in 2G gets some discount, which helps prevent very
		 * low rate frames from being blocked for too long.
		 */
		airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */
		airtime += IEEE80211_ATF_OVERHEAD;
	}
	spin_unlock_bh(&ar->data_lock);

	return airtime;
}
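
/* Worked example of the estimate above (illustrative numbers only): for a
 * 1500 byte MSDU, pktlen = 1500 + 38 = 1538 bytes. With a last reported TX
 * bitrate of 100 Mbps (last_tx_bitrate = 1000 in 100 kbps units):
 *
 *	airtime = (1538 * 8 * 10) / 1000 + IEEE80211_ATF_OVERHEAD_IFS
 *		= 123 + 16 = 139 us
 *
 * With no reported rate the same frame is charged at 6 Mbps plus
 * IEEE80211_ATF_OVERHEAD, i.e. (1538 * 8 * 10) / 60 + 100 = 2150 us.
 */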
4368
4369int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
4370 struct ieee80211_txq *txq)
4371{
4372 struct ath10k *ar = hw->priv;
4373 struct ath10k_htt *htt = &ar->htt;
4374 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4375 struct ieee80211_vif *vif = txq->vif;
4376 struct ieee80211_sta *sta = txq->sta;
4377 enum ath10k_hw_txrx_mode txmode;
4378 enum ath10k_mac_tx_path txpath;
4379 struct sk_buff *skb;
4380 struct ieee80211_hdr *hdr;
4381 size_t skb_len;
4382 bool is_mgmt, is_presp;
4383 int ret;
4384 u16 airtime;
4385
4386 spin_lock_bh(lock: &ar->htt.tx_lock);
4387 ret = ath10k_htt_tx_inc_pending(htt);
4388 spin_unlock_bh(lock: &ar->htt.tx_lock);
4389
4390 if (ret)
4391 return ret;
4392
4393 skb = ieee80211_tx_dequeue_ni(hw, txq);
4394 if (!skb) {
4395 spin_lock_bh(lock: &ar->htt.tx_lock);
4396 ath10k_htt_tx_dec_pending(htt);
4397 spin_unlock_bh(lock: &ar->htt.tx_lock);
4398
4399 return -ENOENT;
4400 }
4401
4402 airtime = ath10k_mac_update_airtime(ar, txq, skb);
4403 ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
4404
4405 skb_len = skb->len;
4406 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4407 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4408 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4409
4410 if (is_mgmt) {
4411 hdr = (struct ieee80211_hdr *)skb->data;
4412 is_presp = ieee80211_is_probe_resp(fc: hdr->frame_control);
4413
4414 spin_lock_bh(lock: &ar->htt.tx_lock);
4415 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4416
4417 if (ret) {
4418 ath10k_htt_tx_dec_pending(htt);
4419 spin_unlock_bh(lock: &ar->htt.tx_lock);
4420 return ret;
4421 }
4422 spin_unlock_bh(lock: &ar->htt.tx_lock);
4423 }
4424
4425 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, noque_offchan: false);
4426 if (unlikely(ret)) {
4427 ath10k_warn(ar, fmt: "failed to push frame: %d\n", ret);
4428
4429 spin_lock_bh(lock: &ar->htt.tx_lock);
4430 ath10k_htt_tx_dec_pending(htt);
4431 if (is_mgmt)
4432 ath10k_htt_tx_mgmt_dec_pending(htt);
4433 spin_unlock_bh(lock: &ar->htt.tx_lock);
4434
4435 return ret;
4436 }
4437
4438 spin_lock_bh(lock: &ar->htt.tx_lock);
4439 artxq->num_fw_queued++;
4440 spin_unlock_bh(lock: &ar->htt.tx_lock);
4441
4442 return skb_len;
4443}
4444
static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
{
	struct ieee80211_txq *txq;
	int ret = 0;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac))) {
		while (ath10k_mac_tx_can_push(hw, txq)) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;
		}
		ieee80211_return_txq(hw, txq, false);
		ath10k_htt_tx_txq_update(hw, txq);
		if (ret == -EBUSY)
			break;
	}
	ieee80211_txq_schedule_end(hw, ac);

	return ret;
}
4466
4467void ath10k_mac_tx_push_pending(struct ath10k *ar)
4468{
4469 struct ieee80211_hw *hw = ar->hw;
4470 u32 ac;
4471
4472 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
4473 return;
4474
4475 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
4476 return;
4477
4478 rcu_read_lock();
4479 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
4480 if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY)
4481 break;
4482 }
4483 rcu_read_unlock();
4484}
4485EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
4486
4487/************/
4488/* Scanning */
4489/************/
4490
void __ath10k_scan_finish(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		if (ar->scan.is_roc && ar->scan.roc_notify)
			ieee80211_remain_on_channel_expired(ar->hw);
		fallthrough;
	case ATH10K_SCAN_STARTING:
		if (!ar->scan.is_roc) {
			struct cfg80211_scan_info info = {
				.aborted = ((ar->scan.state ==
					    ATH10K_SCAN_ABORTING) ||
					    (ar->scan.state ==
					    ATH10K_SCAN_STARTING)),
			};

			ieee80211_scan_completed(ar->hw, &info);
		}

		ar->scan.state = ATH10K_SCAN_IDLE;
		ar->scan_channel = NULL;
		ar->scan.roc_freq = 0;
		ath10k_offchan_tx_purge(ar);
		cancel_delayed_work(&ar->scan.timeout);
		complete(&ar->scan.completed);
		break;
	}
}

void ath10k_scan_finish(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	__ath10k_scan_finish(ar);
	spin_unlock_bh(&ar->data_lock);
}
4531
static int ath10k_scan_stop(struct ath10k *ar)
{
	struct wmi_stop_scan_arg arg = {
		.req_id = 1, /* FIXME */
		.req_type = WMI_SCAN_STOP_ONE,
		.u.scan_id = ATH10K_SCAN_ID,
	};
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_stop_scan(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
		goto out;
	}

	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
	if (ret == 0) {
		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
	}

out:
	/* Scan state should be updated upon scan completion but in case
	 * firmware fails to deliver the event (for whatever reason) it is
	 * desired to clean up scan state anyway. Firmware may have just
	 * dropped the scan completion event delivery due to the transport
	 * pipe overflowing with data and/or it can recover on its own before
	 * the next scan request is submitted.
	 */
	spin_lock_bh(&ar->data_lock);
	if (ar->scan.state != ATH10K_SCAN_IDLE)
		__ath10k_scan_finish(ar);
	spin_unlock_bh(&ar->data_lock);

	return ret;
}
4572
4573static void ath10k_scan_abort(struct ath10k *ar)
4574{
4575 int ret;
4576
4577 lockdep_assert_held(&ar->conf_mutex);
4578
4579 spin_lock_bh(lock: &ar->data_lock);
4580
4581 switch (ar->scan.state) {
4582 case ATH10K_SCAN_IDLE:
4583 /* This can happen if timeout worker kicked in and called
4584 * abortion while scan completion was being processed.
4585 */
4586 break;
4587 case ATH10K_SCAN_STARTING:
4588 case ATH10K_SCAN_ABORTING:
4589 ath10k_warn(ar, fmt: "refusing scan abortion due to invalid scan state: %s (%d)\n",
4590 ath10k_scan_state_str(state: ar->scan.state),
4591 ar->scan.state);
4592 break;
4593 case ATH10K_SCAN_RUNNING:
4594 ar->scan.state = ATH10K_SCAN_ABORTING;
4595 spin_unlock_bh(lock: &ar->data_lock);
4596
4597 ret = ath10k_scan_stop(ar);
4598 if (ret)
4599 ath10k_warn(ar, fmt: "failed to abort scan: %d\n", ret);
4600
4601 spin_lock_bh(lock: &ar->data_lock);
4602 break;
4603 }
4604
4605 spin_unlock_bh(lock: &ar->data_lock);
4606}
4607
4608void ath10k_scan_timeout_work(struct work_struct *work)
4609{
4610 struct ath10k *ar = container_of(work, struct ath10k,
4611 scan.timeout.work);
4612
4613 mutex_lock(&ar->conf_mutex);
4614 ath10k_scan_abort(ar);
4615 mutex_unlock(lock: &ar->conf_mutex);
4616}
4617
4618static int ath10k_start_scan(struct ath10k *ar,
4619 const struct wmi_start_scan_arg *arg)
4620{
4621 int ret;
4622
4623 lockdep_assert_held(&ar->conf_mutex);
4624
4625 ret = ath10k_wmi_start_scan(ar, arg);
4626 if (ret)
4627 return ret;
4628
4629 ret = wait_for_completion_timeout(x: &ar->scan.started, timeout: 1 * HZ);
4630 if (ret == 0) {
4631 ret = ath10k_scan_stop(ar);
4632 if (ret)
4633 ath10k_warn(ar, fmt: "failed to stop scan: %d\n", ret);
4634
4635 return -ETIMEDOUT;
4636 }
4637
4638 /* If we failed to start the scan, return error code at
4639 * this point. This is probably due to some issue in the
4640 * firmware, but no need to wedge the driver due to that...
4641 */
4642 spin_lock_bh(lock: &ar->data_lock);
4643 if (ar->scan.state == ATH10K_SCAN_IDLE) {
4644 spin_unlock_bh(lock: &ar->data_lock);
4645 return -EINVAL;
4646 }
4647 spin_unlock_bh(lock: &ar->data_lock);
4648
4649 return 0;
4650}
4651
4652/**********************/
4653/* mac80211 callbacks */
4654/**********************/
4655
4656static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4657 struct ieee80211_tx_control *control,
4658 struct sk_buff *skb)
4659{
4660 struct ath10k *ar = hw->priv;
4661 struct ath10k_htt *htt = &ar->htt;
4662 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4663 struct ieee80211_vif *vif = info->control.vif;
4664 struct ieee80211_sta *sta = control->sta;
4665 struct ieee80211_txq *txq = NULL;
4666 enum ath10k_hw_txrx_mode txmode;
4667 enum ath10k_mac_tx_path txpath;
4668 bool is_htt;
4669 bool is_mgmt;
4670 int ret;
4671 u16 airtime;
4672
4673 airtime = ath10k_mac_update_airtime(ar, txq, skb);
4674 ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
4675
4676 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4677 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4678 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4679 txpath == ATH10K_MAC_TX_HTT_MGMT);
4680 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4681
4682 if (is_htt) {
4683 bool is_presp = false;
4684
4685 spin_lock_bh(lock: &ar->htt.tx_lock);
4686 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
4687 struct ieee80211_hdr *hdr = (void *)skb->data;
4688
4689 is_presp = ieee80211_is_probe_resp(fc: hdr->frame_control);
4690 }
4691
4692 ret = ath10k_htt_tx_inc_pending(htt);
4693 if (ret) {
4694 ath10k_warn(ar, fmt: "failed to increase tx pending count: %d, dropping\n",
4695 ret);
4696 spin_unlock_bh(lock: &ar->htt.tx_lock);
4697 ieee80211_free_txskb(hw: ar->hw, skb);
4698 return;
4699 }
4700
4701 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4702 if (ret) {
4703 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4704 ret);
4705 ath10k_htt_tx_dec_pending(htt);
4706 spin_unlock_bh(lock: &ar->htt.tx_lock);
4707 ieee80211_free_txskb(hw: ar->hw, skb);
4708 return;
4709 }
4710 spin_unlock_bh(lock: &ar->htt.tx_lock);
4711 }
4712
4713 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, noque_offchan: false);
4714 if (ret) {
4715 ath10k_warn(ar, fmt: "failed to transmit frame: %d\n", ret);
4716 if (is_htt) {
4717 spin_lock_bh(lock: &ar->htt.tx_lock);
4718 ath10k_htt_tx_dec_pending(htt);
4719 if (is_mgmt)
4720 ath10k_htt_tx_mgmt_dec_pending(htt);
4721 spin_unlock_bh(lock: &ar->htt.tx_lock);
4722 }
4723 return;
4724 }
4725}
4726
4727static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4728 struct ieee80211_txq *txq)
4729{
4730 struct ath10k *ar = hw->priv;
4731 int ret;
4732 u8 ac = txq->ac;
4733
4734 ath10k_htt_tx_txq_update(hw, txq);
4735 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
4736 return;
4737
4738 spin_lock_bh(lock: &ar->queue_lock[ac]);
4739
4740 ieee80211_txq_schedule_start(hw, ac);
4741 txq = ieee80211_next_txq(hw, ac);
4742 if (!txq)
4743 goto out;
4744
4745 while (ath10k_mac_tx_can_push(hw, txq)) {
4746 ret = ath10k_mac_tx_push_txq(hw, txq);
4747 if (ret < 0)
4748 break;
4749 }
4750 ieee80211_return_txq(hw, txq, force: false);
4751 ath10k_htt_tx_txq_update(hw, txq);
4752out:
4753 ieee80211_txq_schedule_end(hw, ac);
4754 spin_unlock_bh(lock: &ar->queue_lock[ac]);
4755}
4756
4757/* Must not be called with conf_mutex held as workers can use that also. */
4758void ath10k_drain_tx(struct ath10k *ar)
4759{
4760 lockdep_assert_not_held(&ar->conf_mutex);
4761
4762 /* make sure rcu-protected mac80211 tx path itself is drained */
4763 synchronize_net();
4764
4765 ath10k_offchan_tx_purge(ar);
4766 ath10k_mgmt_over_wmi_tx_purge(ar);
4767
4768 cancel_work_sync(work: &ar->offchan_tx_work);
4769 cancel_work_sync(work: &ar->wmi_mgmt_tx_work);
4770}
4771
4772void ath10k_halt(struct ath10k *ar)
4773{
4774 struct ath10k_vif *arvif;
4775
4776 lockdep_assert_held(&ar->conf_mutex);
4777
4778 clear_bit(nr: ATH10K_CAC_RUNNING, addr: &ar->dev_flags);
4779 ar->filter_flags = 0;
4780 ar->monitor = false;
4781 ar->monitor_arvif = NULL;
4782
4783 if (ar->monitor_started)
4784 ath10k_monitor_stop(ar);
4785
4786 ar->monitor_started = false;
4787 ar->tx_paused = 0;
4788
4789 ath10k_scan_finish(ar);
4790 ath10k_peer_cleanup_all(ar);
4791 ath10k_stop_radar_confirmation(ar);
4792 ath10k_core_stop(ar);
4793 ath10k_hif_power_down(ar);
4794
4795 spin_lock_bh(lock: &ar->data_lock);
4796 list_for_each_entry(arvif, &ar->arvifs, list)
4797 ath10k_mac_vif_beacon_cleanup(arvif);
4798 spin_unlock_bh(lock: &ar->data_lock);
4799}
4800
4801static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4802{
4803 struct ath10k *ar = hw->priv;
4804
4805 mutex_lock(&ar->conf_mutex);
4806
4807 *tx_ant = ar->cfg_tx_chainmask;
4808 *rx_ant = ar->cfg_rx_chainmask;
4809
4810 mutex_unlock(lock: &ar->conf_mutex);
4811
4812 return 0;
4813}
4814
static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
{
	/* It is not clear that allowing gaps in chainmask
	 * is helpful. Probably it will not do what the user
	 * is hoping for, so warn in that case.
	 */
	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
		return true;

	ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
		    dbg, cm);
	return false;
}
4828
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
{
	int nsts = ar->vht_cap_info;

	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;

	/* If the firmware does not report the number of supported
	 * space-time streams to the host, assume it supports up to 4 BF STS
	 * and return the value encoded in the VHT CAP field (nsts - 1).
	 */
	if (nsts == 0)
		return 3;

	return nsts;
}
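
/* Example (illustrative): if the firmware advertises a beamformee STS
 * count of 0 in vht_cap_info, this returns 3, i.e. the driver advertises
 * support for 4 space-time streams since the VHT capability field encodes
 * nsts - 1. A firmware-reported value of 2 is passed through unchanged.
 */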
4845
4846static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4847{
4848 int sound_dim = ar->vht_cap_info;
4849
4850 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4851 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4852
4853 /* If the sounding dimension is not advertised by the firmware,
4854 * let's use a default value of 1
4855 */
4856 if (sound_dim == 0)
4857 return 1;
4858
4859 return sound_dim;
4860}
4861
4862static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4863{
4864 struct ieee80211_sta_vht_cap vht_cap = {0};
4865 struct ath10k_hw_params *hw = &ar->hw_params;
4866 u16 mcs_map;
4867 u32 val;
4868 int i;
4869
4870 vht_cap.vht_supported = 1;
4871 vht_cap.cap = ar->vht_cap_info;
4872
4873 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4874 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4875 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4876 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4877 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4878
4879 vht_cap.cap |= val;
4880 }
4881
4882 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4883 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4884 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4885 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4886 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4887
4888 vht_cap.cap |= val;
4889 }
4890
4891 mcs_map = 0;
4892 for (i = 0; i < 8; i++) {
4893 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4894 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4895 else
4896 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4897 }
4898
4899 if (ar->cfg_tx_chainmask <= 1)
4900 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4901
4902 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4903 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4904
4905 /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do
4906 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give
4907 * user-space a clue if that is the case.
4908 */
4909 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
4910 (hw->vht160_mcs_rx_highest != 0 ||
4911 hw->vht160_mcs_tx_highest != 0)) {
4912 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
4913 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
4914 }
4915
4916 return vht_cap;
4917}
4918
4919static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4920{
4921 int i;
4922 struct ieee80211_sta_ht_cap ht_cap = {0};
4923
4924 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4925 return ht_cap;
4926
4927 ht_cap.ht_supported = 1;
4928 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4929 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4930 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4931 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4932 ht_cap.cap |=
4933 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4934
4935 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4936 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4937
4938 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4939 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4940
4941 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4942 u32 smps;
4943
4944 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4945 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4946
4947 ht_cap.cap |= smps;
4948 }
4949
4950 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4951 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4952
4953 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4954 u32 stbc;
4955
4956 stbc = ar->ht_cap_info;
4957 stbc &= WMI_HT_CAP_RX_STBC;
4958 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4959 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4960 stbc &= IEEE80211_HT_CAP_RX_STBC;
4961
4962 ht_cap.cap |= stbc;
4963 }
4964
4965 if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info &
4966 WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC)))
4967 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4968
4969 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4970 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4971
4972 /* max AMSDU is implicitly taken from vht_cap_info */
4973 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4974 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4975
4976 for (i = 0; i < ar->num_rf_chains; i++) {
4977 if (ar->cfg_rx_chainmask & BIT(i))
4978 ht_cap.mcs.rx_mask[i] = 0xFF;
4979 }
4980
4981 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4982
4983 return ht_cap;
4984}
4985
4986static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4987{
4988 struct ieee80211_supported_band *band;
4989 struct ieee80211_sta_vht_cap vht_cap;
4990 struct ieee80211_sta_ht_cap ht_cap;
4991
4992 ht_cap = ath10k_get_ht_cap(ar);
4993 vht_cap = ath10k_create_vht_cap(ar);
4994
4995 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4996 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4997 band->ht_cap = ht_cap;
4998 }
4999 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
5000 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
5001 band->ht_cap = ht_cap;
5002 band->vht_cap = vht_cap;
5003 }
5004}
5005
5006static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
5007{
5008 int ret;
5009 bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
5010
5011 lockdep_assert_held(&ar->conf_mutex);
5012
5013 is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, cm: tx_ant, dbg: "tx");
5014 is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, cm: rx_ant, dbg: "rx");
5015
5016 if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
5017 return -EINVAL;
5018
5019 ar->cfg_tx_chainmask = tx_ant;
5020 ar->cfg_rx_chainmask = rx_ant;
5021
5022 if ((ar->state != ATH10K_STATE_ON) &&
5023 (ar->state != ATH10K_STATE_RESTARTED))
5024 return 0;
5025
5026 ret = ath10k_wmi_pdev_set_param(ar, id: ar->wmi.pdev_param->tx_chain_mask,
5027 value: tx_ant);
5028 if (ret) {
5029 ath10k_warn(ar, fmt: "failed to set tx-chainmask: %d, req 0x%x\n",
5030 ret, tx_ant);
5031 return ret;
5032 }
5033
5034 ret = ath10k_wmi_pdev_set_param(ar, id: ar->wmi.pdev_param->rx_chain_mask,
5035 value: rx_ant);
5036 if (ret) {
5037 ath10k_warn(ar, fmt: "failed to set rx-chainmask: %d, req 0x%x\n",
5038 ret, rx_ant);
5039 return ret;
5040 }
5041
5042 /* Reload HT/VHT capability */
5043 ath10k_mac_setup_ht_vht_cap(ar);
5044
5045 return 0;
5046}
5047
5048static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
5049{
5050 struct ath10k *ar = hw->priv;
5051 int ret;
5052
5053 mutex_lock(&ar->conf_mutex);
5054 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
5055 mutex_unlock(lock: &ar->conf_mutex);
5056 return ret;
5057}
5058
static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
				       struct wmi_bb_timing_cfg_arg *bb_timing)
{
	struct device_node *node;
	const char *fem_name;
	int ret;

	node = ar->dev->of_node;
	if (!node)
		return -ENOENT;

	ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
	if (ret)
		return -ENOENT;

	/*
	 * If an external Front End Module (FEM) is used in the hardware, the
	 * default baseband timing parameters cannot be used since they were
	 * fine tuned for the reference hardware, so choose different values
	 * suitable for that external FEM.
	 */
	if (!strcmp("microsemi-lx5586", fem_name)) {
		bb_timing->bb_tx_timing = 0x00;
		bb_timing->bb_xpa_timing = 0x0101;
	} else {
		return -ENOENT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
		   bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing);
	return 0;
}
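
/* A hypothetical device tree fragment that would select the
 * microsemi-lx5586 timings above (node name and compatible string are
 * illustrative only, not taken from a real board file):
 *
 *	wifi@0 {
 *		compatible = "qcom,ath10k";
 *		ext-fem-name = "microsemi-lx5586";
 *	};
 */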
5090
5091static int ath10k_mac_rfkill_config(struct ath10k *ar)
5092{
5093 u32 param;
5094 int ret;
5095
5096 if (ar->hw_values->rfkill_pin == 0) {
5097 ath10k_warn(ar, fmt: "ath10k does not support hardware rfkill with this device\n");
5098 return -EOPNOTSUPP;
5099 }
5100
5101 ath10k_dbg(ar, ATH10K_DBG_MAC,
5102 "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
5103 ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg,
5104 ar->hw_values->rfkill_on_level);
5105
5106 param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL,
5107 ar->hw_values->rfkill_on_level) |
5108 FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM,
5109 ar->hw_values->rfkill_pin) |
5110 FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO,
5111 ar->hw_values->rfkill_cfg);
5112
5113 ret = ath10k_wmi_pdev_set_param(ar,
5114 id: ar->wmi.pdev_param->rfkill_config,
5115 value: param);
5116 if (ret) {
5117 ath10k_warn(ar,
5118 fmt: "failed to set rfkill config 0x%x: %d\n",
5119 param, ret);
5120 return ret;
5121 }
5122 return 0;
5123}
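
/* For illustration only, with made-up values: a board with the rfkill
 * GPIO on pin 16, an active-high radio-on level and pin-as-GPIO
 * configuration would be encoded roughly as
 *
 *	param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL, 1) |
 *		FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM, 16) |
 *		FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO, 1);
 *
 * before being sent via the rfkill_config pdev parameter.
 */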
5124
5125int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable)
5126{
5127 enum wmi_tlv_rfkill_enable_radio param;
5128 int ret;
5129
5130 if (enable)
5131 param = WMI_TLV_RFKILL_ENABLE_RADIO_ON;
5132 else
5133 param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF;
5134
5135 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param);
5136
5137 ret = ath10k_wmi_pdev_set_param(ar, id: ar->wmi.pdev_param->rfkill_enable,
5138 value: param);
5139 if (ret) {
5140 ath10k_warn(ar, fmt: "failed to set rfkill enable param %d: %d\n",
5141 param, ret);
5142 return ret;
5143 }
5144
5145 return 0;
5146}
5147
5148static int ath10k_start(struct ieee80211_hw *hw)
5149{
5150 struct ath10k *ar = hw->priv;
5151 u32 param;
5152 int ret = 0;
5153 struct wmi_bb_timing_cfg_arg bb_timing = {0};
5154
5155 /*
5156 * This makes sense only when restarting hw. It is harmless to call
5157 * unconditionally. This is necessary to make sure no HTT/WMI tx
5158 * commands will be submitted while restarting.
5159 */
5160 ath10k_drain_tx(ar);
5161
5162 mutex_lock(&ar->conf_mutex);
5163
5164 switch (ar->state) {
5165 case ATH10K_STATE_OFF:
5166 ar->state = ATH10K_STATE_ON;
5167 break;
5168 case ATH10K_STATE_RESTARTING:
5169 ar->state = ATH10K_STATE_RESTARTED;
5170 break;
5171 case ATH10K_STATE_ON:
5172 case ATH10K_STATE_RESTARTED:
5173 case ATH10K_STATE_WEDGED:
5174 WARN_ON(1);
5175 ret = -EINVAL;
5176 goto err;
5177 case ATH10K_STATE_UTF:
5178 ret = -EBUSY;
5179 goto err;
5180 }
5181
5182 spin_lock_bh(lock: &ar->data_lock);
5183
5184 if (ar->hw_rfkill_on) {
5185 ar->hw_rfkill_on = false;
5186 spin_unlock_bh(lock: &ar->data_lock);
5187 goto err;
5188 }
5189
5190 spin_unlock_bh(lock: &ar->data_lock);
5191
5192 ret = ath10k_hif_power_up(ar, fw_mode: ATH10K_FIRMWARE_MODE_NORMAL);
5193 if (ret) {
5194 ath10k_err(ar, fmt: "Could not init hif: %d\n", ret);
5195 goto err_off;
5196 }
5197
5198 ret = ath10k_core_start(ar, mode: ATH10K_FIRMWARE_MODE_NORMAL,
5199 fw_components: &ar->normal_mode_fw);
5200 if (ret) {
5201 ath10k_err(ar, fmt: "Could not init core: %d\n", ret);
5202 goto err_power_down;
5203 }
5204
5205 if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
5206 ret = ath10k_mac_rfkill_config(ar);
5207 if (ret && ret != -EOPNOTSUPP) {
5208 ath10k_warn(ar, fmt: "failed to configure rfkill: %d", ret);
5209 goto err_core_stop;
5210 }
5211 }
5212
5213 param = ar->wmi.pdev_param->pmf_qos;
5214 ret = ath10k_wmi_pdev_set_param(ar, id: param, value: 1);
5215 if (ret) {
5216 ath10k_warn(ar, fmt: "failed to enable PMF QOS: %d\n", ret);
5217 goto err_core_stop;
5218 }
5219
5220 param = ar->wmi.pdev_param->dynamic_bw;
5221 ret = ath10k_wmi_pdev_set_param(ar, id: param, value: 1);
5222 if (ret) {
5223 ath10k_warn(ar, fmt: "failed to enable dynamic BW: %d\n", ret);
5224 goto err_core_stop;
5225 }
5226
5227 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
5228 ret = ath10k_wmi_scan_prob_req_oui(ar, mac_addr: ar->mac_addr);
5229 if (ret) {
5230 ath10k_err(ar, fmt: "failed to set prob req oui: %i\n", ret);
5231 goto err_core_stop;
5232 }
5233 }
5234
5235 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
5236 ret = ath10k_wmi_adaptive_qcs(ar, enable: true);
5237 if (ret) {
5238 ath10k_warn(ar, fmt: "failed to enable adaptive qcs: %d\n",
5239 ret);
5240 goto err_core_stop;
5241 }
5242 }
5243
5244 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
5245 param = ar->wmi.pdev_param->burst_enable;
5246 ret = ath10k_wmi_pdev_set_param(ar, id: param, value: 0);
5247 if (ret) {
5248 ath10k_warn(ar, fmt: "failed to disable burst: %d\n", ret);
5249 goto err_core_stop;
5250 }
5251 }
5252
5253 param = ar->wmi.pdev_param->idle_ps_config;
5254 ret = ath10k_wmi_pdev_set_param(ar, id: param, value: 1);
5255 if (ret && ret != -EOPNOTSUPP) {
5256 ath10k_warn(ar, fmt: "failed to enable idle_ps_config: %d\n", ret);
5257 goto err_core_stop;
5258 }
5259
5260 __ath10k_set_antenna(ar, tx_ant: ar->cfg_tx_chainmask, rx_ant: ar->cfg_rx_chainmask);
5261
	/*
	 * By default the FW sets the access category of ARP frames to voice
	 * (6). In that case ARP exchange does not work properly for a UAPSD
	 * enabled AP. ARP requests arriving with access category 0 are
	 * processed by the network stack and sent back with access category
	 * 0, but the FW changes the access category to 6. Setting the ARP
	 * frames access category to best effort (0) solves this problem.
	 */

	param = ar->wmi.pdev_param->arp_ac_override;
	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
	if (ret) {
		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
			    ret);
		goto err_core_stop;
	}
5278
5279 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
5280 ar->running_fw->fw_file.fw_features)) {
5281 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, enable: 1,
5282 WMI_CCA_DETECT_LEVEL_AUTO,
5283 WMI_CCA_DETECT_MARGIN_AUTO);
5284 if (ret) {
5285 ath10k_warn(ar, fmt: "failed to enable adaptive cca: %d\n",
5286 ret);
5287 goto err_core_stop;
5288 }
5289 }
5290
5291 param = ar->wmi.pdev_param->ani_enable;
5292 ret = ath10k_wmi_pdev_set_param(ar, id: param, value: 1);
5293 if (ret) {
5294 ath10k_warn(ar, fmt: "failed to enable ani by default: %d\n",
5295 ret);
5296 goto err_core_stop;
5297 }
5298
5299 ar->ani_enabled = true;
5300
5301 if (ath10k_peer_stats_enabled(ar)) {
5302 param = ar->wmi.pdev_param->peer_stats_update_period;
5303 ret = ath10k_wmi_pdev_set_param(ar, id: param,
5304 PEER_DEFAULT_STATS_UPDATE_PERIOD);
5305 if (ret) {
5306 ath10k_warn(ar,
5307 fmt: "failed to set peer stats period : %d\n",
5308 ret);
5309 goto err_core_stop;
5310 }
5311 }
5312
5313 param = ar->wmi.pdev_param->enable_btcoex;
5314 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
5315 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
5316 ar->running_fw->fw_file.fw_features) &&
5317 ar->coex_support) {
5318 ret = ath10k_wmi_pdev_set_param(ar, id: param, value: 0);
5319 if (ret) {
5320 ath10k_warn(ar,
5321 fmt: "failed to set btcoex param: %d\n", ret);
5322 goto err_core_stop;
5323 }
5324 clear_bit(nr: ATH10K_FLAG_BTCOEX, addr: &ar->dev_flags);
5325 }
5326
5327 if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) {
5328 ret = __ath10k_fetch_bb_timing_dt(ar, bb_timing: &bb_timing);
5329 if (!ret) {
5330 ret = ath10k_wmi_pdev_bb_timing(ar, arg: &bb_timing);
5331 if (ret) {
5332 ath10k_warn(ar,
5333 fmt: "failed to set bb timings: %d\n",
5334 ret);
5335 goto err_core_stop;
5336 }
5337 }
5338 }
5339
5340 ar->num_started_vdevs = 0;
5341 ath10k_regd_update(ar);
5342
5343 ath10k_spectral_start(ar);
5344 ath10k_thermal_set_throttling(ar);
5345
5346 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
5347
5348 mutex_unlock(lock: &ar->conf_mutex);
5349 return 0;
5350
5351err_core_stop:
5352 ath10k_core_stop(ar);
5353
5354err_power_down:
5355 ath10k_hif_power_down(ar);
5356
5357err_off:
5358 ar->state = ATH10K_STATE_OFF;
5359
5360err:
5361 mutex_unlock(lock: &ar->conf_mutex);
5362 return ret;
5363}
5364
5365static void ath10k_stop(struct ieee80211_hw *hw)
5366{
5367 struct ath10k *ar = hw->priv;
5368 u32 opt;
5369
5370 ath10k_drain_tx(ar);
5371
5372 mutex_lock(&ar->conf_mutex);
5373 if (ar->state != ATH10K_STATE_OFF) {
5374 if (!ar->hw_rfkill_on) {
5375 /* If the current driver state is RESTARTING but not yet
5376 * fully RESTARTED because of incoming suspend event,
5377 * then ath10k_halt() is already called via
5378 * ath10k_core_restart() and should not be called here.
5379 */
5380 if (ar->state != ATH10K_STATE_RESTARTING) {
5381 ath10k_halt(ar);
5382 } else {
5383 /* Suspending here, because when in RESTARTING
5384 * state, ath10k_core_stop() skips
5385 * ath10k_wait_for_suspend().
5386 */
5387 opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
5388 ath10k_wait_for_suspend(ar, suspend_opt: opt);
5389 }
5390 }
5391 ar->state = ATH10K_STATE_OFF;
5392 }
5393 mutex_unlock(lock: &ar->conf_mutex);
5394
5395 cancel_work_sync(work: &ar->set_coverage_class_work);
5396 cancel_delayed_work_sync(dwork: &ar->scan.timeout);
5397 cancel_work_sync(work: &ar->restart_work);
5398}
5399
5400static int ath10k_config_ps(struct ath10k *ar)
5401{
5402 struct ath10k_vif *arvif;
5403 int ret = 0;
5404
5405 lockdep_assert_held(&ar->conf_mutex);
5406
5407 list_for_each_entry(arvif, &ar->arvifs, list) {
5408 ret = ath10k_mac_vif_setup_ps(arvif);
5409 if (ret) {
5410 ath10k_warn(ar, fmt: "failed to setup powersave: %d\n", ret);
5411 break;
5412 }
5413 }
5414
5415 return ret;
5416}
5417
5418static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
5419{
5420 struct ath10k *ar = hw->priv;
5421 struct ieee80211_conf *conf = &hw->conf;
5422 int ret = 0;
5423
5424 mutex_lock(&ar->conf_mutex);
5425
5426 if (changed & IEEE80211_CONF_CHANGE_PS)
5427 ath10k_config_ps(ar);
5428
5429 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
5430 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
5431 ret = ath10k_monitor_recalc(ar);
5432 if (ret)
5433 ath10k_warn(ar, fmt: "failed to recalc monitor: %d\n", ret);
5434 }
5435
5436 mutex_unlock(lock: &ar->conf_mutex);
5437 return ret;
5438}
5439
static u32 get_nss_from_chainmask(u16 chain_mask)
{
	if ((chain_mask & 0xf) == 0xf)
		return 4;
	else if ((chain_mask & 0x7) == 0x7)
		return 3;
	else if ((chain_mask & 0x3) == 0x3)
		return 2;
	return 1;
}
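
/* Example mappings (illustrative): chain_mask 0xf -> 4 spatial streams,
 * 0x7 -> 3, 0x3 -> 2, and a sparse mask such as 0x5 falls through to 1
 * because only contiguous low bits are considered.
 */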
5450
5451static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
5452{
5453 u32 value = 0;
5454 struct ath10k *ar = arvif->ar;
5455 int nsts;
5456 int sound_dim;
5457
5458 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
5459 return 0;
5460
5461 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
5462 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
5463 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
5464 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
5465
5466 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
5467 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
5468 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
5469 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
5470
5471 if (!value)
5472 return 0;
5473
5474 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
5475 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
5476
5477 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
5478 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
5479 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
5480
5481 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
5482 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
5483
5484 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
5485 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
5486 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
5487
5488 return ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id,
5489 param_id: ar->wmi.vdev_param->txbf, param_value: value);
5490}
5491
5492static void ath10k_update_vif_offload(struct ieee80211_hw *hw,
5493 struct ieee80211_vif *vif)
5494{
5495 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5496 struct ath10k *ar = hw->priv;
5497 u32 vdev_param;
5498 int ret;
5499
5500 if (ath10k_frame_mode != ATH10K_HW_TXRX_ETHERNET ||
5501 ar->wmi.vdev_param->tx_encap_type == WMI_VDEV_PARAM_UNSUPPORTED ||
5502 (vif->type != NL80211_IFTYPE_STATION &&
5503 vif->type != NL80211_IFTYPE_AP))
5504 vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
5505
5506 vdev_param = ar->wmi.vdev_param->tx_encap_type;
5507 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
5508 param_value: ATH10K_HW_TXRX_NATIVE_WIFI);
5509 /* 10.X firmware does not support this VDEV parameter. Do not warn */
5510 if (ret && ret != -EOPNOTSUPP) {
5511 ath10k_warn(ar, fmt: "failed to set vdev %i TX encapsulation: %d\n",
5512 arvif->vdev_id, ret);
5513 }
5514}
5515
5516/*
5517 * TODO:
5518 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
5519 * because we will send mgmt frames without CCK. This requirement
5520 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
5521 * in the TX packet.
5522 */
5523static int ath10k_add_interface(struct ieee80211_hw *hw,
5524 struct ieee80211_vif *vif)
5525{
5526 struct ath10k *ar = hw->priv;
5527 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5528 struct ath10k_peer *peer;
5529 enum wmi_sta_powersave_param param;
5530 int ret = 0;
5531 u32 value;
5532 int bit;
5533 int i;
5534 u32 vdev_param;
5535
5536 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
5537
5538 mutex_lock(&ar->conf_mutex);
5539
5540 memset(arvif, 0, sizeof(*arvif));
5541 ath10k_mac_txq_init(txq: vif->txq);
5542
5543 arvif->ar = ar;
5544 arvif->vif = vif;
5545
5546 INIT_LIST_HEAD(list: &arvif->list);
5547 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
5548 INIT_DELAYED_WORK(&arvif->connection_loss_work,
5549 ath10k_mac_vif_sta_connection_loss_work);
5550
5551 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
5552 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
5553 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
5554 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
5555 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
5556 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
5557 }
5558
5559 if (ar->num_peers >= ar->max_num_peers) {
5560 ath10k_warn(ar, fmt: "refusing vdev creation due to insufficient peer entry resources in firmware\n");
5561 ret = -ENOBUFS;
5562 goto err;
5563 }
5564
5565 if (ar->free_vdev_map == 0) {
5566 ath10k_warn(ar, fmt: "Free vdev map is empty, no more interfaces allowed.\n");
5567 ret = -EBUSY;
5568 goto err;
5569 }
5570 bit = __ffs64(word: ar->free_vdev_map);
5571
5572 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
5573 bit, ar->free_vdev_map);
5574
5575 arvif->vdev_id = bit;
5576 arvif->vdev_subtype =
5577 ath10k_wmi_get_vdev_subtype(ar, subtype: WMI_VDEV_SUBTYPE_NONE);
5578
5579 switch (vif->type) {
5580 case NL80211_IFTYPE_P2P_DEVICE:
5581 arvif->vdev_type = WMI_VDEV_TYPE_STA;
5582 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5583 (ar, subtype: WMI_VDEV_SUBTYPE_P2P_DEVICE);
5584 break;
5585 case NL80211_IFTYPE_UNSPECIFIED:
5586 case NL80211_IFTYPE_STATION:
5587 arvif->vdev_type = WMI_VDEV_TYPE_STA;
5588 if (vif->p2p)
5589 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5590 (ar, subtype: WMI_VDEV_SUBTYPE_P2P_CLIENT);
5591 break;
5592 case NL80211_IFTYPE_ADHOC:
5593 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
5594 break;
5595 case NL80211_IFTYPE_MESH_POINT:
5596 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
5597 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5598 (ar, subtype: WMI_VDEV_SUBTYPE_MESH_11S);
5599 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5600 ret = -EINVAL;
5601 ath10k_warn(ar, fmt: "must load driver with rawmode=1 to add mesh interfaces\n");
5602 goto err;
5603 }
5604 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5605 break;
5606 case NL80211_IFTYPE_AP:
5607 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5608
5609 if (vif->p2p)
5610 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5611 (ar, subtype: WMI_VDEV_SUBTYPE_P2P_GO);
5612 break;
5613 case NL80211_IFTYPE_MONITOR:
5614 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
5615 break;
5616 default:
5617 WARN_ON(1);
5618 break;
5619 }
5620
5621 /* Using vdev_id as queue number will make it very easy to do per-vif
5622 * tx queue locking. This shouldn't wrap due to interface combinations
5623 * but do a modulo for correctness sake and prevent using offchannel tx
5624 * queues for regular vif tx.
5625 */
5626 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5627 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
5628 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5629
5630 /* Some firmware revisions don't wait for beacon tx completion before
5631 * sending another SWBA event. This could lead to hardware using old
5632 * (freed) beacon data in some cases, e.g. tx credit starvation
5633 * combined with missed TBTT. This is very rare.
5634 *
5635 * On non-IOMMU-enabled hosts this could be a possible security issue
5636 * because hw could beacon some random data on the air. On
5637 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
5638 * device would crash.
5639 *
5640 * Since there are no beacon tx completions (implicit nor explicit)
5641 * propagated to host the only workaround for this is to allocate a
5642 * DMA-coherent buffer for a lifetime of a vif and use it for all
5643 * beacon tx commands. Worst case for this approach is some beacons may
5644 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
5645 */
5646 if (vif->type == NL80211_IFTYPE_ADHOC ||
5647 vif->type == NL80211_IFTYPE_MESH_POINT ||
5648 vif->type == NL80211_IFTYPE_AP) {
5649 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
5650 arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
5651 GFP_KERNEL);
5652
5653 /* Using a kernel pointer in place of a dma_addr_t
5654 * token can lead to undefined behavior if that
5655 * makes it into cache management functions. Use a
5656 * known-invalid address token instead, which
5657 * avoids the warning and makes it easier to catch
5658 * bugs if it does end up getting used.
5659 */
5660 arvif->beacon_paddr = DMA_MAPPING_ERROR;
5661 } else {
5662 arvif->beacon_buf =
5663 dma_alloc_coherent(dev: ar->dev,
5664 IEEE80211_MAX_FRAME_LEN,
5665 dma_handle: &arvif->beacon_paddr,
5666 GFP_ATOMIC);
5667 }
5668 if (!arvif->beacon_buf) {
5669 ret = -ENOMEM;
5670 ath10k_warn(ar, fmt: "failed to allocate beacon buffer: %d\n",
5671 ret);
5672 goto err;
5673 }
5674 }
5675 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
5676 arvif->nohwcrypt = true;
5677
5678 if (arvif->nohwcrypt &&
5679 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5680 ret = -EINVAL;
5681 ath10k_warn(ar, fmt: "cryptmode module param needed for sw crypto\n");
5682 goto err;
5683 }
5684
5685 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
5686 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
5687 arvif->beacon_buf ? "single-buf" : "per-skb");
5688
5689 ret = ath10k_wmi_vdev_create(ar, vdev_id: arvif->vdev_id, type: arvif->vdev_type,
5690 subtype: arvif->vdev_subtype, macaddr: vif->addr);
5691 if (ret) {
5692 ath10k_warn(ar, fmt: "failed to create WMI vdev %i: %d\n",
5693 arvif->vdev_id, ret);
5694 goto err;
5695 }
5696
5697 if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
5698 ar->wmi.svc_map)) {
5699 vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn;
5700 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
5701 WMI_VDEV_DISABLE_4_ADDR_SRC_LRN);
5702 if (ret && ret != -EOPNOTSUPP) {
5703 ath10k_warn(ar, fmt: "failed to disable 4addr src lrn vdev %i: %d\n",
5704 arvif->vdev_id, ret);
5705 }
5706 }
5707
5708 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
5709 spin_lock_bh(lock: &ar->data_lock);
5710 list_add(new: &arvif->list, head: &ar->arvifs);
5711 spin_unlock_bh(lock: &ar->data_lock);
5712
5713 /* It makes no sense to have firmware do keepalives. mac80211 already
5714 * takes care of this with idle connection polling.
5715 */
5716 ret = ath10k_mac_vif_disable_keepalive(arvif);
5717 if (ret) {
5718 ath10k_warn(ar, fmt: "failed to disable keepalive on vdev %i: %d\n",
5719 arvif->vdev_id, ret);
5720 goto err_vdev_delete;
5721 }
5722
5723 arvif->def_wep_key_idx = -1;
5724
5725 ath10k_update_vif_offload(hw, vif);
5726
5727 /* Configuring number of spatial stream for monitor interface is causing
5728 * target assert in qca9888 and qca6174.
5729 */
5730 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
5731 u16 nss = get_nss_from_chainmask(chain_mask: ar->cfg_tx_chainmask);
5732
5733 vdev_param = ar->wmi.vdev_param->nss;
5734 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
5735 param_value: nss);
5736 if (ret) {
5737 ath10k_warn(ar, fmt: "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
5738 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
5739 ret);
5740 goto err_vdev_delete;
5741 }
5742 }
5743
5744 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5745 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5746 ret = ath10k_peer_create(ar, vif, NULL, vdev_id: arvif->vdev_id,
5747 addr: vif->addr, peer_type: WMI_PEER_TYPE_DEFAULT);
5748 if (ret) {
5749 ath10k_warn(ar, fmt: "failed to create vdev %i peer for AP/IBSS: %d\n",
5750 arvif->vdev_id, ret);
5751 goto err_vdev_delete;
5752 }
5753
5754 spin_lock_bh(lock: &ar->data_lock);
5755
5756 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr: vif->addr);
5757 if (!peer) {
5758 ath10k_warn(ar, fmt: "failed to lookup peer %pM on vdev %i\n",
5759 vif->addr, arvif->vdev_id);
5760 spin_unlock_bh(lock: &ar->data_lock);
5761 ret = -ENOENT;
5762 goto err_peer_delete;
5763 }
5764
5765 arvif->peer_id = find_first_bit(addr: peer->peer_ids,
5766 ATH10K_MAX_NUM_PEER_IDS);
5767
5768 spin_unlock_bh(lock: &ar->data_lock);
5769 } else {
5770 arvif->peer_id = HTT_INVALID_PEERID;
5771 }
5772
5773 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5774 ret = ath10k_mac_set_kickout(arvif);
5775 if (ret) {
5776 ath10k_warn(ar, fmt: "failed to set vdev %i kickout parameters: %d\n",
5777 arvif->vdev_id, ret);
5778 goto err_peer_delete;
5779 }
5780 }
5781
5782 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5783 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5784 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5785 ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id: arvif->vdev_id,
5786 param_id: param, value);
5787 if (ret) {
5788 ath10k_warn(ar, fmt: "failed to set vdev %i RX wake policy: %d\n",
5789 arvif->vdev_id, ret);
5790 goto err_peer_delete;
5791 }
5792
5793 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5794 if (ret) {
5795 ath10k_warn(ar, fmt: "failed to recalc ps wake threshold on vdev %i: %d\n",
5796 arvif->vdev_id, ret);
5797 goto err_peer_delete;
5798 }
5799
5800 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5801 if (ret) {
5802 ath10k_warn(ar, fmt: "failed to recalc ps poll count on vdev %i: %d\n",
5803 arvif->vdev_id, ret);
5804 goto err_peer_delete;
5805 }
5806 }
5807
5808 ret = ath10k_mac_set_txbf_conf(arvif);
5809 if (ret) {
5810 ath10k_warn(ar, fmt: "failed to set txbf for vdev %d: %d\n",
5811 arvif->vdev_id, ret);
5812 goto err_peer_delete;
5813 }
5814
5815 ret = ath10k_mac_set_rts(arvif, value: ar->hw->wiphy->rts_threshold);
5816 if (ret) {
5817 ath10k_warn(ar, fmt: "failed to set rts threshold for vdev %d: %d\n",
5818 arvif->vdev_id, ret);
5819 goto err_peer_delete;
5820 }
5821
5822 arvif->txpower = vif->bss_conf.txpower;
5823 ret = ath10k_mac_txpower_recalc(ar);
5824 if (ret) {
5825 ath10k_warn(ar, fmt: "failed to recalc tx power: %d\n", ret);
5826 goto err_peer_delete;
5827 }
5828
5829 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
5830 vdev_param = ar->wmi.vdev_param->rtt_responder_role;
5831 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
5832 param_value: arvif->ftm_responder);
5833
5834 /* It is harmless to not set FTM role. Do not warn */
5835 if (ret && ret != -EOPNOTSUPP)
5836 ath10k_warn(ar, fmt: "failed to set vdev %i FTM Responder: %d\n",
5837 arvif->vdev_id, ret);
5838 }
5839
5840 if (vif->type == NL80211_IFTYPE_MONITOR) {
5841 ar->monitor_arvif = arvif;
5842 ret = ath10k_monitor_recalc(ar);
5843 if (ret) {
5844 ath10k_warn(ar, fmt: "failed to recalc monitor: %d\n", ret);
5845 goto err_peer_delete;
5846 }
5847 }
5848
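	/* Wake this vdev's queue unless TX is paused globally. */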
5849 spin_lock_bh(lock: &ar->htt.tx_lock);
5850 if (!ar->tx_paused)
5851 ieee80211_wake_queue(hw: ar->hw, queue: arvif->vdev_id);
5852 spin_unlock_bh(lock: &ar->htt.tx_lock);
5853
5854 mutex_unlock(lock: &ar->conf_mutex);
5855 return 0;
5856
5857err_peer_delete:
5858 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5859 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5860 ath10k_wmi_peer_delete(ar, vdev_id: arvif->vdev_id, peer_addr: vif->addr);
5861 ath10k_wait_for_peer_delete_done(ar, vdev_id: arvif->vdev_id,
5862 addr: vif->addr);
5863 }
5864
5865err_vdev_delete:
5866 ath10k_wmi_vdev_delete(ar, vdev_id: arvif->vdev_id);
5867 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5868 spin_lock_bh(lock: &ar->data_lock);
5869 list_del(entry: &arvif->list);
5870 spin_unlock_bh(lock: &ar->data_lock);
5871
5872err:
	if (arvif->beacon_buf) {
		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
			kfree(arvif->beacon_buf);
		else
			dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
					  arvif->beacon_buf,
					  arvif->beacon_paddr);
		arvif->beacon_buf = NULL;
	}
5882
5883 mutex_unlock(lock: &ar->conf_mutex);
5884
5885 return ret;
5886}
5887
5888static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5889{
5890 int i;
5891
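	/* Clear every possible TX pause reason bit for this vif. */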
5892 for (i = 0; i < BITS_PER_LONG; i++)
5893 ath10k_mac_vif_tx_unlock(arvif, reason: i);
5894}
5895
5896static void ath10k_remove_interface(struct ieee80211_hw *hw,
5897 struct ieee80211_vif *vif)
5898{
5899 struct ath10k *ar = hw->priv;
5900 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5901 struct ath10k_peer *peer;
5902 unsigned long time_left;
5903 int ret;
5904 int i;
5905
5906 cancel_work_sync(work: &arvif->ap_csa_work);
5907 cancel_delayed_work_sync(dwork: &arvif->connection_loss_work);
5908
5909 mutex_lock(&ar->conf_mutex);
5910
5911 ret = ath10k_spectral_vif_stop(arvif);
5912 if (ret)
5913 ath10k_warn(ar, fmt: "failed to stop spectral for vdev %i: %d\n",
5914 arvif->vdev_id, ret);
5915
5916 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5917 spin_lock_bh(lock: &ar->data_lock);
5918 list_del(entry: &arvif->list);
5919 spin_unlock_bh(lock: &ar->data_lock);
5920
5921 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5922 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5923 ret = ath10k_wmi_peer_delete(ar: arvif->ar, vdev_id: arvif->vdev_id,
5924 peer_addr: vif->addr);
5925 if (ret)
5926 ath10k_warn(ar, fmt: "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5927 arvif->vdev_id, ret);
5928
5929 ath10k_wait_for_peer_delete_done(ar, vdev_id: arvif->vdev_id,
5930 addr: vif->addr);
5931 kfree(objp: arvif->u.ap.noa_data);
5932 }
5933
5934 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5935 arvif->vdev_id);
5936
5937 ret = ath10k_wmi_vdev_delete(ar, vdev_id: arvif->vdev_id);
5938 if (ret)
5939 ath10k_warn(ar, fmt: "failed to delete WMI vdev %i: %d\n",
5940 arvif->vdev_id, ret);
5941
5942 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
5943 time_left = wait_for_completion_timeout(x: &ar->vdev_delete_done,
5944 ATH10K_VDEV_DELETE_TIMEOUT_HZ);
5945 if (time_left == 0) {
5946 ath10k_warn(ar, fmt: "Timeout in receiving vdev delete response\n");
5947 goto out;
5948 }
5949 }
5950
	/* Some firmware revisions don't notify the host about self-peer
	 * removal until after the associated vdev is deleted.
	 */
5954 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5955 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5956 ret = ath10k_wait_for_peer_deleted(ar, vdev_id: arvif->vdev_id,
5957 addr: vif->addr);
5958 if (ret)
5959 ath10k_warn(ar, fmt: "failed to remove AP self-peer on vdev %i: %d\n",
5960 arvif->vdev_id, ret);
5961
5962 spin_lock_bh(lock: &ar->data_lock);
5963 ar->num_peers--;
5964 spin_unlock_bh(lock: &ar->data_lock);
5965 }
5966
5967 spin_lock_bh(lock: &ar->data_lock);
5968 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5969 peer = ar->peer_map[i];
5970 if (!peer)
5971 continue;
5972
5973 if (peer->vif == vif) {
5974 ath10k_warn(ar, fmt: "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5975 vif->addr, arvif->vdev_id);
5976 peer->vif = NULL;
5977 }
5978 }
5979
	/* Clean this up late; it leaves less opportunity for the firmware to
	 * access DMA memory that is about to be freed.
	 */
5983 ath10k_mac_vif_beacon_cleanup(arvif);
5984 spin_unlock_bh(lock: &ar->data_lock);
5985
5986 ath10k_peer_cleanup(ar, vdev_id: arvif->vdev_id);
5987 ath10k_mac_txq_unref(ar, txq: vif->txq);
5988
5989 if (vif->type == NL80211_IFTYPE_MONITOR) {
5990 ar->monitor_arvif = NULL;
5991 ret = ath10k_monitor_recalc(ar);
5992 if (ret)
5993 ath10k_warn(ar, fmt: "failed to recalc monitor: %d\n", ret);
5994 }
5995
5996 ret = ath10k_mac_txpower_recalc(ar);
5997 if (ret)
5998 ath10k_warn(ar, fmt: "failed to recalc tx power: %d\n", ret);
5999
6000 spin_lock_bh(lock: &ar->htt.tx_lock);
6001 ath10k_mac_vif_tx_unlock_all(arvif);
6002 spin_unlock_bh(lock: &ar->htt.tx_lock);
6003
6004 ath10k_mac_txq_unref(ar, txq: vif->txq);
6005
6006out:
6007 mutex_unlock(lock: &ar->conf_mutex);
6008}
6009
6010/*
6011 * FIXME: Has to be verified.
6012 */
6013#define SUPPORTED_FILTERS \
6014 (FIF_ALLMULTI | \
6015 FIF_CONTROL | \
6016 FIF_PSPOLL | \
6017 FIF_OTHER_BSS | \
6018 FIF_BCN_PRBRESP_PROMISC | \
6019 FIF_PROBE_REQ | \
6020 FIF_FCSFAIL)
6021
6022static void ath10k_configure_filter(struct ieee80211_hw *hw,
6023 unsigned int changed_flags,
6024 unsigned int *total_flags,
6025 u64 multicast)
6026{
6027 struct ath10k *ar = hw->priv;
6028 int ret;
6029 unsigned int supported = SUPPORTED_FILTERS;
6030
6031 mutex_lock(&ar->conf_mutex);
6032
6033 if (ar->hw_params.mcast_frame_registration)
6034 supported |= FIF_MCAST_ACTION;
6035
6036 *total_flags &= supported;
6037
6038 ar->filter_flags = *total_flags;
6039
6040 ret = ath10k_monitor_recalc(ar);
6041 if (ret)
6042 ath10k_warn(ar, fmt: "failed to recalc monitor: %d\n", ret);
6043
6044 mutex_unlock(lock: &ar->conf_mutex);
6045}
6046
6047static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
6048 struct ieee80211_vif *vif,
6049 struct cfg80211_chan_def *def)
6050{
6051 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6052 const struct ieee80211_supported_band *sband;
6053 u8 basic_rate_idx;
6054 int hw_rate_code;
6055 u32 vdev_param;
6056 u16 bitrate;
6057 int ret;
6058
6059 lockdep_assert_held(&ar->conf_mutex);
6060
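	/* Use the lowest configured basic rate as the management frame rate. */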
6061 sband = ar->hw->wiphy->bands[def->chan->band];
6062 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
6063 bitrate = sband->bitrates[basic_rate_idx].bitrate;
6064
6065 hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
6066 if (hw_rate_code < 0) {
6067 ath10k_warn(ar, fmt: "bitrate not supported %d\n", bitrate);
6068 return;
6069 }
6070
6071 vdev_param = ar->wmi.vdev_param->mgmt_rate;
6072 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6073 param_value: hw_rate_code);
6074 if (ret)
6075 ath10k_warn(ar, fmt: "failed to set mgmt tx rate %d\n", ret);
6076}
6077
6078static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
6079 struct ieee80211_vif *vif,
6080 struct ieee80211_bss_conf *info,
6081 u64 changed)
6082{
6083 struct ath10k *ar = hw->priv;
6084 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6085 struct cfg80211_chan_def def;
6086 u32 vdev_param, pdev_param, slottime, preamble;
6087 u16 bitrate, hw_value;
6088 u8 rate, rateidx;
6089 int ret = 0, mcast_rate;
6090 enum nl80211_band band;
6091
6092 mutex_lock(&ar->conf_mutex);
6093
6094 if (changed & BSS_CHANGED_IBSS)
6095 ath10k_control_ibss(arvif, vif);
6096
6097 if (changed & BSS_CHANGED_BEACON_INT) {
6098 arvif->beacon_interval = info->beacon_int;
6099 vdev_param = ar->wmi.vdev_param->beacon_interval;
6100 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6101 param_value: arvif->beacon_interval);
6102 ath10k_dbg(ar, ATH10K_DBG_MAC,
6103 "mac vdev %d beacon_interval %d\n",
6104 arvif->vdev_id, arvif->beacon_interval);
6105
6106 if (ret)
6107 ath10k_warn(ar, fmt: "failed to set beacon interval for vdev %d: %i\n",
6108 arvif->vdev_id, ret);
6109 }
6110
6111 if (changed & BSS_CHANGED_BEACON) {
6112 ath10k_dbg(ar, ATH10K_DBG_MAC,
6113 "vdev %d set beacon tx mode to staggered\n",
6114 arvif->vdev_id);
6115
6116 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
6117 ret = ath10k_wmi_pdev_set_param(ar, id: pdev_param,
6118 value: WMI_BEACON_STAGGERED_MODE);
6119 if (ret)
6120 ath10k_warn(ar, fmt: "failed to set beacon mode for vdev %d: %i\n",
6121 arvif->vdev_id, ret);
6122
6123 ret = ath10k_mac_setup_bcn_tmpl(arvif);
6124 if (ret)
6125 ath10k_warn(ar, fmt: "failed to update beacon template: %d\n",
6126 ret);
6127
6128 if (ieee80211_vif_is_mesh(vif)) {
6129 /* mesh doesn't use SSID but firmware needs it */
6130 arvif->u.ap.ssid_len = 4;
6131 memcpy(arvif->u.ap.ssid, "mesh", arvif->u.ap.ssid_len);
6132 }
6133 }
6134
6135 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
6136 ret = ath10k_mac_setup_prb_tmpl(arvif);
6137 if (ret)
6138 ath10k_warn(ar, fmt: "failed to setup probe resp template on vdev %i: %d\n",
6139 arvif->vdev_id, ret);
6140 }
6141
6142 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
6143 arvif->dtim_period = info->dtim_period;
6144
6145 ath10k_dbg(ar, ATH10K_DBG_MAC,
6146 "mac vdev %d dtim_period %d\n",
6147 arvif->vdev_id, arvif->dtim_period);
6148
6149 vdev_param = ar->wmi.vdev_param->dtim_period;
6150 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6151 param_value: arvif->dtim_period);
6152 if (ret)
6153 ath10k_warn(ar, fmt: "failed to set dtim period for vdev %d: %i\n",
6154 arvif->vdev_id, ret);
6155 }
6156
6157 if (changed & BSS_CHANGED_SSID &&
6158 vif->type == NL80211_IFTYPE_AP) {
6159 arvif->u.ap.ssid_len = vif->cfg.ssid_len;
6160 if (vif->cfg.ssid_len)
6161 memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
6162 vif->cfg.ssid_len);
6163 arvif->u.ap.hidden_ssid = info->hidden_ssid;
6164 }
6165
6166 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(addr: info->bssid))
6167 ether_addr_copy(dst: arvif->bssid, src: info->bssid);
6168
6169 if (changed & BSS_CHANGED_FTM_RESPONDER &&
6170 arvif->ftm_responder != info->ftm_responder &&
6171 test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
6172 arvif->ftm_responder = info->ftm_responder;
6173
6174 vdev_param = ar->wmi.vdev_param->rtt_responder_role;
6175 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6176 param_value: arvif->ftm_responder);
6177
6178 ath10k_dbg(ar, ATH10K_DBG_MAC,
6179 "mac vdev %d ftm_responder %d:ret %d\n",
6180 arvif->vdev_id, arvif->ftm_responder, ret);
6181 }
6182
6183 if (changed & BSS_CHANGED_BEACON_ENABLED)
6184 ath10k_control_beaconing(arvif, info);
6185
6186 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
6187 arvif->use_cts_prot = info->use_cts_prot;
6188
6189 ret = ath10k_recalc_rtscts_prot(arvif);
6190 if (ret)
6191 ath10k_warn(ar, fmt: "failed to recalculate rts/cts prot for vdev %d: %d\n",
6192 arvif->vdev_id, ret);
6193
6194 if (ath10k_mac_can_set_cts_prot(arvif)) {
6195 ret = ath10k_mac_set_cts_prot(arvif);
6196 if (ret)
6197 ath10k_warn(ar, fmt: "failed to set cts protection for vdev %d: %d\n",
6198 arvif->vdev_id, ret);
6199 }
6200 }
6201
6202 if (changed & BSS_CHANGED_ERP_SLOT) {
		if (info->use_short_slot)
			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
		else
			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
6208
6209 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
6210 arvif->vdev_id, slottime);
6211
6212 vdev_param = ar->wmi.vdev_param->slot_time;
6213 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6214 param_value: slottime);
6215 if (ret)
6216 ath10k_warn(ar, fmt: "failed to set erp slot for vdev %d: %i\n",
6217 arvif->vdev_id, ret);
6218 }
6219
6220 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
6221 if (info->use_short_preamble)
6222 preamble = WMI_VDEV_PREAMBLE_SHORT;
6223 else
6224 preamble = WMI_VDEV_PREAMBLE_LONG;
6225
6226 ath10k_dbg(ar, ATH10K_DBG_MAC,
6227 "mac vdev %d preamble %dn",
6228 arvif->vdev_id, preamble);
6229
6230 vdev_param = ar->wmi.vdev_param->preamble;
6231 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6232 param_value: preamble);
6233 if (ret)
6234 ath10k_warn(ar, fmt: "failed to set preamble for vdev %d: %i\n",
6235 arvif->vdev_id, ret);
6236 }
6237
6238 if (changed & BSS_CHANGED_ASSOC) {
6239 if (vif->cfg.assoc) {
6240 /* Workaround: Make sure monitor vdev is not running
6241 * when associating to prevent some firmware revisions
6242 * (e.g. 10.1 and 10.2) from crashing.
6243 */
6244 if (ar->monitor_started)
6245 ath10k_monitor_stop(ar);
6246 ath10k_bss_assoc(hw, vif, bss_conf: info);
6247 ath10k_monitor_recalc(ar);
6248 } else {
6249 ath10k_bss_disassoc(hw, vif);
6250 }
6251 }
6252
6253 if (changed & BSS_CHANGED_TXPOWER) {
6254 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
6255 arvif->vdev_id, info->txpower);
6256
6257 arvif->txpower = info->txpower;
6258 ret = ath10k_mac_txpower_recalc(ar);
6259 if (ret)
6260 ath10k_warn(ar, fmt: "failed to recalc tx power: %d\n", ret);
6261 }
6262
6263 if (changed & BSS_CHANGED_PS) {
6264 arvif->ps = vif->cfg.ps;
6265
6266 ret = ath10k_config_ps(ar);
6267 if (ret)
6268 ath10k_warn(ar, fmt: "failed to setup ps on vdev %i: %d\n",
6269 arvif->vdev_id, ret);
6270 }
6271
6272 if (changed & BSS_CHANGED_MCAST_RATE &&
6273 !ath10k_mac_vif_chan(vif: arvif->vif, def: &def)) {
6274 band = def.chan->band;
6275 mcast_rate = vif->bss_conf.mcast_rate[band];
6276 if (mcast_rate > 0)
6277 rateidx = mcast_rate - 1;
6278 else
6279 rateidx = ffs(vif->bss_conf.basic_rates) - 1;
6280
6281 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
6282 rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
6283
6284 bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
6285 hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
6286 if (ath10k_mac_bitrate_is_cck(bitrate))
6287 preamble = WMI_RATE_PREAMBLE_CCK;
6288 else
6289 preamble = WMI_RATE_PREAMBLE_OFDM;
6290
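		/* Legacy rate code layout: preamble in bits 7:6, NSS field in
		 * bits 5:4 (0 for single-stream legacy rates) and the hardware
		 * rate in bits 3:0.
		 */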
6291 rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
6292
6293 ath10k_dbg(ar, ATH10K_DBG_MAC,
6294 "mac vdev %d mcast_rate %x\n",
6295 arvif->vdev_id, rate);
6296
6297 vdev_param = ar->wmi.vdev_param->mcast_data_rate;
6298 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id,
6299 param_id: vdev_param, param_value: rate);
6300 if (ret)
6301 ath10k_warn(ar,
6302 fmt: "failed to set mcast rate on vdev %i: %d\n",
6303 arvif->vdev_id, ret);
6304
6305 vdev_param = ar->wmi.vdev_param->bcast_data_rate;
6306 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id,
6307 param_id: vdev_param, param_value: rate);
6308 if (ret)
6309 ath10k_warn(ar,
6310 fmt: "failed to set bcast rate on vdev %i: %d\n",
6311 arvif->vdev_id, ret);
6312 }
6313
6314 if (changed & BSS_CHANGED_BASIC_RATES &&
6315 !ath10k_mac_vif_chan(vif: arvif->vif, def: &def))
6316 ath10k_recalculate_mgmt_rate(ar, vif, def: &def);
6317
6318 mutex_unlock(lock: &ar->conf_mutex);
6319}
6320
6321static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
6322{
6323 struct ath10k *ar = hw->priv;
6324
6325 /* This function should never be called if setting the coverage class
6326 * is not supported on this hardware.
6327 */
6328 if (!ar->hw_params.hw_ops->set_coverage_class) {
6329 WARN_ON_ONCE(1);
6330 return;
6331 }
6332 ar->hw_params.hw_ops->set_coverage_class(ar, value);
6333}
6334
6335struct ath10k_mac_tdls_iter_data {
6336 u32 num_tdls_stations;
6337 struct ieee80211_vif *curr_vif;
6338};
6339
6340static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
6341 struct ieee80211_sta *sta)
6342{
6343 struct ath10k_mac_tdls_iter_data *iter_data = data;
6344 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6345 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
6346
6347 if (sta->tdls && sta_vif == iter_data->curr_vif)
6348 iter_data->num_tdls_stations++;
6349}
6350
6351static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
6352 struct ieee80211_vif *vif)
6353{
6354 struct ath10k_mac_tdls_iter_data data = {};
6355
6356 data.curr_vif = vif;
6357
6358 ieee80211_iterate_stations_atomic(hw,
6359 iterator: ath10k_mac_tdls_vif_stations_count_iter,
6360 data: &data);
6361 return data.num_tdls_stations;
6362}
6363
6364static int ath10k_hw_scan(struct ieee80211_hw *hw,
6365 struct ieee80211_vif *vif,
6366 struct ieee80211_scan_request *hw_req)
6367{
6368 struct ath10k *ar = hw->priv;
6369 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6370 struct cfg80211_scan_request *req = &hw_req->req;
6371 struct wmi_start_scan_arg arg;
6372 int ret = 0;
6373 int i;
6374 u32 scan_timeout;
6375
6376 mutex_lock(&ar->conf_mutex);
6377
6378 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
6379 ret = -EBUSY;
6380 goto exit;
6381 }
6382
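	/* Only one scan may be in flight; claim the scan state machine under
	 * data_lock before issuing the WMI start scan command.
	 */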
6383 spin_lock_bh(lock: &ar->data_lock);
6384 switch (ar->scan.state) {
6385 case ATH10K_SCAN_IDLE:
6386 reinit_completion(x: &ar->scan.started);
6387 reinit_completion(x: &ar->scan.completed);
6388 ar->scan.state = ATH10K_SCAN_STARTING;
6389 ar->scan.is_roc = false;
6390 ar->scan.vdev_id = arvif->vdev_id;
6391 ret = 0;
6392 break;
6393 case ATH10K_SCAN_STARTING:
6394 case ATH10K_SCAN_RUNNING:
6395 case ATH10K_SCAN_ABORTING:
6396 ret = -EBUSY;
6397 break;
6398 }
6399 spin_unlock_bh(lock: &ar->data_lock);
6400
6401 if (ret)
6402 goto exit;
6403
6404 memset(&arg, 0, sizeof(arg));
6405 ath10k_wmi_start_scan_init(ar, arg: &arg);
6406 arg.vdev_id = arvif->vdev_id;
6407 arg.scan_id = ATH10K_SCAN_ID;
6408
6409 if (req->ie_len) {
6410 arg.ie_len = req->ie_len;
6411 memcpy(arg.ie, req->ie, arg.ie_len);
6412 }
6413
6414 if (req->n_ssids) {
6415 arg.n_ssids = req->n_ssids;
6416 for (i = 0; i < arg.n_ssids; i++) {
6417 arg.ssids[i].len = req->ssids[i].ssid_len;
6418 arg.ssids[i].ssid = req->ssids[i].ssid;
6419 }
6420 } else {
6421 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6422 }
6423
6424 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
6425 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
6426 ether_addr_copy(dst: arg.mac_addr.addr, src: req->mac_addr);
6427 ether_addr_copy(dst: arg.mac_mask.addr, src: req->mac_addr_mask);
6428 }
6429
6430 if (req->n_channels) {
6431 arg.n_channels = req->n_channels;
6432 for (i = 0; i < arg.n_channels; i++)
6433 arg.channels[i] = req->channels[i]->center_freq;
6434 }
6435
6436 /* if duration is set, default dwell times will be overwritten */
6437 if (req->duration) {
6438 arg.dwell_time_active = req->duration;
6439 arg.dwell_time_passive = req->duration;
6440 arg.burst_duration_ms = req->duration;
6441
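		/* Worst case: up to max_rest_time between each of the
		 * (n_channels - 1) channel switches plus the requested dwell
		 * time and WMI channel-switch event overhead per channel,
		 * capped at the firmware's max scan time.
		 */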
6442 scan_timeout = min_t(u32, arg.max_rest_time *
6443 (arg.n_channels - 1) + (req->duration +
6444 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
6445 arg.n_channels, arg.max_scan_time);
6446 } else {
6447 scan_timeout = arg.max_scan_time;
6448 }
6449
6450 /* Add a 200ms margin to account for event/command processing */
6451 scan_timeout += 200;
6452
6453 ret = ath10k_start_scan(ar, arg: &arg);
6454 if (ret) {
6455 ath10k_warn(ar, fmt: "failed to start hw scan: %d\n", ret);
6456 spin_lock_bh(lock: &ar->data_lock);
6457 ar->scan.state = ATH10K_SCAN_IDLE;
6458 spin_unlock_bh(lock: &ar->data_lock);
6459 }
6460
6461 ieee80211_queue_delayed_work(hw: ar->hw, dwork: &ar->scan.timeout,
6462 delay: msecs_to_jiffies(m: scan_timeout));
6463
6464exit:
6465 mutex_unlock(lock: &ar->conf_mutex);
6466 return ret;
6467}
6468
6469static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
6470 struct ieee80211_vif *vif)
6471{
6472 struct ath10k *ar = hw->priv;
6473
6474 mutex_lock(&ar->conf_mutex);
6475 ath10k_scan_abort(ar);
6476 mutex_unlock(lock: &ar->conf_mutex);
6477
6478 cancel_delayed_work_sync(dwork: &ar->scan.timeout);
6479}
6480
6481static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
6482 struct ath10k_vif *arvif,
6483 enum set_key_cmd cmd,
6484 struct ieee80211_key_conf *key)
6485{
6486 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
6487 int ret;
6488
	/* The 10.1 firmware branch requires the default key index to be set
	 * to the group key index after installing it. Otherwise the FW/HW
	 * transmits corrupted frames with multi-vif APs. This is not required
	 * for the main firmware branch (e.g. 636).
	 *
	 * This is also needed on 636 firmware for IBSS-RSN to work more
	 * reliably.
	 *
	 * FIXME: It remains unknown if this is required for multi-vif STA
	 * interfaces on 10.1.
	 */
6499
6500 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
6501 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
6502 return;
6503
6504 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
6505 return;
6506
6507 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
6508 return;
6509
6510 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
6511 return;
6512
6513 if (cmd != SET_KEY)
6514 return;
6515
6516 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param,
6517 param_value: key->keyidx);
6518 if (ret)
6519 ath10k_warn(ar, fmt: "failed to set vdev %i group key as default key: %d\n",
6520 arvif->vdev_id, ret);
6521}
6522
6523static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6524 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
6525 struct ieee80211_key_conf *key)
6526{
6527 struct ath10k *ar = hw->priv;
6528 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6529 struct ath10k_sta *arsta;
6530 struct ath10k_peer *peer;
6531 const u8 *peer_addr;
6532 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
6533 key->cipher == WLAN_CIPHER_SUITE_WEP104;
6534 int ret = 0;
6535 int ret2;
6536 u32 flags = 0;
6537 u32 flags2;
6538
6539 /* this one needs to be done in software */
6540 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
6541 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
6542 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
6543 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
6544 return 1;
6545
6546 if (arvif->nohwcrypt)
6547 return 1;
6548
6549 if (key->keyidx > WMI_MAX_KEY_INDEX)
6550 return -ENOSPC;
6551
6552 mutex_lock(&ar->conf_mutex);
6553
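	/* Pick the peer the key belongs to: the station itself if one is
	 * given, the AP (BSSID) peer on a STA vdev, or the vif's own self-peer
	 * otherwise.
	 */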
6554 if (sta) {
6555 arsta = (struct ath10k_sta *)sta->drv_priv;
6556 peer_addr = sta->addr;
6557 spin_lock_bh(lock: &ar->data_lock);
6558 arsta->ucast_cipher = key->cipher;
6559 spin_unlock_bh(lock: &ar->data_lock);
6560 } else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
6561 peer_addr = vif->bss_conf.bssid;
6562 } else {
6563 peer_addr = vif->addr;
6564 }
6565
6566 key->hw_key_idx = key->keyidx;
6567
6568 if (is_wep) {
6569 if (cmd == SET_KEY)
6570 arvif->wep_keys[key->keyidx] = key;
6571 else
6572 arvif->wep_keys[key->keyidx] = NULL;
6573 }
6574
	/* The peer should not disappear midway (unless the FW goes awry) since
	 * we already hold conf_mutex. We just make sure it is there now.
	 */
6578 spin_lock_bh(lock: &ar->data_lock);
6579 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr: peer_addr);
6580 spin_unlock_bh(lock: &ar->data_lock);
6581
6582 if (!peer) {
6583 if (cmd == SET_KEY) {
6584 ath10k_warn(ar, fmt: "failed to install key for non-existent peer %pM\n",
6585 peer_addr);
6586 ret = -EOPNOTSUPP;
6587 goto exit;
6588 } else {
6589 /* if the peer doesn't exist there is no key to disable anymore */
6590 goto exit;
6591 }
6592 }
6593
6594 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
6595 flags |= WMI_KEY_PAIRWISE;
6596 else
6597 flags |= WMI_KEY_GROUP;
6598
6599 if (is_wep) {
6600 if (cmd == DISABLE_KEY)
6601 ath10k_clear_vdev_key(arvif, key);
6602
6603 /* When WEP keys are uploaded it's possible that there are
6604 * stations associated already (e.g. when merging) without any
6605 * keys. Static WEP needs an explicit per-peer key upload.
6606 */
6607 if (vif->type == NL80211_IFTYPE_ADHOC &&
6608 cmd == SET_KEY)
6609 ath10k_mac_vif_update_wep_key(arvif, key);
6610
		/* 802.1x never sets def_wep_key_idx, so each set_key() call
		 * changes the default TX key.
		 *
		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
		 * after the first set_key().
		 */
6617 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
6618 flags |= WMI_KEY_TX_USAGE;
6619 }
6620
6621 ret = ath10k_install_key(arvif, key, cmd, macaddr: peer_addr, flags);
6622 if (ret) {
6623 WARN_ON(ret > 0);
6624 ath10k_warn(ar, fmt: "failed to install key for vdev %i peer %pM: %d\n",
6625 arvif->vdev_id, peer_addr, ret);
6626 goto exit;
6627 }
6628
6629 /* mac80211 sets static WEP keys as groupwise while firmware requires
6630 * them to be installed twice as both pairwise and groupwise.
6631 */
6632 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
6633 flags2 = flags;
6634 flags2 &= ~WMI_KEY_GROUP;
6635 flags2 |= WMI_KEY_PAIRWISE;
6636
6637 ret = ath10k_install_key(arvif, key, cmd, macaddr: peer_addr, flags: flags2);
6638 if (ret) {
6639 WARN_ON(ret > 0);
6640 ath10k_warn(ar, fmt: "failed to install (ucast) key for vdev %i peer %pM: %d\n",
6641 arvif->vdev_id, peer_addr, ret);
6642 ret2 = ath10k_install_key(arvif, key, cmd: DISABLE_KEY,
6643 macaddr: peer_addr, flags);
6644 if (ret2) {
6645 WARN_ON(ret2 > 0);
6646 ath10k_warn(ar, fmt: "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
6647 arvif->vdev_id, peer_addr, ret2);
6648 }
6649 goto exit;
6650 }
6651 }
6652
6653 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
6654
6655 spin_lock_bh(lock: &ar->data_lock);
6656 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr: peer_addr);
6657 if (peer && cmd == SET_KEY)
6658 peer->keys[key->keyidx] = key;
6659 else if (peer && cmd == DISABLE_KEY)
6660 peer->keys[key->keyidx] = NULL;
6661 else if (peer == NULL)
6662 /* impossible unless FW goes crazy */
6663 ath10k_warn(ar, fmt: "Peer %pM disappeared!\n", peer_addr);
6664 spin_unlock_bh(lock: &ar->data_lock);
6665
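	/* Tell the firmware the peer may pass traffic: TDLS peers right away,
	 * regular stations once their pairwise key has been installed.
	 */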
	if (sta && sta->tdls)
		ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
					  ar->wmi.peer_param->authorize, 1);
	else if (sta && cmd == SET_KEY &&
		 (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
					  ar->wmi.peer_param->authorize, 1);
6672
6673exit:
6674 mutex_unlock(lock: &ar->conf_mutex);
6675 return ret;
6676}
6677
6678static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
6679 struct ieee80211_vif *vif,
6680 int keyidx)
6681{
6682 struct ath10k *ar = hw->priv;
6683 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6684 int ret;
6685
6686 mutex_lock(&arvif->ar->conf_mutex);
6687
6688 if (arvif->ar->state != ATH10K_STATE_ON)
6689 goto unlock;
6690
6691 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
6692 arvif->vdev_id, keyidx);
6693
6694 ret = ath10k_wmi_vdev_set_param(ar: arvif->ar,
6695 vdev_id: arvif->vdev_id,
6696 param_id: arvif->ar->wmi.vdev_param->def_keyid,
6697 param_value: keyidx);
6698
6699 if (ret) {
6700 ath10k_warn(ar, fmt: "failed to update wep key index for vdev %d: %d\n",
6701 arvif->vdev_id,
6702 ret);
6703 goto unlock;
6704 }
6705
6706 arvif->def_wep_key_idx = keyidx;
6707
6708unlock:
6709 mutex_unlock(lock: &arvif->ar->conf_mutex);
6710}
6711
6712static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6713{
6714 struct ath10k *ar;
6715 struct ath10k_vif *arvif;
6716 struct ath10k_sta *arsta;
6717 struct ieee80211_sta *sta;
6718 struct cfg80211_chan_def def;
6719 enum nl80211_band band;
6720 const u8 *ht_mcs_mask;
6721 const u16 *vht_mcs_mask;
6722 u32 changed, bw, nss, smps;
6723 int err;
6724
6725 arsta = container_of(wk, struct ath10k_sta, update_wk);
6726 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
6727 arvif = arsta->arvif;
6728 ar = arvif->ar;
6729
6730 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
6731 return;
6732
6733 band = def.chan->band;
6734 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
6735 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
6736
6737 spin_lock_bh(lock: &ar->data_lock);
6738
6739 changed = arsta->changed;
6740 arsta->changed = 0;
6741
6742 bw = arsta->bw;
6743 nss = arsta->nss;
6744 smps = arsta->smps;
6745
6746 spin_unlock_bh(lock: &ar->data_lock);
6747
6748 mutex_lock(&ar->conf_mutex);
6749
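	/* Clamp the reported NSS to what the configured HT/VHT rate masks
	 * allow, but never below a single stream.
	 */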
6750 nss = max_t(u32, 1, nss);
6751 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6752 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6753
6754 if (changed & IEEE80211_RC_BW_CHANGED) {
6755 enum wmi_phy_mode mode;
6756
6757 mode = chan_to_phymode(chandef: &def);
6758 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM peer bw %d phymode %d\n",
6759 sta->addr, bw, mode);
6760
6761 err = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
6762 param_id: ar->wmi.peer_param->phymode, param_value: mode);
6763 if (err) {
6764 ath10k_warn(ar, fmt: "failed to update STA %pM peer phymode %d: %d\n",
6765 sta->addr, mode, err);
6766 goto exit;
6767 }
6768
6769 err = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
6770 param_id: ar->wmi.peer_param->chan_width, param_value: bw);
6771 if (err)
6772 ath10k_warn(ar, fmt: "failed to update STA %pM peer bw %d: %d\n",
6773 sta->addr, bw, err);
6774 }
6775
6776 if (changed & IEEE80211_RC_NSS_CHANGED) {
6777 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM nss %d\n",
6778 sta->addr, nss);
6779
6780 err = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
6781 param_id: ar->wmi.peer_param->nss, param_value: nss);
6782 if (err)
6783 ath10k_warn(ar, fmt: "failed to update STA %pM nss %d: %d\n",
6784 sta->addr, nss, err);
6785 }
6786
6787 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6788 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM smps %d\n",
6789 sta->addr, smps);
6790
6791 err = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
6792 param_id: ar->wmi.peer_param->smps_state, param_value: smps);
6793 if (err)
6794 ath10k_warn(ar, fmt: "failed to update STA %pM smps %d: %d\n",
6795 sta->addr, smps, err);
6796 }
6797
6798 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
6799 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM supp rates\n",
6800 sta->addr);
6801
6802 err = ath10k_station_assoc(ar, vif: arvif->vif, sta, reassoc: true);
6803 if (err)
6804 ath10k_warn(ar, fmt: "failed to reassociate station: %pM\n",
6805 sta->addr);
6806 }
6807
6808exit:
6809 mutex_unlock(lock: &ar->conf_mutex);
6810}
6811
6812static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
6813 struct ieee80211_sta *sta)
6814{
6815 struct ath10k *ar = arvif->ar;
6816
6817 lockdep_assert_held(&ar->conf_mutex);
6818
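	/* On a STA vdev only TDLS peers count against the station limit. */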
6819 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6820 return 0;
6821
6822 if (ar->num_stations >= ar->max_num_stations)
6823 return -ENOBUFS;
6824
6825 ar->num_stations++;
6826
6827 return 0;
6828}
6829
6830static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
6831 struct ieee80211_sta *sta)
6832{
6833 struct ath10k *ar = arvif->ar;
6834
6835 lockdep_assert_held(&ar->conf_mutex);
6836
6837 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6838 return;
6839
6840 ar->num_stations--;
6841}
6842
6843static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
6844 struct ieee80211_vif *vif,
6845 struct ieee80211_sta *sta)
6846{
6847 struct ath10k *ar = hw->priv;
6848 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6849 int ret = 0;
6850 s16 txpwr;
6851
6852 if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
6853 txpwr = 0;
6854 } else {
6855 txpwr = sta->deflink.txpwr.power;
6856 if (!txpwr)
6857 return -EINVAL;
6858 }
6859
6860 if (txpwr > ATH10K_TX_POWER_MAX_VAL || txpwr < ATH10K_TX_POWER_MIN_VAL)
6861 return -EINVAL;
6862
6863 mutex_lock(&ar->conf_mutex);
6864
6865 ret = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
6866 param_id: ar->wmi.peer_param->use_fixed_power, param_value: txpwr);
6867 if (ret) {
6868 ath10k_warn(ar, fmt: "failed to set tx power for station ret: %d\n",
6869 ret);
6870 goto out;
6871 }
6872
6873out:
6874 mutex_unlock(lock: &ar->conf_mutex);
6875 return ret;
6876}
6877
6878struct ath10k_mac_iter_tid_conf_data {
6879 struct ieee80211_vif *curr_vif;
6880 struct ath10k *ar;
6881 bool reset_config;
6882};
6883
6884static bool
6885ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6886 enum nl80211_band band,
6887 const struct cfg80211_bitrate_mask *mask,
6888 int *vht_num_rates)
6889{
6890 int num_rates = 0;
6891 int i, tmp;
6892
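	/* Count every selected legacy, HT and VHT rate; the mask selects a
	 * single fixed rate only if exactly one bit is set across all of
	 * them. The VHT count is reported separately to the caller.
	 */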
6893 num_rates += hweight32(mask->control[band].legacy);
6894
6895 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6896 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6897
6898 *vht_num_rates = 0;
6899 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6900 tmp = hweight16(mask->control[band].vht_mcs[i]);
6901 num_rates += tmp;
6902 *vht_num_rates += tmp;
6903 }
6904
6905 return num_rates == 1;
6906}
6907
6908static int
6909ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6910 enum nl80211_band band,
6911 const struct cfg80211_bitrate_mask *mask,
6912 u8 *rate, u8 *nss, bool vht_only)
6913{
6914 int rate_idx;
6915 int i;
6916 u16 bitrate;
6917 u8 preamble;
6918 u8 hw_rate;
6919
6920 if (vht_only)
6921 goto next;
6922
6923 if (hweight32(mask->control[band].legacy) == 1) {
6924 rate_idx = ffs(mask->control[band].legacy) - 1;
6925
6926 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
6927 rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
6928
6929 hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
6930 bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
6931
6932 if (ath10k_mac_bitrate_is_cck(bitrate))
6933 preamble = WMI_RATE_PREAMBLE_CCK;
6934 else
6935 preamble = WMI_RATE_PREAMBLE_OFDM;
6936
6937 *nss = 1;
6938 *rate = preamble << 6 |
6939 (*nss - 1) << 4 |
6940 hw_rate << 0;
6941
6942 return 0;
6943 }
6944
6945 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6946 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6947 *nss = i + 1;
6948 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6949 (*nss - 1) << 4 |
6950 (ffs(mask->control[band].ht_mcs[i]) - 1);
6951
6952 return 0;
6953 }
6954 }
6955
6956next:
6957 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6958 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6959 *nss = i + 1;
6960 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6961 (*nss - 1) << 4 |
6962 (ffs(mask->control[band].vht_mcs[i]) - 1);
6963
6964 return 0;
6965 }
6966 }
6967
6968 return -EINVAL;
6969}
6970
6971static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
6972 struct ieee80211_sta *sta,
6973 u32 rate_ctrl_flag, u8 nss)
6974{
6975 struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
6976 struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
6977
6978 if (nss > sta->deflink.rx_nss) {
6979 ath10k_warn(ar, fmt: "Invalid nss field, configured %u limit %u\n",
6980 nss, sta->deflink.rx_nss);
6981 return -EINVAL;
6982 }
6983
6984 if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
6985 if (!vht_cap->vht_supported) {
6986 ath10k_warn(ar, fmt: "Invalid VHT rate for sta %pM\n",
6987 sta->addr);
6988 return -EINVAL;
6989 }
6990 } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
6991 if (!ht_cap->ht_supported || vht_cap->vht_supported) {
6992 ath10k_warn(ar, fmt: "Invalid HT rate for sta %pM\n",
6993 sta->addr);
6994 return -EINVAL;
6995 }
6996 } else {
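		/* A fixed legacy (CCK/OFDM) rate is only accepted for stations
		 * that support neither HT nor VHT.
		 */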
6997 if (ht_cap->ht_supported || vht_cap->vht_supported)
6998 return -EINVAL;
6999 }
7000
7001 return 0;
7002}
7003
7004static int
7005ath10k_mac_tid_bitrate_config(struct ath10k *ar,
7006 struct ieee80211_vif *vif,
7007 struct ieee80211_sta *sta,
7008 u32 *rate_ctrl_flag, u8 *rate_ctrl,
7009 enum nl80211_tx_rate_setting txrate_type,
7010 const struct cfg80211_bitrate_mask *mask)
7011{
7012 struct cfg80211_chan_def def;
7013 enum nl80211_band band;
7014 u8 nss, rate;
7015 int vht_num_rates, ret;
7016
7017 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
7018 return -EINVAL;
7019
7020 if (txrate_type == NL80211_TX_RATE_AUTOMATIC) {
7021 *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
7022 *rate_ctrl_flag = 0;
7023 return 0;
7024 }
7025
7026 band = def.chan->band;
7027
7028 if (!ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
7029 vht_num_rates: &vht_num_rates)) {
7030 return -EINVAL;
7031 }
7032
7033 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
7034 rate: &rate, nss: &nss, vht_only: false);
7035 if (ret) {
7036 ath10k_warn(ar, fmt: "failed to get single rate: %d\n",
7037 ret);
7038 return ret;
7039 }
7040
7041 *rate_ctrl_flag = rate;
7042
7043 if (sta && ath10k_mac_validate_rate_mask(ar, sta, rate_ctrl_flag: *rate_ctrl_flag, nss))
7044 return -EINVAL;
7045
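	/* NL80211_TX_RATE_LIMITED (an upper rate cap) requires the extended
	 * per-TID config WMI service; NL80211_TX_RATE_FIXED does not.
	 */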
7046 if (txrate_type == NL80211_TX_RATE_FIXED)
7047 *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE;
7048 else if (txrate_type == NL80211_TX_RATE_LIMITED &&
7049 (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
7050 ar->wmi.svc_map)))
7051 *rate_ctrl = WMI_PEER_TID_CONFIG_RATE_UPPER_CAP;
7052 else
7053 return -EOPNOTSUPP;
7054
7055 return 0;
7056}
7057
7058static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *sta,
7059 struct ieee80211_vif *vif, u32 changed,
7060 struct wmi_per_peer_per_tid_cfg_arg *arg)
7061{
7062 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7063 struct ath10k_sta *arsta;
7064 int ret;
7065
7066 if (sta) {
7067 if (!sta->wme)
7068 return -EOPNOTSUPP;
7069
7070 arsta = (struct ath10k_sta *)sta->drv_priv;
7071
7072 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7073 if ((arsta->retry_long[arg->tid] > 0 ||
7074 arsta->rate_code[arg->tid] > 0 ||
7075 arsta->ampdu[arg->tid] ==
7076 WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
7077 arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
7078 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
7079 arg->ack_policy = 0;
7080 arg->aggr_control = 0;
7081 arg->rate_ctrl = 0;
7082 arg->rcode_flags = 0;
7083 }
7084 }
7085
7086 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7087 if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
7088 arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
7089 arg->aggr_control = 0;
7090 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
7091 }
7092 }
7093
7094 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7095 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7096 if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
7097 arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
7098 arg->rate_ctrl = 0;
7099 arg->rcode_flags = 0;
7100 }
7101 }
7102
7103 ether_addr_copy(dst: arg->peer_macaddr.addr, src: sta->addr);
7104
7105 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg);
7106 if (ret)
7107 return ret;
7108
		/* Store the configured parameters in the success case */
7110 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7111 arsta->noack[arg->tid] = arg->ack_policy;
7112 arg->ack_policy = 0;
7113 arg->aggr_control = 0;
7114 arg->rate_ctrl = 0;
7115 arg->rcode_flags = 0;
7116 }
7117
7118 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
7119 arsta->retry_long[arg->tid] = arg->retry_count;
7120 arg->retry_count = 0;
7121 }
7122
7123 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7124 arsta->ampdu[arg->tid] = arg->aggr_control;
7125 arg->aggr_control = 0;
7126 }
7127
7128 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7129 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7130 arsta->rate_ctrl[arg->tid] = arg->rate_ctrl;
7131 arg->rate_ctrl = 0;
7132 arg->rcode_flags = 0;
7133 }
7134
7135 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7136 arsta->rtscts[arg->tid] = arg->rtscts_ctrl;
7137 arg->ext_tid_cfg_bitmap = 0;
7138 }
7139 } else {
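		/* vif-wide (all stations) configuration: remember the values
		 * so ath10k_sta_tid_cfg_wk() can apply them to stations that
		 * have no per-station override.
		 */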
7140 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7141 if ((arvif->retry_long[arg->tid] ||
7142 arvif->rate_code[arg->tid] ||
7143 arvif->ampdu[arg->tid] ==
7144 WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
7145 arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
7146 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
7147 } else {
7148 arvif->noack[arg->tid] = arg->ack_policy;
7149 arvif->ampdu[arg->tid] = arg->aggr_control;
7150 arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
7151 }
7152 }
7153
7154 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
7155 if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
7156 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
7157 else
7158 arvif->retry_long[arg->tid] = arg->retry_count;
7159 }
7160
7161 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7162 if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
7163 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
7164 else
7165 arvif->ampdu[arg->tid] = arg->aggr_control;
7166 }
7167
7168 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7169 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7170 if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
7171 changed &= ~(BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7172 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE));
7173 } else {
7174 arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
7175 arvif->rate_code[arg->tid] = arg->rcode_flags;
7176 }
7177 }
7178
7179 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7180 arvif->rtscts[arg->tid] = arg->rtscts_ctrl;
7181 arg->ext_tid_cfg_bitmap = 0;
7182 }
7183
7184 if (changed)
7185 arvif->tid_conf_changed[arg->tid] |= changed;
7186 }
7187
7188 return 0;
7189}
7190
7191static int
7192ath10k_mac_parse_tid_config(struct ath10k *ar,
7193 struct ieee80211_sta *sta,
7194 struct ieee80211_vif *vif,
7195 struct cfg80211_tid_cfg *tid_conf,
7196 struct wmi_per_peer_per_tid_cfg_arg *arg)
7197{
7198 u32 changed = tid_conf->mask;
7199 int ret = 0, i = 0;
7200
7201 if (!changed)
7202 return -EINVAL;
7203
7204 while (i < ATH10K_TID_MAX) {
7205 if (!(tid_conf->tids & BIT(i))) {
7206 i++;
7207 continue;
7208 }
7209
7210 arg->tid = i;
7211
7212 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7213 if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) {
7214 arg->ack_policy = WMI_PEER_TID_CONFIG_NOACK;
7215 arg->rate_ctrl =
7216 WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
7217 arg->aggr_control =
7218 WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
7219 } else {
7220 arg->ack_policy =
7221 WMI_PEER_TID_CONFIG_ACK;
7222 arg->rate_ctrl =
7223 WMI_TID_CONFIG_RATE_CONTROL_AUTO;
7224 arg->aggr_control =
7225 WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
7226 }
7227 }
7228
7229 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG))
7230 arg->retry_count = tid_conf->retry_long;
7231
7232 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7233 if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE)
7234 arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
7235 else
7236 arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
7237 }
7238
7239 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7240 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7241 ret = ath10k_mac_tid_bitrate_config(ar, vif, sta,
7242 rate_ctrl_flag: &arg->rcode_flags,
7243 rate_ctrl: &arg->rate_ctrl,
7244 txrate_type: tid_conf->txrate_type,
7245 mask: &tid_conf->txrate_mask);
7246 if (ret) {
7247 ath10k_warn(ar, fmt: "failed to configure bitrate mask %d\n",
7248 ret);
7249 arg->rcode_flags = 0;
7250 arg->rate_ctrl = 0;
7251 }
7252 }
7253
7254 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7255 if (tid_conf->rtscts)
7256 arg->rtscts_ctrl = tid_conf->rtscts;
7257
7258 arg->ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
7259 }
7260
7261 ret = ath10k_mac_set_tid_config(ar, sta, vif, changed, arg);
7262 if (ret)
7263 return ret;
7264 i++;
7265 }
7266
7267 return ret;
7268}
7269
7270static int ath10k_mac_reset_tid_config(struct ath10k *ar,
7271 struct ieee80211_sta *sta,
7272 struct ath10k_vif *arvif,
7273 u8 tids)
7274{
7275 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7276 struct wmi_per_peer_per_tid_cfg_arg arg;
7277 int ret = 0, i = 0;
7278
7279 arg.vdev_id = arvif->vdev_id;
7280 while (i < ATH10K_TID_MAX) {
7281 if (!(tids & BIT(i))) {
7282 i++;
7283 continue;
7284 }
7285
7286 arg.tid = i;
7287 arg.ack_policy = WMI_PEER_TID_CONFIG_ACK;
7288 arg.retry_count = ATH10K_MAX_RETRY_COUNT;
7289 arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
7290 arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
7291 arg.rtscts_ctrl = WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE;
7292 arg.ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
7293
7294 ether_addr_copy(dst: arg.peer_macaddr.addr, src: sta->addr);
7295
7296 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg: &arg);
7297 if (ret)
7298 return ret;
7299
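		/* Reset the cached state: per-station overrides become unset
		 * (-1) on a per-station reset, while a vif-wide reset clears
		 * the vif defaults instead.
		 */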
7300 if (!arvif->tids_rst) {
7301 arsta->retry_long[i] = -1;
7302 arsta->noack[i] = -1;
7303 arsta->ampdu[i] = -1;
7304 arsta->rate_code[i] = -1;
7305 arsta->rate_ctrl[i] = 0;
7306 arsta->rtscts[i] = -1;
7307 } else {
7308 arvif->retry_long[i] = 0;
7309 arvif->noack[i] = 0;
7310 arvif->ampdu[i] = 0;
7311 arvif->rate_code[i] = 0;
7312 arvif->rate_ctrl[i] = 0;
7313 arvif->rtscts[i] = 0;
7314 }
7315
7316 i++;
7317 }
7318
7319 return ret;
7320}
7321
7322static void ath10k_sta_tid_cfg_wk(struct work_struct *wk)
7323{
7324 struct wmi_per_peer_per_tid_cfg_arg arg = {};
7325 struct ieee80211_sta *sta;
7326 struct ath10k_sta *arsta;
7327 struct ath10k_vif *arvif;
7328 struct ath10k *ar;
7329 bool config_apply;
7330 int ret, i;
7331 u32 changed;
7332 u8 nss;
7333
7334 arsta = container_of(wk, struct ath10k_sta, tid_config_wk);
7335 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
7336 arvif = arsta->arvif;
7337 ar = arvif->ar;
7338
7339 mutex_lock(&ar->conf_mutex);
7340
7341 if (arvif->tids_rst) {
7342 ret = ath10k_mac_reset_tid_config(ar, sta, arvif,
7343 tids: arvif->tids_rst);
7344 goto exit;
7345 }
7346
7347 ether_addr_copy(dst: arg.peer_macaddr.addr, src: sta->addr);
7348
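	/* Apply the vif-wide TID configuration to this station, but only for
	 * attributes the station has not overridden itself (its own values
	 * are tracked as -1 while unset).
	 */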
7349 for (i = 0; i < ATH10K_TID_MAX; i++) {
7350 config_apply = false;
7351 changed = arvif->tid_conf_changed[i];
7352
7353 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7354 if (arsta->noack[i] != -1) {
7355 arg.ack_policy = 0;
7356 } else {
7357 config_apply = true;
7358 arg.ack_policy = arvif->noack[i];
7359 arg.aggr_control = arvif->ampdu[i];
7360 arg.rate_ctrl = arvif->rate_ctrl[i];
7361 }
7362 }
7363
7364 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
7365 if (arsta->retry_long[i] != -1 ||
7366 arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
7367 arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
7368 arg.retry_count = 0;
7369 } else {
7370 arg.retry_count = arvif->retry_long[i];
7371 config_apply = true;
7372 }
7373 }
7374
7375 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7376 if (arsta->ampdu[i] != -1 ||
7377 arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
7378 arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
7379 arg.aggr_control = 0;
7380 } else {
7381 arg.aggr_control = arvif->ampdu[i];
7382 config_apply = true;
7383 }
7384 }
7385
7386 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7387 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7388 nss = ATH10K_HW_NSS(arvif->rate_code[i]);
7389 ret = ath10k_mac_validate_rate_mask(ar, sta,
7390 rate_ctrl_flag: arvif->rate_code[i],
7391 nss);
7392 if (ret &&
7393 arvif->rate_ctrl[i] > WMI_TID_CONFIG_RATE_CONTROL_AUTO) {
7394 arg.rate_ctrl = 0;
7395 arg.rcode_flags = 0;
7396 }
7397
7398 if (arsta->rate_ctrl[i] >
7399 WMI_TID_CONFIG_RATE_CONTROL_AUTO ||
7400 arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
7401 arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
7402 arg.rate_ctrl = 0;
7403 arg.rcode_flags = 0;
7404 } else {
7405 arg.rate_ctrl = arvif->rate_ctrl[i];
7406 arg.rcode_flags = arvif->rate_code[i];
7407 config_apply = true;
7408 }
7409 }
7410
7411 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7412 if (arsta->rtscts[i]) {
7413 arg.rtscts_ctrl = 0;
7414 arg.ext_tid_cfg_bitmap = 0;
7415 } else {
7416 arg.rtscts_ctrl = arvif->rtscts[i] - 1;
7417 arg.ext_tid_cfg_bitmap =
7418 WMI_EXT_TID_RTS_CTS_CONFIG;
7419 config_apply = true;
7420 }
7421 }
7422
7423 arg.tid = i;
7424
7425 if (config_apply) {
7426 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg: &arg);
7427 if (ret)
7428 ath10k_warn(ar, fmt: "failed to set per tid config for sta %pM: %d\n",
7429 sta->addr, ret);
7430 }
7431
7432 arg.ack_policy = 0;
7433 arg.retry_count = 0;
7434 arg.aggr_control = 0;
7435 arg.rate_ctrl = 0;
7436 arg.rcode_flags = 0;
7437 }
7438
7439exit:
7440 mutex_unlock(lock: &ar->conf_mutex);
7441}
7442
7443static void ath10k_mac_vif_stations_tid_conf(void *data,
7444 struct ieee80211_sta *sta)
7445{
7446 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7447 struct ath10k_mac_iter_tid_conf_data *iter_data = data;
7448 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
7449
7450 if (sta_vif != iter_data->curr_vif || !sta->wme)
7451 return;
7452
7453 ieee80211_queue_work(hw: iter_data->ar->hw, work: &arsta->tid_config_wk);
7454}
7455
7456static int ath10k_sta_state(struct ieee80211_hw *hw,
7457 struct ieee80211_vif *vif,
7458 struct ieee80211_sta *sta,
7459 enum ieee80211_sta_state old_state,
7460 enum ieee80211_sta_state new_state)
7461{
7462 struct ath10k *ar = hw->priv;
7463 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7464 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7465 struct ath10k_peer *peer;
7466 int ret = 0;
7467 int i;
7468
7469 if (old_state == IEEE80211_STA_NOTEXIST &&
7470 new_state == IEEE80211_STA_NONE) {
7471 memset(arsta, 0, sizeof(*arsta));
7472 arsta->arvif = arvif;
7473 arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
7474 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
7475 INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk);
7476
7477 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
7478 ath10k_mac_txq_init(txq: sta->txq[i]);
7479 }
7480
7481 /* cancel must be done outside the mutex to avoid deadlock */
7482 if ((old_state == IEEE80211_STA_NONE &&
7483 new_state == IEEE80211_STA_NOTEXIST)) {
7484 cancel_work_sync(work: &arsta->update_wk);
7485 cancel_work_sync(work: &arsta->tid_config_wk);
7486 }
7487
7488 mutex_lock(&ar->conf_mutex);
7489
7490 if (old_state == IEEE80211_STA_NOTEXIST &&
7491 new_state == IEEE80211_STA_NONE) {
7492 /*
7493 * New station addition.
7494 */
7495 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
7496 u32 num_tdls_stations;
7497
7498 ath10k_dbg(ar, ATH10K_DBG_STA,
7499 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
7500 arvif->vdev_id, sta->addr,
7501 ar->num_stations + 1, ar->max_num_stations,
7502 ar->num_peers + 1, ar->max_num_peers);
7503
7504 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
7505
7506 if (sta->tdls) {
7507 if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
7508 ath10k_warn(ar, fmt: "vdev %i exceeded maximum number of tdls vdevs %i\n",
7509 arvif->vdev_id,
7510 ar->max_num_tdls_vdevs);
7511 ret = -ELNRNG;
7512 goto exit;
7513 }
7514 peer_type = WMI_PEER_TYPE_TDLS;
7515 }
7516
7517 ret = ath10k_mac_inc_num_stations(arvif, sta);
7518 if (ret) {
7519 ath10k_warn(ar, fmt: "refusing to associate station: too many connected already (%d)\n",
7520 ar->max_num_stations);
7521 goto exit;
7522 }
7523
7524 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
7525 arsta->tx_stats = kzalloc(size: sizeof(*arsta->tx_stats),
7526 GFP_KERNEL);
7527 if (!arsta->tx_stats) {
7528 ath10k_mac_dec_num_stations(arvif, sta);
7529 ret = -ENOMEM;
7530 goto exit;
7531 }
7532 }
7533
7534 ret = ath10k_peer_create(ar, vif, sta, vdev_id: arvif->vdev_id,
7535 addr: sta->addr, peer_type);
7536 if (ret) {
7537 ath10k_warn(ar, fmt: "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
7538 sta->addr, arvif->vdev_id, ret);
7539 ath10k_mac_dec_num_stations(arvif, sta);
7540 kfree(objp: arsta->tx_stats);
7541 goto exit;
7542 }
7543
7544 spin_lock_bh(lock: &ar->data_lock);
7545
7546 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr: sta->addr);
7547 if (!peer) {
7548 ath10k_warn(ar, fmt: "failed to lookup peer %pM on vdev %i\n",
7549 vif->addr, arvif->vdev_id);
7550 spin_unlock_bh(lock: &ar->data_lock);
7551 ath10k_peer_delete(ar, vdev_id: arvif->vdev_id, addr: sta->addr);
7552 ath10k_mac_dec_num_stations(arvif, sta);
7553 kfree(objp: arsta->tx_stats);
7554 ret = -ENOENT;
7555 goto exit;
7556 }
7557
7558 arsta->peer_id = find_first_bit(addr: peer->peer_ids,
7559 ATH10K_MAX_NUM_PEER_IDS);
7560
7561 spin_unlock_bh(lock: &ar->data_lock);
7562
7563 if (!sta->tdls)
7564 goto exit;
7565
7566 ret = ath10k_wmi_update_fw_tdls_state(ar, vdev_id: arvif->vdev_id,
7567 state: WMI_TDLS_ENABLE_ACTIVE);
7568 if (ret) {
7569 ath10k_warn(ar, fmt: "failed to update fw tdls state on vdev %i: %i\n",
7570 arvif->vdev_id, ret);
7571 ath10k_peer_delete(ar, vdev_id: arvif->vdev_id,
7572 addr: sta->addr);
7573 ath10k_mac_dec_num_stations(arvif, sta);
7574 kfree(objp: arsta->tx_stats);
7575 goto exit;
7576 }
7577
7578 ret = ath10k_mac_tdls_peer_update(ar, vdev_id: arvif->vdev_id, sta,
7579 state: WMI_TDLS_PEER_STATE_PEERING);
7580 if (ret) {
7581 ath10k_warn(ar,
7582 fmt: "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
7583 sta->addr, arvif->vdev_id, ret);
7584 ath10k_peer_delete(ar, vdev_id: arvif->vdev_id, addr: sta->addr);
7585 ath10k_mac_dec_num_stations(arvif, sta);
7586 kfree(objp: arsta->tx_stats);
7587
7588 if (num_tdls_stations != 0)
7589 goto exit;
7590 ath10k_wmi_update_fw_tdls_state(ar, vdev_id: arvif->vdev_id,
7591 state: WMI_TDLS_DISABLE);
7592 }
7593 } else if ((old_state == IEEE80211_STA_NONE &&
7594 new_state == IEEE80211_STA_NOTEXIST)) {
7595 /*
7596 * Existing station deletion.
7597 */
7598 ath10k_dbg(ar, ATH10K_DBG_STA,
7599 "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
7600 arvif->vdev_id, sta->addr, sta);
7601
7602 if (sta->tdls) {
7603 ret = ath10k_mac_tdls_peer_update(ar, vdev_id: arvif->vdev_id,
7604 sta,
7605 state: WMI_TDLS_PEER_STATE_TEARDOWN);
7606 if (ret)
7607 ath10k_warn(ar, fmt: "failed to update tdls peer state for %pM state %d: %i\n",
7608 sta->addr,
7609 WMI_TDLS_PEER_STATE_TEARDOWN, ret);
7610 }
7611
7612 ret = ath10k_peer_delete(ar, vdev_id: arvif->vdev_id, addr: sta->addr);
7613 if (ret)
7614 ath10k_warn(ar, fmt: "failed to delete peer %pM for vdev %d: %i\n",
7615 sta->addr, arvif->vdev_id, ret);
7616
7617 ath10k_mac_dec_num_stations(arvif, sta);
7618
7619 spin_lock_bh(lock: &ar->data_lock);
7620 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
7621 peer = ar->peer_map[i];
7622 if (!peer)
7623 continue;
7624
7625 if (peer->sta == sta) {
7626 ath10k_warn(ar, fmt: "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
7627 sta->addr, peer, i, arvif->vdev_id);
7628 peer->sta = NULL;
7629
7630 /* Clean up the peer object as well since we
7631 * must have failed to do this above.
7632 */
7633 ath10k_peer_map_cleanup(ar, peer);
7634 }
7635 }
7636 spin_unlock_bh(lock: &ar->data_lock);
7637
7638 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
7639 kfree(objp: arsta->tx_stats);
7640 arsta->tx_stats = NULL;
7641 }
7642
7643 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
7644 ath10k_mac_txq_unref(ar, txq: sta->txq[i]);
7645
7646 if (!sta->tdls)
7647 goto exit;
7648
7649 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
7650 goto exit;
7651
		/* This was the last TDLS peer on the current vif */
7653 ret = ath10k_wmi_update_fw_tdls_state(ar, vdev_id: arvif->vdev_id,
7654 state: WMI_TDLS_DISABLE);
7655 if (ret) {
7656 ath10k_warn(ar, fmt: "failed to update fw tdls state on vdev %i: %i\n",
7657 arvif->vdev_id, ret);
7658 }
7659 } else if (old_state == IEEE80211_STA_AUTH &&
7660 new_state == IEEE80211_STA_ASSOC &&
7661 (vif->type == NL80211_IFTYPE_AP ||
7662 vif->type == NL80211_IFTYPE_MESH_POINT ||
7663 vif->type == NL80211_IFTYPE_ADHOC)) {
7664 /*
7665 * New association.
7666 */
7667 ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM associated\n",
7668 sta->addr);
7669
7670 ret = ath10k_station_assoc(ar, vif, sta, reassoc: false);
7671 if (ret)
7672 ath10k_warn(ar, fmt: "failed to associate station %pM for vdev %i: %i\n",
7673 sta->addr, arvif->vdev_id, ret);
7674 } else if (old_state == IEEE80211_STA_ASSOC &&
7675 new_state == IEEE80211_STA_AUTHORIZED &&
7676 sta->tdls) {
7677 /*
7678 * Tdls station authorized.
7679 */
7680 ath10k_dbg(ar, ATH10K_DBG_STA, "mac tdls sta %pM authorized\n",
7681 sta->addr);
7682
7683 ret = ath10k_station_assoc(ar, vif, sta, reassoc: false);
7684 if (ret) {
7685 ath10k_warn(ar, fmt: "failed to associate tdls station %pM for vdev %i: %i\n",
7686 sta->addr, arvif->vdev_id, ret);
7687 goto exit;
7688 }
7689
7690 ret = ath10k_mac_tdls_peer_update(ar, vdev_id: arvif->vdev_id, sta,
7691 state: WMI_TDLS_PEER_STATE_CONNECTED);
7692 if (ret)
7693 ath10k_warn(ar, fmt: "failed to update tdls peer %pM for vdev %i: %i\n",
7694 sta->addr, arvif->vdev_id, ret);
7695 } else if (old_state == IEEE80211_STA_ASSOC &&
7696 new_state == IEEE80211_STA_AUTH &&
7697 (vif->type == NL80211_IFTYPE_AP ||
7698 vif->type == NL80211_IFTYPE_MESH_POINT ||
7699 vif->type == NL80211_IFTYPE_ADHOC)) {
7700 /*
7701 * Disassociation.
7702 */
7703 ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM disassociated\n",
7704 sta->addr);
7705
7706 ret = ath10k_station_disassoc(ar, vif, sta);
7707 if (ret)
7708 ath10k_warn(ar, fmt: "failed to disassociate station: %pM vdev %i: %i\n",
7709 sta->addr, arvif->vdev_id, ret);
7710 }
7711exit:
7712 mutex_unlock(lock: &ar->conf_mutex);
7713 return ret;
7714}
7715
7716static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
7717 u16 ac, bool enable)
7718{
7719 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7720 struct wmi_sta_uapsd_auto_trig_arg arg = {};
7721 u32 prio = 0, acc = 0;
7722 u32 value = 0;
7723 int ret = 0;
7724
7725 lockdep_assert_held(&ar->conf_mutex);
7726
7727 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
7728 return 0;
7729
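	/* Map the mac80211 access category to the matching WMI U-APSD
	 * delivery/trigger bits, the user priority and the firmware AC
	 * index (BK=0, BE=1, VI=2, VO=3) used by the auto-trigger command
	 * further below.
	 */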
7730 switch (ac) {
7731 case IEEE80211_AC_VO:
7732 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
7733 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
7734 prio = 7;
7735 acc = 3;
7736 break;
7737 case IEEE80211_AC_VI:
7738 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
7739 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
7740 prio = 5;
7741 acc = 2;
7742 break;
7743 case IEEE80211_AC_BE:
7744 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
7745 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
7746 prio = 2;
7747 acc = 1;
7748 break;
7749 case IEEE80211_AC_BK:
7750 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
7751 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
7752 prio = 0;
7753 acc = 0;
7754 break;
7755 }
7756
7757 if (enable)
7758 arvif->u.sta.uapsd |= value;
7759 else
7760 arvif->u.sta.uapsd &= ~value;
7761
7762 ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id: arvif->vdev_id,
7763 param_id: WMI_STA_PS_PARAM_UAPSD,
7764 value: arvif->u.sta.uapsd);
7765 if (ret) {
7766 ath10k_warn(ar, fmt: "failed to set uapsd params: %d\n", ret);
7767 goto exit;
7768 }
7769
7770 if (arvif->u.sta.uapsd)
7771 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
7772 else
7773 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
7774
7775 ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id: arvif->vdev_id,
7776 param_id: WMI_STA_PS_PARAM_RX_WAKE_POLICY,
7777 value);
7778 if (ret)
7779 ath10k_warn(ar, fmt: "failed to set rx wake param: %d\n", ret);
7780
7781 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
7782 if (ret) {
7783 ath10k_warn(ar, fmt: "failed to recalc ps wake threshold on vdev %i: %d\n",
7784 arvif->vdev_id, ret);
7785 return ret;
7786 }
7787
7788 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
7789 if (ret) {
7790 ath10k_warn(ar, fmt: "failed to recalc ps poll count on vdev %i: %d\n",
7791 arvif->vdev_id, ret);
7792 return ret;
7793 }
7794
7795 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
7796 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
		/* Only userspace can make an educated decision when to send
		 * trigger frames. The following effectively disables U-APSD
		 * autotriggering in firmware (which is enabled by default
		 * provided the autotrigger service is available).
		 */
7802
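		/* A zero service interval combined with the maximum
		 * suspend/delay intervals leaves trigger-frame generation
		 * entirely up to the host.
		 */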
7803 arg.wmm_ac = acc;
7804 arg.user_priority = prio;
7805 arg.service_interval = 0;
7806 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
7807 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
7808
7809 ret = ath10k_wmi_vdev_sta_uapsd(ar, vdev_id: arvif->vdev_id,
7810 peer_addr: arvif->bssid, args: &arg, num_ac: 1);
7811 if (ret) {
7812 ath10k_warn(ar, fmt: "failed to set uapsd auto trigger %d\n",
7813 ret);
7814 return ret;
7815 }
7816 }
7817
7818exit:
7819 return ret;
7820}
7821
7822static int ath10k_conf_tx(struct ieee80211_hw *hw,
7823 struct ieee80211_vif *vif,
7824 unsigned int link_id, u16 ac,
7825 const struct ieee80211_tx_queue_params *params)
7826{
7827 struct ath10k *ar = hw->priv;
7828 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7829 struct wmi_wmm_params_arg *p = NULL;
7830 int ret;
7831
7832 mutex_lock(&ar->conf_mutex);
7833
7834 switch (ac) {
7835 case IEEE80211_AC_VO:
7836 p = &arvif->wmm_params.ac_vo;
7837 break;
7838 case IEEE80211_AC_VI:
7839 p = &arvif->wmm_params.ac_vi;
7840 break;
7841 case IEEE80211_AC_BE:
7842 p = &arvif->wmm_params.ac_be;
7843 break;
7844 case IEEE80211_AC_BK:
7845 p = &arvif->wmm_params.ac_bk;
7846 break;
7847 }
7848
7849 if (WARN_ON(!p)) {
7850 ret = -EINVAL;
7851 goto exit;
7852 }
7853
7854 p->cwmin = params->cw_min;
7855 p->cwmax = params->cw_max;
7856 p->aifs = params->aifs;
7857
7858 /*
7859 * The channel time duration programmed in the HW is in absolute
7860 * microseconds, while mac80211 gives the txop in units of
7861 * 32 microseconds.
7862 */
7863 p->txop = params->txop * 32;
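	/* For example, the typical AC_VI TXOP limit of 94 (in 32 us units)
	 * is programmed as 94 * 32 = 3008 us.
	 */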
7864
7865 if (ar->wmi.ops->gen_vdev_wmm_conf) {
7866 ret = ath10k_wmi_vdev_wmm_conf(ar, vdev_id: arvif->vdev_id,
7867 arg: &arvif->wmm_params);
7868 if (ret) {
7869 ath10k_warn(ar, fmt: "failed to set vdev wmm params on vdev %i: %d\n",
7870 arvif->vdev_id, ret);
7871 goto exit;
7872 }
7873 } else {
7874 /* This won't work well with multi-interface cases but it's
7875 * better than nothing.
7876 */
7877 ret = ath10k_wmi_pdev_set_wmm_params(ar, arg: &arvif->wmm_params);
7878 if (ret) {
7879 ath10k_warn(ar, fmt: "failed to set wmm params: %d\n", ret);
7880 goto exit;
7881 }
7882 }
7883
7884 ret = ath10k_conf_tx_uapsd(ar, vif, ac, enable: params->uapsd);
7885 if (ret)
7886 ath10k_warn(ar, fmt: "failed to set sta uapsd: %d\n", ret);
7887
7888exit:
7889 mutex_unlock(lock: &ar->conf_mutex);
7890 return ret;
7891}
7892
7893static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
7894 struct ieee80211_vif *vif,
7895 struct ieee80211_channel *chan,
7896 int duration,
7897 enum ieee80211_roc_type type)
7898{
7899 struct ath10k *ar = hw->priv;
7900 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7901 struct wmi_start_scan_arg arg;
7902 int ret = 0;
7903 u32 scan_time_msec;
7904
7905 mutex_lock(&ar->conf_mutex);
7906
7907 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
7908 ret = -EBUSY;
7909 goto exit;
7910 }
7911
7912 spin_lock_bh(lock: &ar->data_lock);
7913 switch (ar->scan.state) {
7914 case ATH10K_SCAN_IDLE:
7915 reinit_completion(x: &ar->scan.started);
7916 reinit_completion(x: &ar->scan.completed);
7917 reinit_completion(x: &ar->scan.on_channel);
7918 ar->scan.state = ATH10K_SCAN_STARTING;
7919 ar->scan.is_roc = true;
7920 ar->scan.vdev_id = arvif->vdev_id;
7921 ar->scan.roc_freq = chan->center_freq;
7922 ar->scan.roc_notify = true;
7923 ret = 0;
7924 break;
7925 case ATH10K_SCAN_STARTING:
7926 case ATH10K_SCAN_RUNNING:
7927 case ATH10K_SCAN_ABORTING:
7928 ret = -EBUSY;
7929 break;
7930 }
7931 spin_unlock_bh(lock: &ar->data_lock);
7932
7933 if (ret)
7934 goto exit;
7935
7936 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
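	/* Twice the maximum remain-on-channel duration is used for the
	 * dwell and scan times so the underlying scan should not expire
	 * before the requested period; the actual on-channel time is
	 * bounded by burst_duration_ms and the scan timeout work queued
	 * below.
	 */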
7937
7938 memset(&arg, 0, sizeof(arg));
7939 ath10k_wmi_start_scan_init(ar, arg: &arg);
7940 arg.vdev_id = arvif->vdev_id;
7941 arg.scan_id = ATH10K_SCAN_ID;
7942 arg.n_channels = 1;
7943 arg.channels[0] = chan->center_freq;
7944 arg.dwell_time_active = scan_time_msec;
7945 arg.dwell_time_passive = scan_time_msec;
7946 arg.max_scan_time = scan_time_msec;
7947 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
7948 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
7949 arg.burst_duration_ms = duration;
7950
7951 ret = ath10k_start_scan(ar, arg: &arg);
7952 if (ret) {
7953 ath10k_warn(ar, fmt: "failed to start roc scan: %d\n", ret);
7954 spin_lock_bh(lock: &ar->data_lock);
7955 ar->scan.state = ATH10K_SCAN_IDLE;
7956 spin_unlock_bh(lock: &ar->data_lock);
7957 goto exit;
7958 }
7959
7960 ret = wait_for_completion_timeout(x: &ar->scan.on_channel, timeout: 3 * HZ);
7961 if (ret == 0) {
7962 ath10k_warn(ar, fmt: "failed to switch to channel for roc scan\n");
7963
7964 ret = ath10k_scan_stop(ar);
7965 if (ret)
7966 ath10k_warn(ar, fmt: "failed to stop scan: %d\n", ret);
7967
7968 ret = -ETIMEDOUT;
7969 goto exit;
7970 }
7971
7972 ieee80211_queue_delayed_work(hw: ar->hw, dwork: &ar->scan.timeout,
7973 delay: msecs_to_jiffies(m: duration));
7974
7975 ret = 0;
7976exit:
7977 mutex_unlock(lock: &ar->conf_mutex);
7978 return ret;
7979}
7980
7981static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
7982 struct ieee80211_vif *vif)
7983{
7984 struct ath10k *ar = hw->priv;
7985
7986 mutex_lock(&ar->conf_mutex);
7987
	spin_lock_bh(&ar->data_lock);
	ar->scan.roc_notify = false;
	spin_unlock_bh(&ar->data_lock);

	ath10k_scan_abort(ar);

	mutex_unlock(&ar->conf_mutex);

	cancel_delayed_work_sync(&ar->scan.timeout);
7997
7998 return 0;
7999}
8000
8001/*
8002 * Both RTS and Fragmentation threshold are interface-specific
8003 * in ath10k, but device-specific in mac80211.
8004 */
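/* As a result, ath10k_set_rts_threshold() below applies the single
 * per-device value it receives from mac80211 to every vdev in ar->arvifs,
 * while fragmentation is rejected outright.
 */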
8005
8006static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
8007{
8008 struct ath10k *ar = hw->priv;
8009 struct ath10k_vif *arvif;
8010 int ret = 0;
8011
8012 mutex_lock(&ar->conf_mutex);
8013 list_for_each_entry(arvif, &ar->arvifs, list) {
8014 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
8015 arvif->vdev_id, value);
8016
8017 ret = ath10k_mac_set_rts(arvif, value);
8018 if (ret) {
8019 ath10k_warn(ar, fmt: "failed to set rts threshold for vdev %d: %d\n",
8020 arvif->vdev_id, ret);
8021 break;
8022 }
8023 }
8024 mutex_unlock(lock: &ar->conf_mutex);
8025
8026 return ret;
8027}
8028
8029static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
8030{
	/* Even though there's a WMI enum for the fragmentation threshold, no
	 * known firmware actually implements it. Moreover it is not possible
	 * to leave frame fragmentation to mac80211 because the firmware
	 * clears the "more fragments" bit in the frame control field, making
	 * it impossible for remote devices to reassemble frames.
	 *
	 * Hence implement a dummy callback just to say fragmentation isn't
	 * supported. This effectively prevents mac80211 from doing frame
	 * fragmentation in software.
	 */
8041 return -EOPNOTSUPP;
8042}
8043
8044void ath10k_mac_wait_tx_complete(struct ath10k *ar)
8045{
8046 bool skip;
8047 long time_left;
8048
	/* mac80211 doesn't care whether we actually transmit the queued
	 * frames or not; they will be collected either way when vdevs are
	 * stopped/deleted.
	 */
8052
8053 if (ar->state == ATH10K_STATE_WEDGED)
8054 return;
8055
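	/* Wait until HTT reports no pending tx, or bail out early if the
	 * device becomes unusable (wedged or crash-flush in progress).
	 */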
8056 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
8057 bool empty;
8058
8059 spin_lock_bh(&ar->htt.tx_lock);
8060 empty = (ar->htt.num_pending_tx == 0);
8061 spin_unlock_bh(&ar->htt.tx_lock);
8062
8063 skip = (ar->state == ATH10K_STATE_WEDGED) ||
8064 test_bit(ATH10K_FLAG_CRASH_FLUSH,
8065 &ar->dev_flags);
8066
8067 (empty || skip);
8068 }), ATH10K_FLUSH_TIMEOUT_HZ);
8069
8070 if (time_left == 0 || skip)
8071 ath10k_warn(ar, fmt: "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
8072 skip, ar->state, time_left);
8073}
8074
8075static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
8076 u32 queues, bool drop)
8077{
8078 struct ath10k *ar = hw->priv;
8079 struct ath10k_vif *arvif;
8080 u32 bitmap;
8081
8082 if (drop) {
8083 if (vif && vif->type == NL80211_IFTYPE_STATION) {
8084 bitmap = ~(1 << WMI_MGMT_TID);
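			/* Flush every TID except the management TID so that
			 * queued management frames can still go out.
			 */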
8085 list_for_each_entry(arvif, &ar->arvifs, list) {
8086 if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
8087 ath10k_wmi_peer_flush(ar, vdev_id: arvif->vdev_id,
8088 peer_addr: arvif->bssid, tid_bitmap: bitmap);
8089 }
8090 ath10k_htt_flush_tx(htt: &ar->htt);
8091 }
8092 return;
8093 }
8094
8095 mutex_lock(&ar->conf_mutex);
8096 ath10k_mac_wait_tx_complete(ar);
8097 mutex_unlock(lock: &ar->conf_mutex);
8098}
8099
8100/* TODO: Implement this function properly
8101 * For now it is needed to reply to Probe Requests in IBSS mode.
8102 * Probably we need this information from FW.
8103 */
8104static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
8105{
8106 return 1;
8107}
8108
8109static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
8110 enum ieee80211_reconfig_type reconfig_type)
8111{
8112 struct ath10k *ar = hw->priv;
8113 struct ath10k_vif *arvif;
8114
8115 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
8116 return;
8117
8118 mutex_lock(&ar->conf_mutex);
8119
8120 /* If device failed to restart it will be in a different state, e.g.
8121 * ATH10K_STATE_WEDGED
8122 */
8123 if (ar->state == ATH10K_STATE_RESTARTED) {
8124 ath10k_info(ar, fmt: "device successfully recovered\n");
8125 ar->state = ATH10K_STATE_ON;
8126 ieee80211_wake_queues(hw: ar->hw);
8127 clear_bit(nr: ATH10K_FLAG_RESTARTING, addr: &ar->dev_flags);
8128 if (ar->hw_params.hw_restart_disconnect) {
8129 list_for_each_entry(arvif, &ar->arvifs, list) {
8130 if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
8131 ieee80211_hw_restart_disconnect(vif: arvif->vif);
8132 }
8133 }
8134 }
8135
8136 mutex_unlock(lock: &ar->conf_mutex);
8137}
8138
8139static void
8140ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
8141 struct ieee80211_channel *channel)
8142{
8143 int ret;
8144 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
8145
8146 lockdep_assert_held(&ar->conf_mutex);
8147
8148 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
8149 (ar->rx_channel != channel))
8150 return;
8151
8152 if (ar->scan.state != ATH10K_SCAN_IDLE) {
8153 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
8154 return;
8155 }
8156
8157 reinit_completion(x: &ar->bss_survey_done);
8158
8159 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
8160 if (ret) {
8161 ath10k_warn(ar, fmt: "failed to send pdev bss chan info request\n");
8162 return;
8163 }
8164
8165 ret = wait_for_completion_timeout(x: &ar->bss_survey_done, timeout: 3 * HZ);
8166 if (!ret) {
8167 ath10k_warn(ar, fmt: "bss channel survey timed out\n");
8168 return;
8169 }
8170}
8171
8172static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
8173 struct survey_info *survey)
8174{
8175 struct ath10k *ar = hw->priv;
8176 struct ieee80211_supported_band *sband;
8177 struct survey_info *ar_survey = &ar->survey[idx];
8178 int ret = 0;
8179
8180 mutex_lock(&ar->conf_mutex);
8181
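	/* The survey index space is the concatenation of the 2 GHz and
	 * 5 GHz channel lists; translate idx into the matching band.
	 */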
8182 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
8183 if (sband && idx >= sband->n_channels) {
8184 idx -= sband->n_channels;
8185 sband = NULL;
8186 }
8187
8188 if (!sband)
8189 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
8190
8191 if (!sband || idx >= sband->n_channels) {
8192 ret = -ENOENT;
8193 goto exit;
8194 }
8195
	ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);

	spin_lock_bh(&ar->data_lock);
	memcpy(survey, ar_survey, sizeof(*survey));
	spin_unlock_bh(&ar->data_lock);
8201
8202 survey->channel = &sband->channels[idx];
8203
8204 if (ar->rx_channel == survey->channel)
8205 survey->filled |= SURVEY_INFO_IN_USE;
8206
8207exit:
8208 mutex_unlock(lock: &ar->conf_mutex);
8209 return ret;
8210}
8211
8212static bool
8213ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
8214 enum nl80211_band band,
8215 const struct cfg80211_bitrate_mask *mask,
8216 int *nss)
8217{
8218 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
8219 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
8220 u8 ht_nss_mask = 0;
8221 u8 vht_nss_mask = 0;
8222 int i;
8223
8224 if (mask->control[band].legacy)
8225 return false;
8226
8227 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
8228 if (mask->control[band].ht_mcs[i] == 0)
8229 continue;
8230 else if (mask->control[band].ht_mcs[i] ==
8231 sband->ht_cap.mcs.rx_mask[i])
8232 ht_nss_mask |= BIT(i);
8233 else
8234 return false;
8235 }
8236
8237 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
8238 if (mask->control[band].vht_mcs[i] == 0)
8239 continue;
8240 else if (mask->control[band].vht_mcs[i] ==
8241 ath10k_mac_get_max_vht_mcs_map(mcs_map: vht_mcs_map, nss: i))
8242 vht_nss_mask |= BIT(i);
8243 else
8244 return false;
8245 }
8246
8247 if (ht_nss_mask != vht_nss_mask)
8248 return false;
8249
8250 if (ht_nss_mask == 0)
8251 return false;
8252
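	/* Only accept a contiguous NSS mask starting at NSS 1 (e.g. 0x3 for
	 * NSS 1-2); a sparse mask such as 0x5 cannot be expressed as a
	 * single NSS value.
	 */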
8253 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
8254 return false;
8255
8256 *nss = fls(x: ht_nss_mask);
8257
8258 return true;
8259}
8260
8261static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
8262 u8 rate, u8 nss, u8 sgi, u8 ldpc)
8263{
8264 struct ath10k *ar = arvif->ar;
8265 u32 vdev_param;
8266 int ret;
8267
8268 lockdep_assert_held(&ar->conf_mutex);
8269
8270 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
8271 arvif->vdev_id, rate, nss, sgi);
8272
8273 vdev_param = ar->wmi.vdev_param->fixed_rate;
8274 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param, param_value: rate);
8275 if (ret) {
8276 ath10k_warn(ar, fmt: "failed to set fixed rate param 0x%02x: %d\n",
8277 rate, ret);
8278 return ret;
8279 }
8280
8281 vdev_param = ar->wmi.vdev_param->nss;
8282 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param, param_value: nss);
8283 if (ret) {
8284 ath10k_warn(ar, fmt: "failed to set nss param %d: %d\n", nss, ret);
8285 return ret;
8286 }
8287
8288 vdev_param = ar->wmi.vdev_param->sgi;
8289 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param, param_value: sgi);
8290 if (ret) {
8291 ath10k_warn(ar, fmt: "failed to set sgi param %d: %d\n", sgi, ret);
8292 return ret;
8293 }
8294
8295 vdev_param = ar->wmi.vdev_param->ldpc;
8296 ret = ath10k_wmi_vdev_set_param(ar, vdev_id: arvif->vdev_id, param_id: vdev_param, param_value: ldpc);
8297 if (ret) {
8298 ath10k_warn(ar, fmt: "failed to set ldpc param %d: %d\n", ldpc, ret);
8299 return ret;
8300 }
8301
8302 return 0;
8303}
8304
8305static bool
8306ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
8307 enum nl80211_band band,
8308 const struct cfg80211_bitrate_mask *mask,
8309 bool allow_pfr)
8310{
8311 int i;
8312 u16 vht_mcs;
8313
	/* Due to a firmware limitation in WMI_PEER_ASSOC_CMDID it is
	 * impossible to express all VHT MCS rate masks. Effectively only the
	 * following ranges can be used: none, 0-7, 0-8 and 0-9.
	 */
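	/* The accepted case values below therefore correspond to the masks
	 * 0x0 (none), 0x00ff (MCS 0-7), 0x01ff (MCS 0-8) and 0x03ff
	 * (MCS 0-9).
	 */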
8318 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
8319 vht_mcs = mask->control[band].vht_mcs[i];
8320
8321 switch (vht_mcs) {
8322 case 0:
8323 case BIT(8) - 1:
8324 case BIT(9) - 1:
8325 case BIT(10) - 1:
8326 break;
8327 default:
8328 if (!allow_pfr)
8329 ath10k_warn(ar, fmt: "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
8330 return false;
8331 }
8332 }
8333
8334 return true;
8335}
8336
8337static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
8338 struct ath10k_vif *arvif,
8339 struct ieee80211_sta *sta)
8340{
8341 int err;
8342 u8 rate = arvif->vht_pfr;
8343
	/* skip non-VHT peers and the multi-rate case */
8345 if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1)
8346 return false;
8347
8348 err = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
8349 param_id: WMI_PEER_PARAM_FIXED_RATE, param_value: rate);
8350 if (err)
8351 ath10k_warn(ar, fmt: "failed to enable STA %pM peer fixed rate: %d\n",
8352 sta->addr, err);
8353
8354 return true;
8355}
8356
8357static void ath10k_mac_set_bitrate_mask_iter(void *data,
8358 struct ieee80211_sta *sta)
8359{
8360 struct ath10k_vif *arvif = data;
8361 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8362 struct ath10k *ar = arvif->ar;
8363
8364 if (arsta->arvif != arvif)
8365 return;
8366
8367 if (ath10k_mac_set_vht_bitrate_mask_fixup(ar, arvif, sta))
8368 return;
8369
8370 spin_lock_bh(lock: &ar->data_lock);
8371 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
8372 spin_unlock_bh(lock: &ar->data_lock);
8373
8374 ieee80211_queue_work(hw: ar->hw, work: &arsta->update_wk);
8375}
8376
8377static void ath10k_mac_clr_bitrate_mask_iter(void *data,
8378 struct ieee80211_sta *sta)
8379{
8380 struct ath10k_vif *arvif = data;
8381 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8382 struct ath10k *ar = arvif->ar;
8383 int err;
8384
8385 /* clear vht peers only */
8386 if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported)
8387 return;
8388
8389 err = ath10k_wmi_peer_set_param(ar, vdev_id: arvif->vdev_id, peer_addr: sta->addr,
8390 param_id: WMI_PEER_PARAM_FIXED_RATE,
8391 WMI_FIXED_RATE_NONE);
8392 if (err)
8393 ath10k_warn(ar, fmt: "failed to clear STA %pM peer fixed rate: %d\n",
8394 sta->addr, err);
8395}
8396
8397static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
8398 struct ieee80211_vif *vif,
8399 const struct cfg80211_bitrate_mask *mask)
8400{
8401 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8402 struct cfg80211_chan_def def;
8403 struct ath10k *ar = arvif->ar;
8404 enum nl80211_band band;
8405 const u8 *ht_mcs_mask;
8406 const u16 *vht_mcs_mask;
8407 u8 rate;
8408 u8 nss;
8409 u8 sgi;
8410 u8 ldpc;
8411 int single_nss;
8412 int ret;
8413 int vht_num_rates, allow_pfr;
8414 u8 vht_pfr;
8415 bool update_bitrate_mask = true;
8416
8417 if (ath10k_mac_vif_chan(vif, def: &def))
8418 return -EPERM;
8419
8420 band = def.chan->band;
8421 ht_mcs_mask = mask->control[band].ht_mcs;
8422 vht_mcs_mask = mask->control[band].vht_mcs;
8423 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
8424
8425 sgi = mask->control[band].gi;
8426 if (sgi == NL80211_TXRATE_FORCE_LGI)
8427 return -EINVAL;
8428
8429 allow_pfr = test_bit(ATH10K_FW_FEATURE_PEER_FIXED_RATE,
8430 ar->normal_mode_fw.fw_file.fw_features);
8431 if (allow_pfr) {
8432 mutex_lock(&ar->conf_mutex);
8433 ieee80211_iterate_stations_atomic(hw: ar->hw,
8434 iterator: ath10k_mac_clr_bitrate_mask_iter,
8435 data: arvif);
8436 mutex_unlock(lock: &ar->conf_mutex);
8437 }
8438
8439 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
8440 vht_num_rates: &vht_num_rates)) {
8441 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
8442 rate: &rate, nss: &nss,
8443 vht_only: false);
8444 if (ret) {
8445 ath10k_warn(ar, fmt: "failed to get single rate for vdev %i: %d\n",
8446 arvif->vdev_id, ret);
8447 return ret;
8448 }
8449 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
8450 nss: &single_nss)) {
8451 rate = WMI_FIXED_RATE_NONE;
8452 nss = single_nss;
8453 } else {
8454 rate = WMI_FIXED_RATE_NONE;
8455 nss = min(ar->num_rf_chains,
8456 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
8457 ath10k_mac_max_vht_nss(vht_mcs_mask)));
8458
8459 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask,
8460 allow_pfr)) {
8461 u8 vht_nss;
8462
8463 if (!allow_pfr || vht_num_rates != 1)
8464 return -EINVAL;
8465
			/* If we reach here, the firmware supports a peer
			 * fixed rate and there is a single VHT rate. Don't
			 * update the vif bitrate_mask, as the rate applies
			 * only to the specific peer.
			 */
8470 ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
8471 rate: &vht_pfr,
8472 nss: &vht_nss,
8473 vht_only: true);
8474 update_bitrate_mask = false;
8475 } else {
8476 vht_pfr = 0;
8477 }
8478
8479 mutex_lock(&ar->conf_mutex);
8480
8481 if (update_bitrate_mask)
8482 arvif->bitrate_mask = *mask;
8483 arvif->vht_num_rates = vht_num_rates;
8484 arvif->vht_pfr = vht_pfr;
8485 ieee80211_iterate_stations_atomic(hw: ar->hw,
8486 iterator: ath10k_mac_set_bitrate_mask_iter,
8487 data: arvif);
8488
8489 mutex_unlock(lock: &ar->conf_mutex);
8490 }
8491
8492 mutex_lock(&ar->conf_mutex);
8493
8494 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
8495 if (ret) {
8496 ath10k_warn(ar, fmt: "failed to set fixed rate params on vdev %i: %d\n",
8497 arvif->vdev_id, ret);
8498 goto exit;
8499 }
8500
8501exit:
8502 mutex_unlock(lock: &ar->conf_mutex);
8503
8504 return ret;
8505}
8506
8507static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
8508 struct ieee80211_vif *vif,
8509 struct ieee80211_sta *sta,
8510 u32 changed)
8511{
8512 struct ath10k *ar = hw->priv;
8513 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8514 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8515 struct ath10k_peer *peer;
8516 u32 bw, smps;
8517
8518 spin_lock_bh(lock: &ar->data_lock);
8519
8520 peer = ath10k_peer_find(ar, vdev_id: arvif->vdev_id, addr: sta->addr);
8521 if (!peer) {
8522 spin_unlock_bh(lock: &ar->data_lock);
8523 ath10k_warn(ar, fmt: "mac sta rc update failed to find peer %pM on vdev %i\n",
8524 sta->addr, arvif->vdev_id);
8525 return;
8526 }
8527
8528 ath10k_dbg(ar, ATH10K_DBG_STA,
8529 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
8530 sta->addr, changed, sta->deflink.bandwidth,
8531 sta->deflink.rx_nss,
8532 sta->deflink.smps_mode);
8533
8534 if (changed & IEEE80211_RC_BW_CHANGED) {
8535 bw = WMI_PEER_CHWIDTH_20MHZ;
8536
8537 switch (sta->deflink.bandwidth) {
8538 case IEEE80211_STA_RX_BW_20:
8539 bw = WMI_PEER_CHWIDTH_20MHZ;
8540 break;
8541 case IEEE80211_STA_RX_BW_40:
8542 bw = WMI_PEER_CHWIDTH_40MHZ;
8543 break;
8544 case IEEE80211_STA_RX_BW_80:
8545 bw = WMI_PEER_CHWIDTH_80MHZ;
8546 break;
8547 case IEEE80211_STA_RX_BW_160:
8548 bw = WMI_PEER_CHWIDTH_160MHZ;
8549 break;
8550 default:
8551 ath10k_warn(ar, fmt: "Invalid bandwidth %d in rc update for %pM\n",
8552 sta->deflink.bandwidth, sta->addr);
8553 bw = WMI_PEER_CHWIDTH_20MHZ;
8554 break;
8555 }
8556
8557 arsta->bw = bw;
8558 }
8559
8560 if (changed & IEEE80211_RC_NSS_CHANGED)
8561 arsta->nss = sta->deflink.rx_nss;
8562
8563 if (changed & IEEE80211_RC_SMPS_CHANGED) {
8564 smps = WMI_PEER_SMPS_PS_NONE;
8565
8566 switch (sta->deflink.smps_mode) {
8567 case IEEE80211_SMPS_AUTOMATIC:
8568 case IEEE80211_SMPS_OFF:
8569 smps = WMI_PEER_SMPS_PS_NONE;
8570 break;
8571 case IEEE80211_SMPS_STATIC:
8572 smps = WMI_PEER_SMPS_STATIC;
8573 break;
8574 case IEEE80211_SMPS_DYNAMIC:
8575 smps = WMI_PEER_SMPS_DYNAMIC;
8576 break;
8577 case IEEE80211_SMPS_NUM_MODES:
8578 ath10k_warn(ar, fmt: "Invalid smps %d in sta rc update for %pM\n",
8579 sta->deflink.smps_mode, sta->addr);
8580 smps = WMI_PEER_SMPS_PS_NONE;
8581 break;
8582 }
8583
8584 arsta->smps = smps;
8585 }
8586
8587 arsta->changed |= changed;
8588
8589 spin_unlock_bh(lock: &ar->data_lock);
8590
8591 ieee80211_queue_work(hw, work: &arsta->update_wk);
8592}
8593
8594static void ath10k_offset_tsf(struct ieee80211_hw *hw,
8595 struct ieee80211_vif *vif, s64 tsf_offset)
8596{
8597 struct ath10k *ar = hw->priv;
8598 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8599 u32 offset, vdev_param;
8600 int ret;
8601
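	/* The firmware exposes only unsigned increment/decrement vdev
	 * parameters, so the sign of tsf_offset selects the parameter and
	 * its magnitude is passed as the value.
	 */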
8602 if (tsf_offset < 0) {
8603 vdev_param = ar->wmi.vdev_param->dec_tsf;
8604 offset = -tsf_offset;
8605 } else {
8606 vdev_param = ar->wmi.vdev_param->inc_tsf;
8607 offset = tsf_offset;
8608 }
8609
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
					vdev_param, offset);

	if (ret && ret != -EOPNOTSUPP)
		ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
			    offset, vdev_param, ret);
8616}
8617
8618static int ath10k_ampdu_action(struct ieee80211_hw *hw,
8619 struct ieee80211_vif *vif,
8620 struct ieee80211_ampdu_params *params)
8621{
8622 struct ath10k *ar = hw->priv;
8623 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8624 struct ieee80211_sta *sta = params->sta;
8625 enum ieee80211_ampdu_mlme_action action = params->action;
8626 u16 tid = params->tid;
8627
8628 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %u action %d\n",
8629 arvif->vdev_id, sta->addr, tid, action);
8630
8631 switch (action) {
8632 case IEEE80211_AMPDU_RX_START:
8633 case IEEE80211_AMPDU_RX_STOP:
8634 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
8635 * creation/removal. Do we need to verify this?
8636 */
8637 return 0;
8638 case IEEE80211_AMPDU_TX_START:
8639 case IEEE80211_AMPDU_TX_STOP_CONT:
8640 case IEEE80211_AMPDU_TX_STOP_FLUSH:
8641 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
8642 case IEEE80211_AMPDU_TX_OPERATIONAL:
8643 /* Firmware offloads Tx aggregation entirely so deny mac80211
8644 * Tx aggregation requests.
8645 */
8646 return -EOPNOTSUPP;
8647 }
8648
8649 return -EINVAL;
8650}
8651
8652static void
8653ath10k_mac_update_rx_channel(struct ath10k *ar,
8654 struct ieee80211_chanctx_conf *ctx,
8655 struct ieee80211_vif_chanctx_switch *vifs,
8656 int n_vifs)
8657{
8658 struct cfg80211_chan_def *def = NULL;
8659
8660 /* Both locks are required because ar->rx_channel is modified. This
8661 * allows readers to hold either lock.
8662 */
8663 lockdep_assert_held(&ar->conf_mutex);
8664 lockdep_assert_held(&ar->data_lock);
8665
8666 WARN_ON(ctx && vifs);
8667 WARN_ON(vifs && !n_vifs);
8668
	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
	 * ppdu on Rx may reduce performance on low-end systems. It should be
	 * possible to make tables/hashmaps to speed the lookup up (be wary of
	 * cpu data cache lines though regarding sizes) but to keep the initial
	 * implementation simple and less intrusive fall back to the slow
	 * lookup only for multi-channel cases. Single-channel cases will keep
	 * using the old channel derivation and thus performance should not be
	 * affected much.
	 */
8679 rcu_read_lock();
8680 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
8681 ieee80211_iter_chan_contexts_atomic(hw: ar->hw,
8682 iter: ath10k_mac_get_any_chandef_iter,
8683 iter_data: &def);
8684
8685 if (vifs)
8686 def = &vifs[0].new_ctx->def;
8687
8688 ar->rx_channel = def->chan;
8689 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
8690 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
		/* During a driver restart due to a firmware assert, mac80211
		 * already has a valid channel context for the radio, so the
		 * channel context iteration returns num_chanctx > 0. Fix up
		 * rx_channel while the restart is in progress.
		 */
8696 ar->rx_channel = ctx->def.chan;
8697 } else {
8698 ar->rx_channel = NULL;
8699 }
8700 rcu_read_unlock();
8701}
8702
8703static void
8704ath10k_mac_update_vif_chan(struct ath10k *ar,
8705 struct ieee80211_vif_chanctx_switch *vifs,
8706 int n_vifs)
8707{
8708 struct ath10k_vif *arvif;
8709 int ret;
8710 int i;
8711
8712 lockdep_assert_held(&ar->conf_mutex);
8713
8714 /* First stop monitor interface. Some FW versions crash if there's a
8715 * lone monitor interface.
8716 */
8717 if (ar->monitor_started)
8718 ath10k_monitor_stop(ar);
8719
8720 for (i = 0; i < n_vifs; i++) {
8721 arvif = (void *)vifs[i].vif->drv_priv;
8722
8723 ath10k_dbg(ar, ATH10K_DBG_MAC,
8724 "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
8725 arvif->vdev_id,
8726 vifs[i].old_ctx->def.chan->center_freq,
8727 vifs[i].new_ctx->def.chan->center_freq,
8728 vifs[i].old_ctx->def.width,
8729 vifs[i].new_ctx->def.width);
8730
8731 if (WARN_ON(!arvif->is_started))
8732 continue;
8733
8734 if (WARN_ON(!arvif->is_up))
8735 continue;
8736
8737 ret = ath10k_wmi_vdev_down(ar, vdev_id: arvif->vdev_id);
8738 if (ret) {
8739 ath10k_warn(ar, fmt: "failed to down vdev %d: %d\n",
8740 arvif->vdev_id, ret);
8741 continue;
8742 }
8743 }
8744
8745 /* All relevant vdevs are downed and associated channel resources
8746 * should be available for the channel switch now.
8747 */
8748
8749 spin_lock_bh(lock: &ar->data_lock);
8750 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
8751 spin_unlock_bh(lock: &ar->data_lock);
8752
8753 for (i = 0; i < n_vifs; i++) {
8754 arvif = (void *)vifs[i].vif->drv_priv;
8755
8756 if (WARN_ON(!arvif->is_started))
8757 continue;
8758
8759 if (WARN_ON(!arvif->is_up))
8760 continue;
8761
8762 ret = ath10k_mac_setup_bcn_tmpl(arvif);
8763 if (ret)
8764 ath10k_warn(ar, fmt: "failed to update bcn tmpl during csa: %d\n",
8765 ret);
8766
8767 ret = ath10k_mac_setup_prb_tmpl(arvif);
8768 if (ret)
8769 ath10k_warn(ar, fmt: "failed to update prb tmpl during csa: %d\n",
8770 ret);
8771
8772 ret = ath10k_vdev_restart(arvif, def: &vifs[i].new_ctx->def);
8773 if (ret) {
8774 ath10k_warn(ar, fmt: "failed to restart vdev %d: %d\n",
8775 arvif->vdev_id, ret);
8776 continue;
8777 }
8778
8779 ret = ath10k_wmi_vdev_up(ar: arvif->ar, vdev_id: arvif->vdev_id, aid: arvif->aid,
8780 bssid: arvif->bssid);
8781 if (ret) {
8782 ath10k_warn(ar, fmt: "failed to bring vdev up %d: %d\n",
8783 arvif->vdev_id, ret);
8784 continue;
8785 }
8786 }
8787
8788 ath10k_monitor_recalc(ar);
8789}
8790
8791static int
8792ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
8793 struct ieee80211_chanctx_conf *ctx)
8794{
8795 struct ath10k *ar = hw->priv;
8796
8797 ath10k_dbg(ar, ATH10K_DBG_MAC,
8798 "mac chanctx add freq %u width %d ptr %pK\n",
8799 ctx->def.chan->center_freq, ctx->def.width, ctx);
8800
8801 mutex_lock(&ar->conf_mutex);
8802
8803 spin_lock_bh(lock: &ar->data_lock);
8804 ath10k_mac_update_rx_channel(ar, ctx, NULL, n_vifs: 0);
8805 spin_unlock_bh(lock: &ar->data_lock);
8806
8807 ath10k_recalc_radar_detection(ar);
8808 ath10k_monitor_recalc(ar);
8809
8810 mutex_unlock(lock: &ar->conf_mutex);
8811
8812 return 0;
8813}
8814
8815static void
8816ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
8817 struct ieee80211_chanctx_conf *ctx)
8818{
8819 struct ath10k *ar = hw->priv;
8820
8821 ath10k_dbg(ar, ATH10K_DBG_MAC,
8822 "mac chanctx remove freq %u width %d ptr %pK\n",
8823 ctx->def.chan->center_freq, ctx->def.width, ctx);
8824
8825 mutex_lock(&ar->conf_mutex);
8826
8827 spin_lock_bh(lock: &ar->data_lock);
8828 ath10k_mac_update_rx_channel(ar, NULL, NULL, n_vifs: 0);
8829 spin_unlock_bh(lock: &ar->data_lock);
8830
8831 ath10k_recalc_radar_detection(ar);
8832 ath10k_monitor_recalc(ar);
8833
8834 mutex_unlock(lock: &ar->conf_mutex);
8835}
8836
8837struct ath10k_mac_change_chanctx_arg {
8838 struct ieee80211_chanctx_conf *ctx;
8839 struct ieee80211_vif_chanctx_switch *vifs;
8840 int n_vifs;
8841 int next_vif;
8842};
8843
8844static void
8845ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
8846 struct ieee80211_vif *vif)
8847{
8848 struct ath10k_mac_change_chanctx_arg *arg = data;
8849
8850 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
8851 return;
8852
8853 arg->n_vifs++;
8854}
8855
8856static void
8857ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
8858 struct ieee80211_vif *vif)
8859{
8860 struct ath10k_mac_change_chanctx_arg *arg = data;
8861 struct ieee80211_chanctx_conf *ctx;
8862
8863 ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
8864 if (ctx != arg->ctx)
8865 return;
8866
8867 if (WARN_ON(arg->next_vif == arg->n_vifs))
8868 return;
8869
8870 arg->vifs[arg->next_vif].vif = vif;
8871 arg->vifs[arg->next_vif].old_ctx = ctx;
8872 arg->vifs[arg->next_vif].new_ctx = ctx;
8873 arg->next_vif++;
8874}
8875
8876static void
8877ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
8878 struct ieee80211_chanctx_conf *ctx,
8879 u32 changed)
8880{
8881 struct ath10k *ar = hw->priv;
8882 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
8883
8884 mutex_lock(&ar->conf_mutex);
8885
8886 ath10k_dbg(ar, ATH10K_DBG_MAC,
8887 "mac chanctx change freq %u width %d ptr %pK changed %x\n",
8888 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
8889
8890 /* This shouldn't really happen because channel switching should use
8891 * switch_vif_chanctx().
8892 */
8893 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
8894 goto unlock;
8895
8896 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
8897 ieee80211_iterate_active_interfaces_atomic(
8898 hw,
8899 ATH10K_ITER_NORMAL_FLAGS,
8900 iterator: ath10k_mac_change_chanctx_cnt_iter,
8901 data: &arg);
8902 if (arg.n_vifs == 0)
8903 goto radar;
8904
8905 arg.vifs = kcalloc(n: arg.n_vifs, size: sizeof(arg.vifs[0]),
8906 GFP_KERNEL);
8907 if (!arg.vifs)
8908 goto radar;
8909
8910 ieee80211_iterate_active_interfaces_atomic(
8911 hw,
8912 ATH10K_ITER_NORMAL_FLAGS,
8913 iterator: ath10k_mac_change_chanctx_fill_iter,
8914 data: &arg);
8915 ath10k_mac_update_vif_chan(ar, vifs: arg.vifs, n_vifs: arg.n_vifs);
8916 kfree(objp: arg.vifs);
8917 }
8918
8919radar:
8920 ath10k_recalc_radar_detection(ar);
8921
8922 /* FIXME: How to configure Rx chains properly? */
8923
8924 /* No other actions are actually necessary. Firmware maintains channel
8925 * definitions per vdev internally and there's no host-side channel
8926 * context abstraction to configure, e.g. channel width.
8927 */
8928
8929unlock:
8930 mutex_unlock(lock: &ar->conf_mutex);
8931}
8932
8933static int
8934ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
8935 struct ieee80211_vif *vif,
8936 struct ieee80211_bss_conf *link_conf,
8937 struct ieee80211_chanctx_conf *ctx)
8938{
8939 struct ath10k *ar = hw->priv;
8940 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8941 int ret;
8942
8943 mutex_lock(&ar->conf_mutex);
8944
8945 ath10k_dbg(ar, ATH10K_DBG_MAC,
8946 "mac chanctx assign ptr %pK vdev_id %i\n",
8947 ctx, arvif->vdev_id);
8948
8949 if (WARN_ON(arvif->is_started)) {
8950 mutex_unlock(lock: &ar->conf_mutex);
8951 return -EBUSY;
8952 }
8953
8954 ret = ath10k_vdev_start(arvif, def: &ctx->def);
8955 if (ret) {
8956 ath10k_warn(ar, fmt: "failed to start vdev %i addr %pM on freq %d: %d\n",
8957 arvif->vdev_id, vif->addr,
8958 ctx->def.chan->center_freq, ret);
8959 goto err;
8960 }
8961
8962 arvif->is_started = true;
8963
8964 ret = ath10k_mac_vif_setup_ps(arvif);
8965 if (ret) {
8966 ath10k_warn(ar, fmt: "failed to update vdev %i ps: %d\n",
8967 arvif->vdev_id, ret);
8968 goto err_stop;
8969 }
8970
8971 if (vif->type == NL80211_IFTYPE_MONITOR) {
8972 ret = ath10k_wmi_vdev_up(ar, vdev_id: arvif->vdev_id, aid: 0, bssid: vif->addr);
8973 if (ret) {
8974 ath10k_warn(ar, fmt: "failed to up monitor vdev %i: %d\n",
8975 arvif->vdev_id, ret);
8976 goto err_stop;
8977 }
8978
8979 arvif->is_up = true;
8980 }
8981
8982 if (ath10k_mac_can_set_cts_prot(arvif)) {
8983 ret = ath10k_mac_set_cts_prot(arvif);
8984 if (ret)
8985 ath10k_warn(ar, fmt: "failed to set cts protection for vdev %d: %d\n",
8986 arvif->vdev_id, ret);
8987 }
8988
8989 if (ath10k_peer_stats_enabled(ar) &&
8990 ar->hw_params.tx_stats_over_pktlog) {
8991 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
8992 ret = ath10k_wmi_pdev_pktlog_enable(ar,
8993 filter: ar->pktlog_filter);
8994 if (ret) {
8995 ath10k_warn(ar, fmt: "failed to enable pktlog %d\n", ret);
8996 goto err_stop;
8997 }
8998 }
8999
9000 mutex_unlock(lock: &ar->conf_mutex);
9001 return 0;
9002
9003err_stop:
9004 ath10k_vdev_stop(arvif);
9005 arvif->is_started = false;
9006 ath10k_mac_vif_setup_ps(arvif);
9007
9008err:
9009 mutex_unlock(lock: &ar->conf_mutex);
9010 return ret;
9011}
9012
9013static void
9014ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
9015 struct ieee80211_vif *vif,
9016 struct ieee80211_bss_conf *link_conf,
9017 struct ieee80211_chanctx_conf *ctx)
9018{
9019 struct ath10k *ar = hw->priv;
9020 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9021 int ret;
9022
9023 mutex_lock(&ar->conf_mutex);
9024
9025 ath10k_dbg(ar, ATH10K_DBG_MAC,
9026 "mac chanctx unassign ptr %pK vdev_id %i\n",
9027 ctx, arvif->vdev_id);
9028
9029 WARN_ON(!arvif->is_started);
9030
9031 if (vif->type == NL80211_IFTYPE_MONITOR) {
9032 WARN_ON(!arvif->is_up);
9033
9034 ret = ath10k_wmi_vdev_down(ar, vdev_id: arvif->vdev_id);
9035 if (ret)
9036 ath10k_warn(ar, fmt: "failed to down monitor vdev %i: %d\n",
9037 arvif->vdev_id, ret);
9038
9039 arvif->is_up = false;
9040 }
9041
9042 ret = ath10k_vdev_stop(arvif);
9043 if (ret)
9044 ath10k_warn(ar, fmt: "failed to stop vdev %i: %d\n",
9045 arvif->vdev_id, ret);
9046
9047 arvif->is_started = false;
9048
9049 mutex_unlock(lock: &ar->conf_mutex);
9050}
9051
9052static int
9053ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
9054 struct ieee80211_vif_chanctx_switch *vifs,
9055 int n_vifs,
9056 enum ieee80211_chanctx_switch_mode mode)
9057{
9058 struct ath10k *ar = hw->priv;
9059
9060 mutex_lock(&ar->conf_mutex);
9061
9062 ath10k_dbg(ar, ATH10K_DBG_MAC,
9063 "mac chanctx switch n_vifs %d mode %d\n",
9064 n_vifs, mode);
9065 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
9066
9067 mutex_unlock(lock: &ar->conf_mutex);
9068 return 0;
9069}
9070
9071static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
9072 struct ieee80211_vif *vif,
9073 struct ieee80211_sta *sta)
9074{
9075 struct ath10k *ar;
9076 struct ath10k_peer *peer;
9077
9078 ar = hw->priv;
9079
9080 list_for_each_entry(peer, &ar->peers, list)
9081 if (peer->sta == sta)
9082 peer->removed = true;
9083}
9084
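/* The rate tables below list data rates in units of 100 kbps, e.g. 65
 * corresponds to 6.5 Mbit/s (HT MCS 0, 20 MHz, long GI).
 */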
9085/* HT MCS parameters with Nss = 1 */
9086static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
9087 /* MCS L20 L40 S20 S40 */
9088 {0, { 65, 135, 72, 150} },
9089 {1, { 130, 270, 144, 300} },
9090 {2, { 195, 405, 217, 450} },
9091 {3, { 260, 540, 289, 600} },
9092 {4, { 390, 810, 433, 900} },
9093 {5, { 520, 1080, 578, 1200} },
9094 {6, { 585, 1215, 650, 1350} },
9095 {7, { 650, 1350, 722, 1500} }
9096};
9097
9098/* HT MCS parameters with Nss = 2 */
9099static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
9100 /* MCS L20 L40 S20 S40 */
9101 {0, {130, 270, 144, 300} },
9102 {1, {260, 540, 289, 600} },
9103 {2, {390, 810, 433, 900} },
9104 {3, {520, 1080, 578, 1200} },
9105 {4, {780, 1620, 867, 1800} },
9106 {5, {1040, 2160, 1156, 2400} },
9107 {6, {1170, 2430, 1300, 2700} },
9108 {7, {1300, 2700, 1444, 3000} }
9109};
9110
/* VHT MCS parameters with Nss = 1 */
9112static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
9113 /* MCS L80 S80 L40 S40 L20 S20 */
9114 {0, {293, 325}, {135, 150}, {65, 72} },
9115 {1, {585, 650}, {270, 300}, {130, 144} },
9116 {2, {878, 975}, {405, 450}, {195, 217} },
9117 {3, {1170, 1300}, {540, 600}, {260, 289} },
9118 {4, {1755, 1950}, {810, 900}, {390, 433} },
9119 {5, {2340, 2600}, {1080, 1200}, {520, 578} },
9120 {6, {2633, 2925}, {1215, 1350}, {585, 650} },
9121 {7, {2925, 3250}, {1350, 1500}, {650, 722} },
9122 {8, {3510, 3900}, {1620, 1800}, {780, 867} },
9123 {9, {3900, 4333}, {1800, 2000}, {780, 867} }
9124};
9125
/* VHT MCS parameters with Nss = 2 */
9127static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
9128 /* MCS L80 S80 L40 S40 L20 S20 */
9129 {0, {585, 650}, {270, 300}, {130, 144} },
9130 {1, {1170, 1300}, {540, 600}, {260, 289} },
9131 {2, {1755, 1950}, {810, 900}, {390, 433} },
9132 {3, {2340, 2600}, {1080, 1200}, {520, 578} },
9133 {4, {3510, 3900}, {1620, 1800}, {780, 867} },
9134 {5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
9135 {6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
9136 {7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
9137 {8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
9138 {9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
9139};
9140
9141static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
9142 u8 *flags, u8 *bw)
9143{
9144 struct ath10k_index_ht_data_rate_type *mcs_rate;
9145 u8 index;
9146 size_t len_nss1 = ARRAY_SIZE(supported_ht_mcs_rate_nss1);
9147 size_t len_nss2 = ARRAY_SIZE(supported_ht_mcs_rate_nss2);
9148
9149 if (mcs >= (len_nss1 + len_nss2)) {
9150 ath10k_warn(ar, fmt: "not supported mcs %d in current rate table", mcs);
9151 return;
9152 }
9153
9154 mcs_rate = (struct ath10k_index_ht_data_rate_type *)
9155 ((nss == 1) ? &supported_ht_mcs_rate_nss1 :
9156 &supported_ht_mcs_rate_nss2);
9157
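	/* HT MCS 8-15 are the NSS 2 rates; fold them back into an index
	 * for the 8-entry per-NSS table.
	 */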
9158 if (mcs >= len_nss1)
9159 index = mcs - len_nss1;
9160 else
9161 index = mcs;
9162
9163 if (rate == mcs_rate[index].supported_rate[0]) {
9164 *bw = RATE_INFO_BW_20;
9165 } else if (rate == mcs_rate[index].supported_rate[1]) {
9166 *bw |= RATE_INFO_BW_40;
9167 } else if (rate == mcs_rate[index].supported_rate[2]) {
9168 *bw |= RATE_INFO_BW_20;
9169 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9170 } else if (rate == mcs_rate[index].supported_rate[3]) {
9171 *bw |= RATE_INFO_BW_40;
9172 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9173 } else {
9174 ath10k_warn(ar, fmt: "invalid ht params rate %d 100kbps nss %d mcs %d",
9175 rate, nss, mcs);
9176 }
9177}
9178
9179static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
9180 u8 *flags, u8 *bw)
9181{
9182 struct ath10k_index_vht_data_rate_type *mcs_rate;
9183
9184 mcs_rate = (struct ath10k_index_vht_data_rate_type *)
9185 ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
9186 &supported_vht_mcs_rate_nss2);
9187
9188 if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
9189 *bw = RATE_INFO_BW_80;
9190 } else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
9191 *bw = RATE_INFO_BW_80;
9192 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9193 } else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
9194 *bw = RATE_INFO_BW_40;
9195 } else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
9196 *bw = RATE_INFO_BW_40;
9197 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9198 } else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
9199 *bw = RATE_INFO_BW_20;
9200 } else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
9201 *bw = RATE_INFO_BW_20;
9202 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9203 } else {
9204 ath10k_warn(ar, fmt: "invalid vht params rate %d 100kbps nss %d mcs %d",
9205 rate, nss, mcs);
9206 }
9207}
9208
9209static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
9210 enum ath10k_phy_mode mode, u8 nss, u8 mcs,
9211 u8 *flags, u8 *bw)
9212{
9213 if (mode == ATH10K_PHY_MODE_HT) {
9214 *flags = RATE_INFO_FLAGS_MCS;
9215 ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
9216 } else if (mode == ATH10K_PHY_MODE_VHT) {
9217 *flags = RATE_INFO_FLAGS_VHT_MCS;
9218 ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
9219 }
9220}
9221
9222static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
9223 u32 bitrate_kbps, struct rate_info *rate)
9224{
9225 enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
9226 enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
9227 u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
9228 u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
9229 u8 flags = 0, bw = 0;
9230
9231 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac parse rate code 0x%x bitrate %d kbps\n",
9232 rate_code, bitrate_kbps);
9233
9234 if (preamble == WMI_RATE_PREAMBLE_HT)
9235 mode = ATH10K_PHY_MODE_HT;
9236 else if (preamble == WMI_RATE_PREAMBLE_VHT)
9237 mode = ATH10K_PHY_MODE_VHT;
9238
9239 ath10k_mac_get_rate_flags(ar, rate: bitrate_kbps / 100, mode, nss, mcs, flags: &flags, bw: &bw);
9240
9241 ath10k_dbg(ar, ATH10K_DBG_MAC,
9242 "mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
9243 preamble, mode, nss, mcs, flags, bw);
9244
9245 rate->flags = flags;
9246 rate->bw = bw;
9247 rate->legacy = bitrate_kbps / 100;
9248 rate->nss = nss;
9249 rate->mcs = mcs;
9250}
9251
9252static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
9253 struct ieee80211_sta *sta,
9254 struct station_info *sinfo)
9255{
9256 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
9257 struct ath10k_peer *peer;
9258 unsigned long time_left;
9259 int ret;
9260
9261 if (!(ar->hw_params.supports_peer_stats_info &&
9262 arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
9263 return;
9264
9265 spin_lock_bh(lock: &ar->data_lock);
9266 peer = ath10k_peer_find(ar, vdev_id: arsta->arvif->vdev_id, addr: sta->addr);
9267 spin_unlock_bh(lock: &ar->data_lock);
9268 if (!peer)
9269 return;
9270
9271 reinit_completion(x: &ar->peer_stats_info_complete);
9272
9273 ret = ath10k_wmi_request_peer_stats_info(ar,
9274 vdev_id: arsta->arvif->vdev_id,
9275 type: WMI_REQUEST_ONE_PEER_STATS_INFO,
9276 addr: arsta->arvif->bssid,
9277 reset: 0);
9278 if (ret && ret != -EOPNOTSUPP) {
9279 ath10k_warn(ar, fmt: "could not request peer stats info: %d\n", ret);
9280 return;
9281 }
9282
9283 time_left = wait_for_completion_timeout(x: &ar->peer_stats_info_complete, timeout: 3 * HZ);
9284 if (time_left == 0) {
9285 ath10k_warn(ar, fmt: "timed out waiting peer stats info\n");
9286 return;
9287 }
9288
9289 if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
9290 ath10k_mac_parse_bitrate(ar, rate_code: arsta->rx_rate_code,
9291 bitrate_kbps: arsta->rx_bitrate_kbps,
9292 rate: &sinfo->rxrate);
9293
9294 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
9295 arsta->rx_rate_code = 0;
9296 arsta->rx_bitrate_kbps = 0;
9297 }
9298
9299 if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
9300 ath10k_mac_parse_bitrate(ar, rate_code: arsta->tx_rate_code,
9301 bitrate_kbps: arsta->tx_bitrate_kbps,
9302 rate: &sinfo->txrate);
9303
9304 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
9305 arsta->tx_rate_code = 0;
9306 arsta->tx_bitrate_kbps = 0;
9307 }
9308}
9309
9310static void ath10k_sta_statistics(struct ieee80211_hw *hw,
9311 struct ieee80211_vif *vif,
9312 struct ieee80211_sta *sta,
9313 struct station_info *sinfo)
9314{
9315 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
9316 struct ath10k *ar = arsta->arvif->ar;
9317
9318 if (!ath10k_peer_stats_enabled(ar))
9319 return;
9320
9321 mutex_lock(&ar->conf_mutex);
9322 ath10k_debug_fw_stats_request(ar);
9323 mutex_unlock(lock: &ar->conf_mutex);
9324
9325 sinfo->rx_duration = arsta->rx_duration;
9326 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
9327
9328 if (arsta->txrate.legacy || arsta->txrate.nss) {
9329 if (arsta->txrate.legacy) {
9330 sinfo->txrate.legacy = arsta->txrate.legacy;
9331 } else {
9332 sinfo->txrate.mcs = arsta->txrate.mcs;
9333 sinfo->txrate.nss = arsta->txrate.nss;
9334 sinfo->txrate.bw = arsta->txrate.bw;
9335 }
9336 sinfo->txrate.flags = arsta->txrate.flags;
9337 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
9338 }
9339
9340 if (ar->htt.disable_tx_comp) {
9341 sinfo->tx_failed = arsta->tx_failed;
9342 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
9343 }
9344
9345 sinfo->tx_retries = arsta->tx_retries;
9346 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
9347
9348 ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
9349}
9350
9351static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw,
9352 struct ieee80211_vif *vif,
9353 struct ieee80211_sta *sta,
9354 struct cfg80211_tid_config *tid_config)
9355{
9356 struct ath10k *ar = hw->priv;
9357 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9358 struct ath10k_mac_iter_tid_conf_data data = {};
9359 struct wmi_per_peer_per_tid_cfg_arg arg = {};
9360 int ret, i;
9361
9362 mutex_lock(&ar->conf_mutex);
9363 arg.vdev_id = arvif->vdev_id;
9364
9365 arvif->tids_rst = 0;
9366 memset(arvif->tid_conf_changed, 0, sizeof(arvif->tid_conf_changed));
9367
9368 for (i = 0; i < tid_config->n_tid_conf; i++) {
9369 ret = ath10k_mac_parse_tid_config(ar, sta, vif,
9370 tid_conf: &tid_config->tid_conf[i],
9371 arg: &arg);
9372 if (ret)
9373 goto exit;
9374 }
9375
9376 ret = 0;
9377
9378 if (sta)
9379 goto exit;
9380
9381 arvif->tids_rst = 0;
9382 data.curr_vif = vif;
9383 data.ar = ar;
9384
9385 ieee80211_iterate_stations_atomic(hw, iterator: ath10k_mac_vif_stations_tid_conf,
9386 data: &data);
9387
9388exit:
9389 mutex_unlock(lock: &ar->conf_mutex);
9390 return ret;
9391}
9392
9393static int ath10k_mac_op_reset_tid_config(struct ieee80211_hw *hw,
9394 struct ieee80211_vif *vif,
9395 struct ieee80211_sta *sta,
9396 u8 tids)
9397{
9398 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9399 struct ath10k_mac_iter_tid_conf_data data = {};
9400 struct ath10k *ar = hw->priv;
9401 int ret = 0;
9402
9403 mutex_lock(&ar->conf_mutex);
9404
9405 if (sta) {
9406 arvif->tids_rst = 0;
9407 ret = ath10k_mac_reset_tid_config(ar, sta, arvif, tids);
9408 goto exit;
9409 }
9410
9411 arvif->tids_rst = tids;
9412 data.curr_vif = vif;
9413 data.ar = ar;
9414 ieee80211_iterate_stations_atomic(hw, iterator: ath10k_mac_vif_stations_tid_conf,
9415 data: &data);
9416
9417exit:
9418 mutex_unlock(lock: &ar->conf_mutex);
9419 return ret;
9420}
9421
9422static const struct ieee80211_ops ath10k_ops = {
9423 .tx = ath10k_mac_op_tx,
9424 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
9425 .start = ath10k_start,
9426 .stop = ath10k_stop,
9427 .config = ath10k_config,
9428 .add_interface = ath10k_add_interface,
9429 .update_vif_offload = ath10k_update_vif_offload,
9430 .remove_interface = ath10k_remove_interface,
9431 .configure_filter = ath10k_configure_filter,
9432 .bss_info_changed = ath10k_bss_info_changed,
9433 .set_coverage_class = ath10k_mac_op_set_coverage_class,
9434 .hw_scan = ath10k_hw_scan,
9435 .cancel_hw_scan = ath10k_cancel_hw_scan,
9436 .set_key = ath10k_set_key,
9437 .set_default_unicast_key = ath10k_set_default_unicast_key,
9438 .sta_state = ath10k_sta_state,
9439 .sta_set_txpwr = ath10k_sta_set_txpwr,
9440 .conf_tx = ath10k_conf_tx,
9441 .remain_on_channel = ath10k_remain_on_channel,
9442 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
9443 .set_rts_threshold = ath10k_set_rts_threshold,
9444 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
9445 .flush = ath10k_flush,
9446 .tx_last_beacon = ath10k_tx_last_beacon,
9447 .set_antenna = ath10k_set_antenna,
9448 .get_antenna = ath10k_get_antenna,
9449 .reconfig_complete = ath10k_reconfig_complete,
9450 .get_survey = ath10k_get_survey,
9451 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
9452 .sta_rc_update = ath10k_sta_rc_update,
9453 .offset_tsf = ath10k_offset_tsf,
9454 .ampdu_action = ath10k_ampdu_action,
9455 .get_et_sset_count = ath10k_debug_get_et_sset_count,
9456 .get_et_stats = ath10k_debug_get_et_stats,
9457 .get_et_strings = ath10k_debug_get_et_strings,
9458 .add_chanctx = ath10k_mac_op_add_chanctx,
9459 .remove_chanctx = ath10k_mac_op_remove_chanctx,
9460 .change_chanctx = ath10k_mac_op_change_chanctx,
9461 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
9462 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
9463 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
9464 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
9465 .sta_statistics = ath10k_sta_statistics,
9466 .set_tid_config = ath10k_mac_op_set_tid_config,
9467 .reset_tid_config = ath10k_mac_op_reset_tid_config,
9468
9469 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
9470
9471#ifdef CONFIG_PM
9472 .suspend = ath10k_wow_op_suspend,
9473 .resume = ath10k_wow_op_resume,
9474 .set_wakeup = ath10k_wow_op_set_wakeup,
9475#endif
9476#ifdef CONFIG_MAC80211_DEBUGFS
9477 .sta_add_debugfs = ath10k_sta_add_debugfs,
9478#endif
9479 .set_sar_specs = ath10k_mac_set_sar_specs,
9480};
9481
9482#define CHAN2G(_channel, _freq, _flags) { \
9483 .band = NL80211_BAND_2GHZ, \
9484 .hw_value = (_channel), \
9485 .center_freq = (_freq), \
9486 .flags = (_flags), \
9487 .max_antenna_gain = 0, \
9488 .max_power = 30, \
9489}
9490
9491#define CHAN5G(_channel, _freq, _flags) { \
9492 .band = NL80211_BAND_5GHZ, \
9493 .hw_value = (_channel), \
9494 .center_freq = (_freq), \
9495 .flags = (_flags), \
9496 .max_antenna_gain = 0, \
9497 .max_power = 30, \
9498}
9499
9500static const struct ieee80211_channel ath10k_2ghz_channels[] = {
9501 CHAN2G(1, 2412, 0),
9502 CHAN2G(2, 2417, 0),
9503 CHAN2G(3, 2422, 0),
9504 CHAN2G(4, 2427, 0),
9505 CHAN2G(5, 2432, 0),
9506 CHAN2G(6, 2437, 0),
9507 CHAN2G(7, 2442, 0),
9508 CHAN2G(8, 2447, 0),
9509 CHAN2G(9, 2452, 0),
9510 CHAN2G(10, 2457, 0),
9511 CHAN2G(11, 2462, 0),
9512 CHAN2G(12, 2467, 0),
9513 CHAN2G(13, 2472, 0),
9514 CHAN2G(14, 2484, 0),
9515};
9516
9517static const struct ieee80211_channel ath10k_5ghz_channels[] = {
9518 CHAN5G(36, 5180, 0),
9519 CHAN5G(40, 5200, 0),
9520 CHAN5G(44, 5220, 0),
9521 CHAN5G(48, 5240, 0),
9522 CHAN5G(52, 5260, 0),
9523 CHAN5G(56, 5280, 0),
9524 CHAN5G(60, 5300, 0),
9525 CHAN5G(64, 5320, 0),
9526 CHAN5G(100, 5500, 0),
9527 CHAN5G(104, 5520, 0),
9528 CHAN5G(108, 5540, 0),
9529 CHAN5G(112, 5560, 0),
9530 CHAN5G(116, 5580, 0),
9531 CHAN5G(120, 5600, 0),
9532 CHAN5G(124, 5620, 0),
9533 CHAN5G(128, 5640, 0),
9534 CHAN5G(132, 5660, 0),
9535 CHAN5G(136, 5680, 0),
9536 CHAN5G(140, 5700, 0),
9537 CHAN5G(144, 5720, 0),
9538 CHAN5G(149, 5745, 0),
9539 CHAN5G(153, 5765, 0),
9540 CHAN5G(157, 5785, 0),
9541 CHAN5G(161, 5805, 0),
9542 CHAN5G(165, 5825, 0),
9543 CHAN5G(169, 5845, 0),
9544 CHAN5G(173, 5865, 0),
9545 /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */
9546 /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */
9547};
9548
9549struct ath10k *ath10k_mac_create(size_t priv_size)
9550{
9551 struct ieee80211_hw *hw;
9552 struct ieee80211_ops *ops;
9553 struct ath10k *ar;
9554
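	/* Keep a writable per-device copy of the ops table; this likely
	 * exists so individual callbacks can be adjusted per device without
	 * affecting other instances.
	 */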
	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
	if (!hw) {
		kfree(ops);
		return NULL;
	}
9564
9565 ar = hw->priv;
9566 ar->hw = hw;
9567 ar->ops = ops;
9568
9569 return ar;
9570}
9571
9572void ath10k_mac_destroy(struct ath10k *ar)
9573{
9574 struct ieee80211_ops *ops = ar->ops;
9575
	ieee80211_free_hw(ar->hw);
	kfree(ops);
9578}
9579
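/* Interface combination tables advertised to mac80211. Which table is used
 * depends on the firmware WMI op version, see ath10k_mac_register().
 */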
static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_STATION)
			| BIT(NL80211_IFTYPE_P2P_CLIENT)
	},
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_P2P_GO)
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
	},
	{
		.max = 7,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION)
	},
};

static const struct ieee80211_iface_combination ath10k_if_comb[] = {
	{
		.limits = ath10k_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
};

static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
	{
		.limits = ath10k_10x_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
};

/* FIXME: This is not thoroughly tested. These combinations may over- or
 * underestimate hw/fw capabilities.
 */
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_qcs_if_limit,
		.num_different_channels = 2,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 16,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80) |
				       BIT(NL80211_CHAN_WIDTH_80P80) |
				       BIT(NL80211_CHAN_WIDTH_160),
#endif
	},
};

static const struct
ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 100,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80) |
				       BIT(NL80211_CHAN_WIDTH_80P80) |
				       BIT(NL80211_CHAN_WIDTH_160),
#endif
	},
};

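/* Iterator callback for ath10k_get_arvif(): remember the vif whose vdev id
 * matches the one we are looking for.
 */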
static void ath10k_get_arvif_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct ath10k_vif_iter *arvif_iter = data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	if (arvif->vdev_id == arvif_iter->vdev_id)
		arvif_iter->arvif = arvif;
}

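/* Look up the driver vif (arvif) for a given firmware vdev id by walking the
 * active interfaces. Returns NULL (with a warning) if no match is found.
 */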
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif_iter arvif_iter;

	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
	arvif_iter.vdev_id = vdev_id;

	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   ATH10K_ITER_RESUME_FLAGS,
						   ath10k_get_arvif_iter,
						   &arvif_iter);
	if (!arvif_iter.arvif) {
		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
		return NULL;
	}

	return arvif_iter.arvif;
}

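/* ACPI WRDD ("Wireless Regulatory Domain Description") support: some
 * platforms expose the intended regulatory country for the WLAN device via
 * the WRDD method. WRDD_WIFI is the domain type identifying the Wi-Fi entry.
 */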
#define WRD_METHOD "WRDD"
#define WRDD_WIFI (0x07)

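/* Parse the package returned by the WRDD method: element 0 must be an
 * integer revision of 0, the remaining elements are (domain type, country
 * code) sub-packages. Return the country code of the Wi-Fi entry, or 0 if
 * none is found.
 */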
static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
{
	union acpi_object *mcc_pkg;
	union acpi_object *domain_type;
	union acpi_object *mcc_value;
	u32 i;

	if (wrdd->type != ACPI_TYPE_PACKAGE ||
	    wrdd->package.count < 2 ||
	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrdd->package.elements[0].integer.value != 0) {
		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
		return 0;
	}

	for (i = 1; i < wrdd->package.count; ++i) {
		mcc_pkg = &wrdd->package.elements[i];

		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
			continue;
		if (mcc_pkg->package.count < 2)
			continue;
		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

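/* Query the regulatory domain from the ACPI WRDD method. On success *rd is
 * set to the ath regd code for the reported country with COUNTRY_ERD_FLAG
 * set; otherwise an error is returned and the caller falls back to the
 * EEPROM value.
 */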
static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(ar->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrdd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

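/* Pick the initial regulatory domain: prefer the ACPI WRDD hint, fall back
 * to the EEPROM programmed value (ar->hw_eeprom_rd).
 */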
static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

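/* Register the device with mac80211: populate the supported bands, hardware
 * flags and wiphy features based on firmware service/feature bits, then call
 * ieee80211_register_hw().
 */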
int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.
		 */
		WLAN_CIPHER_SUITE_GCMP,
		WLAN_CIPHER_SUITE_GCMP_256,
		WLAN_CIPHER_SUITE_CCMP_256,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	if (!is_valid_ether_addr(ar->mac_addr)) {
		ath10k_warn(ar, "invalid MAC address; choosing random\n");
		eth_random_addr(ar->mac_addr);
	}
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	wiphy_read_of_freq_limits(ar->hw->wiphy);
	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
		ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
		ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
		ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
		ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
		ar->hw->wiphy->max_sched_scan_plan_interval =
			WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
		ar->hw->wiphy->max_sched_scan_plan_iterations =
			WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
		ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
	}

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the driver
		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
		 * the correct Probe Responses. Advertising probe response
		 * offload here is therefore more of a workaround than a real
		 * offload.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);

	if (ath10k_frame_mode == ATH10K_HW_TXRX_ETHERNET) {
		if (ar->wmi.vdev_param->tx_encap_type !=
		    WMI_VDEV_PARAM_UNSUPPORTED)
			ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
	}

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
	wiphy_ext_feature_set(ar->hw->wiphy,
			      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);

	if (ar->hw_params.mcast_frame_registration)
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);

	if (ath10k_peer_stats_enabled(ar) ||
	    test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);

	if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);

	if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_STA_TX_PWR);

	if (test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) {
		ar->hw->wiphy->tid_config_support.vif |=
			BIT(NL80211_TID_CONFIG_ATTR_NOACK) |
			BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT) |
			BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG) |
			BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL) |
			BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
			BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE);

		if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
			     ar->wmi.svc_map)) {
			ar->hw->wiphy->tid_config_support.vif |=
				BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
		}

		ar->hw->wiphy->tid_config_support.peer =
			ar->hw->wiphy->tid_config_support.vif;
		ar->hw->wiphy->max_data_retry_count = ATH10K_MAX_RETRY_COUNT;
	} else {
		ar->ops->set_tid_config = NULL;
	}
	/* On low-latency (LL) hardware the queues are managed entirely by the
	 * firmware, so only advertise to mac80211 that we can handle the
	 * maximum number of queues.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;

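	/* The supported interface combinations depend on the firmware WMI op
	 * version (and, for TLV and 10.4 firmware, on advertised services).
	 */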
	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
			     ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_10_4_bcn_int_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
		}
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (ar->hw_params.dynamic_sar_support)
		ar->hw->wiphy->sar_capa = &ath10k_sar_capa;

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
		ar->hw->wiphy->features |=
			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
	}

	if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
	    !ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

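/* Counterpart of ath10k_mac_register(): detach from mac80211 and release the
 * resources allocated during registration.
 */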
void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}