// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"

#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
			struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
					    struct hal_rx_desc *desc,
					    struct ieee80211_hdr *hdr)
{
	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
						struct hal_rx_desc *desc,
						u8 *crypto_hdr,
						enum hal_encrypt_type enctype)
{
	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
}

static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}

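/* Drain stale entries from the monitor destination rings, reaping them in
 * DP_MON_SERVICE_BUDGET-sized batches until one full pass comes back under
 * budget (i.e. the rings are empty) or the purge timeout expires.
 */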
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
			reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
							     DP_MON_SERVICE_BUDGET,
							     ATH12K_DP_RX_MONITOR_MODE);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath12k_warn(ab, "dp mon ring purge timeout\n");

	return -ETIMEDOUT;
}

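/* Refill the RXDMA buffer ring: allocate DP_RX_BUFFER_ALIGN_SIZE-aligned
 * skbs, DMA-map them, take a software descriptor (and its cookie) from the
 * free list, and post the buffer's physical address into the SRNG entry.
 * Stops early if skbs, descriptors or ring entries run out.
 */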
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
				struct dp_rxdma_ring *rx_ring,
				int req_entries)
{
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_desc;
	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&dp->rx_desc_lock);

		/* Get desc from free list and store in used list
		 * for cleanup purposes
		 *
		 * TODO: pass the removed descs rather than
		 * add/read to optimize
		 */
		rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
						   struct ath12k_rx_desc_info,
						   list);
		if (!rx_desc) {
			spin_unlock_bh(&dp->rx_desc_lock);
			goto fail_dma_unmap;
		}

		rx_desc->skb = skb;
		cookie = rx_desc->cookie;
		list_del(&rx_desc->list);
		list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);

		spin_unlock_bh(&dp->rx_desc_lock);

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_buf_unassign;

		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_buf_unassign:
	spin_lock_bh(&dp->rx_desc_lock);
	list_del(&rx_desc->list);
	list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
	rx_desc->skb = NULL;
	spin_unlock_bh(&dp->rx_desc_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
					     struct dp_rxdma_mon_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);

	return 0;
}

static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
					      struct dp_rxdma_mon_ring *rx_ring,
					      u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);

	return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);

	rx_ring->bufs_max = num_entries;
	ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);

	return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->rxdma_mon_buf_ring,
							 HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}

		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->tx_mon_buf_ring,
							 HAL_TX_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_TX_MONITOR_BUF\n");
			return ret;
		}
	}

	return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
		ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
	}
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}

		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->tx_mon_dst_ring[i],
					   HAL_TX_MONITOR_DST,
					   0, mac_id + i,
					   DP_TX_MONITOR_DEST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_TX_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}

void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

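/* Post a REO command to the REO command SRNG. When a completion callback is
 * supplied, a copy of the rx_tid context is queued on dp->reo_cmd_list so the
 * status handler can later match it by cmd_num and invoke the callback.
 */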
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

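/* Flush the HW-cached REO queue descriptor of a TID. The descriptor is
 * flushed in desc_sz chunks from the tail back towards the base address;
 * only the final flush of the base requests a status callback, which then
 * unmaps and frees the host memory backing the descriptor.
 */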
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

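/* Status handler for the TID delete (UPDATE_RX_QUEUE) command. On success the
 * descriptor is parked on reo_cmd_cache_flush_list; aged entries are then
 * cache-flushed and freed once the list exceeds the threshold or an entry
 * outlives the free timeout. On HAL_REO_CMD_DRAIN the descriptor is freed
 * immediately.
 */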
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath12k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

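/* Write a REO queue reference (queue descriptor physical address plus TID)
 * into the REO queue LUT slot for this peer_id/tid. Only used on targets
 * with reoq_lut_support; the reset variant below clears the same slot.
 */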
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* TODO: based on ML peer or not, select the LUT. below assumes non
	 * ML peer
	 */
	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* TODO: based on ML peer or not, select the LUT. below assumes non
	 * ML peer
	 */
	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

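/* Tear down the RX reorder queue of a peer/TID: invalidate the queue via an
 * UPDATE_RX_QUEUE command (the status callback frees the descriptor), then
 * clear the LUT reference and mark the TID inactive.
 */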
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     ath12k_dp_rx_tid_del_func);
	if (ret) {
		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

	rx_tid->active = false;
}

/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
 * that.
 */
static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
					 struct hal_reo_dest_ring *ring,
					 enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
				       bool rel_link_desc)
{
	struct ath12k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
		ath12k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

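/* Update an already-active RX reorder queue in place: resize the BA window
 * and optionally reprogram the starting sequence number (SSN).
 */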
static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
					 struct ath12k_peer *peer,
					 struct ath12k_dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     NULL);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

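/* Set up the RX reorder queue for a peer/TID: allocate and DMA-map an
 * aligned HW queue descriptor, then publish its address either through the
 * REO queue LUT or, on targets without LUT support, to firmware via WMI.
 * If the queue is already active, only the BA window/SSN is updated.
 */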
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_rx_reo_queue *addr_aligned;
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr, tid, 1,
								     ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr, tid, 1, ba_win_sz);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}

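/* Enable hardware PN/TSC replay checking on every active RX reorder queue of
 * the peer when a pairwise key is installed. Broadcast/multicast frames keep
 * using mac80211's software replay detection, as noted below.
 */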
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
		   HAL_REO_CMD_UPD0_PN_SIZE |
		   HAL_REO_CMD_UPD0_PN_VALID |
		   HAL_REO_CMD_UPD0_PN_CHECK |
		   HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

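/* Find the user_stats slot already assigned to this peer_id, or the first
 * slot without a valid peer_id. Returns -EINVAL when no usable slot is left
 * (the search intentionally stops one short of HTT_PPDU_STATS_MAX_USERS).
 */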
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

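/* Walk an HTT TLV stream: validate that each header and payload fits in the
 * remaining buffer, then hand every TLV to the supplied iterator. Only an
 * -ENOMEM from the iterator aborts the walk; other iterator errors are
 * ignored so the remaining TLVs still get parsed.
 */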
static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
					      const void *ptr, void *data),
				  void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = ath12k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

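/* Look up the ppdu_stats_info entry cached for this ppdu_id, or allocate a
 * fresh one. When the list is deeper than HTT_PPDU_DESC_MAX_DEPTH, the oldest
 * entry is flushed into the per-peer tx stats before the new one is added.
 */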
static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}

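/* Handle an HTT PPDU stats indication: validate the payload length, locate
 * the pdev's ar, parse the TLV stream into a cached ppdu_info, and shuffle
 * the per-user rate TLVs to cope with delayed-BA and (MU-)BAR PPDUs using
 * the copy helpers above.
 */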
1528 | static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, |
1529 | struct sk_buff *skb) |
1530 | { |
1531 | struct ath12k_htt_ppdu_stats_msg *msg; |
1532 | struct htt_ppdu_stats_info *ppdu_info; |
1533 | struct ath12k_peer *peer = NULL; |
1534 | struct htt_ppdu_user_stats *usr_stats = NULL; |
1535 | u32 peer_id = 0; |
1536 | struct ath12k *ar; |
1537 | int ret, i; |
1538 | u8 pdev_id; |
1539 | u32 ppdu_id, len; |
1540 | |
1541 | msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; |
1542 | len = le32_get_bits(v: msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); |
1543 | if (len > (skb->len - struct_size(msg, data, 0))) { |
1544 | ath12k_warn(ab, |
1545 | fmt: "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n" , |
1546 | len, skb->len); |
1547 | return -EINVAL; |
1548 | } |
1549 | |
1550 | pdev_id = le32_get_bits(v: msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID); |
1551 | ppdu_id = le32_to_cpu(msg->ppdu_id); |
1552 | |
1553 | rcu_read_lock(); |
1554 | ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); |
1555 | if (!ar) { |
1556 | ret = -EINVAL; |
1557 | goto exit; |
1558 | } |
1559 | |
1560 | spin_lock_bh(lock: &ar->data_lock); |
1561 | ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id); |
1562 | if (!ppdu_info) { |
1563 | spin_unlock_bh(lock: &ar->data_lock); |
1564 | ret = -EINVAL; |
1565 | goto exit; |
1566 | } |
1567 | |
1568 | ppdu_info->ppdu_id = ppdu_id; |
1569 | ret = ath12k_dp_htt_tlv_iter(ab, ptr: msg->data, len, |
1570 | iter: ath12k_htt_tlv_ppdu_stats_parse, |
1571 | data: (void *)ppdu_info); |
1572 | if (ret) { |
1573 | spin_unlock_bh(lock: &ar->data_lock); |
1574 | ath12k_warn(ab, fmt: "Failed to parse tlv %d\n" , ret); |
1575 | goto exit; |
1576 | } |
1577 | |
1578 | if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) { |
1579 | spin_unlock_bh(lock: &ar->data_lock); |
1580 | ath12k_warn(ab, |
1581 | fmt: "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n" , |
1582 | ppdu_info->ppdu_stats.common.num_users, |
1583 | HTT_PPDU_STATS_MAX_USERS); |
1584 | ret = -EINVAL; |
1585 | goto exit; |
1586 | } |
1587 | |
1588 | /* back up data rate tlv for all peers */ |
1589 | if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && |
1590 | (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && |
1591 | ppdu_info->delay_ba) { |
1592 | for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) { |
1593 | peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; |
1594 | spin_lock_bh(lock: &ab->base_lock); |
1595 | peer = ath12k_peer_find_by_id(ab, peer_id); |
1596 | if (!peer) { |
1597 | spin_unlock_bh(lock: &ab->base_lock); |
1598 | continue; |
1599 | } |
1600 | |
1601 | usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; |
1602 | if (usr_stats->delay_ba) |
1603 | ath12k_copy_to_delay_stats(peer, usr_stats); |
1604 | spin_unlock_bh(lock: &ab->base_lock); |
1605 | } |
1606 | } |
1607 | |
1608 | /* restore all peers' data rate tlv to mu-bar tlv */ |
1609 | if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR && |
1610 | (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) { |
1611 | for (i = 0; i < ppdu_info->bar_num_users; i++) { |
1612 | peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; |
1613 | spin_lock_bh(lock: &ab->base_lock); |
1614 | peer = ath12k_peer_find_by_id(ab, peer_id); |
1615 | if (!peer) { |
1616 | spin_unlock_bh(lock: &ab->base_lock); |
1617 | continue; |
1618 | } |
1619 | |
1620 | usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; |
1621 | if (peer->delayba_flag) |
1622 | ath12k_copy_to_bar(peer, usr_stats); |
1623 | spin_unlock_bh(lock: &ab->base_lock); |
1624 | } |
1625 | } |
1626 | |
1627 | spin_unlock_bh(lock: &ar->data_lock); |
1628 | |
1629 | exit: |
1630 | rcu_read_unlock(); |
1631 | |
1632 | return ret; |
1633 | } |
1634 | |
1635 | static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, |
1636 | struct sk_buff *skb) |
1637 | { |
1638 | struct ath12k_htt_mlo_offset_msg *msg; |
1639 | struct ath12k_pdev *pdev; |
1640 | struct ath12k *ar; |
1641 | u8 pdev_id; |
1642 | |
1643 | msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; |
1644 | pdev_id = u32_get_bits(__le32_to_cpu(msg->info), |
1645 | HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); |
1646 | |
1647 | rcu_read_lock(); |
1648 | ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); |
1649 | if (!ar) { |
ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
goto exit;
}

spin_lock_bh(&ar->data_lock);
pdev = ar->pdev;

pdev->timestamp.info = __le32_to_cpu(msg->info);
pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

spin_unlock_bh(&ar->data_lock);
1667 | exit: |
1668 | rcu_read_unlock(); |
1669 | } |
1670 | |
1671 | void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, |
1672 | struct sk_buff *skb) |
1673 | { |
1674 | struct ath12k_dp *dp = &ab->dp; |
1675 | struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; |
1676 | enum htt_t2h_msg_type type; |
1677 | u16 peer_id; |
1678 | u8 vdev_id; |
1679 | u8 mac_addr[ETH_ALEN]; |
1680 | u16 peer_mac_h16; |
1681 | u16 ast_hash = 0; |
1682 | u16 hw_peer_id; |
1683 | |
type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF:
dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
HTT_T2H_VERSION_CONF_MAJOR);
dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
HTT_T2H_VERSION_CONF_MINOR);
1694 | complete(&dp->htt_tgt_version_received); |
1695 | break; |
1696 | /* TODO: remove unused peer map versions after testing */ |
1697 | case HTT_T2H_MSG_TYPE_PEER_MAP: |
vdev_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_VDEV_ID);
peer_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_PEER_ID);
peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP2:
vdev_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_VDEV_ID);
peer_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_PEER_ID);
peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ast_hash = le32_get_bits(resp->peer_map_ev.info2,
HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP3:
vdev_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_VDEV_ID);
peer_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_PEER_ID);
peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
peer_id = le32_get_bits(resp->peer_unmap_ev.info,
HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1740 | ath12k_peer_unmap_event(ab, peer_id); |
1741 | break; |
1742 | case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: |
1743 | ath12k_htt_pull_ppdu_stats(ab, skb); |
1744 | break; |
1745 | case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: |
1746 | break; |
1747 | case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: |
1748 | ath12k_htt_mlo_offset_event_handler(ab, skb); |
1749 | break; |
1750 | default: |
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
type);
1753 | break; |
1754 | } |
1755 | |
1756 | dev_kfree_skb_any(skb); |
1757 | } |
1758 | |
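/* Stitch an MSDU that spans multiple rx buffers back into one linear
* skb. A sketch of the layout handled here: the first buffer carries
* the HAL rx descriptor plus L3 padding ahead of the payload, while
* each continuation buffer carries only a descriptor followed by
* payload bytes.
*/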
1759 | static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, |
1760 | struct sk_buff_head *msdu_list, |
1761 | struct sk_buff *first, struct sk_buff *last, |
1762 | u8 l3pad_bytes, int msdu_len) |
1763 | { |
1764 | struct ath12k_base *ab = ar->ab; |
1765 | struct sk_buff *skb; |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
1770 | u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; |
1771 | |
1772 | /* As the msdu is spread across multiple rx buffers, |
1773 | * find the offset to the start of msdu for computing |
1774 | * the length of the msdu in the first buffer. |
1775 | */ |
1776 | buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; |
1777 | buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; |
1778 | |
1779 | if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { |
skb_put(first, buf_first_hdr_len + msdu_len);
skb_pull(first, buf_first_hdr_len);
return 0;
}

ldesc = (struct hal_rx_desc *)last->data;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1788 | |
1789 | /* MSDU spans over multiple buffers because the length of the MSDU |
1790 | * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data |
1791 | * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. |
1792 | */ |
skb_put(first, DP_RX_BUFFER_SIZE);
skb_pull(first, buf_first_hdr_len);

/* When an MSDU spread over multiple buffers MSDU_END
* tlvs are valid only in the last buffer. Copy those tlvs.
*/
ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
if (space_extra > 0 &&
(pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
/* Free up all buffers of the MSDU */
while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1806 | rxcb = ATH12K_SKB_RXCB(skb); |
1807 | if (!rxcb->is_continuation) { |
1808 | dev_kfree_skb_any(skb); |
1809 | break; |
1810 | } |
1811 | dev_kfree_skb_any(skb); |
1812 | } |
1813 | return -ENOMEM; |
1814 | } |
1815 | |
1816 | rem_len = msdu_len - buf_first_len; |
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1818 | rxcb = ATH12K_SKB_RXCB(skb); |
1819 | if (rxcb->is_continuation) |
1820 | buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; |
1821 | else |
1822 | buf_len = rem_len; |
1823 | |
1824 | if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { |
1825 | WARN_ON_ONCE(1); |
1826 | dev_kfree_skb_any(skb); |
1827 | return -EINVAL; |
1828 | } |
1829 | |
skb_put(skb, buf_len + hal_rx_desc_sz);
skb_pull(skb, hal_rx_desc_sz);
skb_copy_from_linear_data(skb, skb_put(first, buf_len),
buf_len);
1834 | dev_kfree_skb_any(skb); |
1835 | |
1836 | rem_len -= buf_len; |
1837 | if (!rxcb->is_continuation) |
1838 | break; |
1839 | } |
1840 | |
1841 | return 0; |
1842 | } |
1843 | |
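/* Buffers belonging to one MSDU are chained via the is_continuation
* flag; the last buffer in the chain is the one whose flag is clear,
* and it is where the valid MSDU_END tlvs live.
*/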
1844 | static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, |
1845 | struct sk_buff *first) |
1846 | { |
1847 | struct sk_buff *skb; |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1849 | |
1850 | if (!rxcb->is_continuation) |
1851 | return first; |
1852 | |
1853 | skb_queue_walk(msdu_list, skb) { |
1854 | rxcb = ATH12K_SKB_RXCB(skb); |
1855 | if (!rxcb->is_continuation) |
1856 | return skb; |
1857 | } |
1858 | |
1859 | return NULL; |
1860 | } |
1861 | |
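/* The hardware verifies IP and L4 checksums and reports failures in
* the rx descriptor; propagate that verdict so the network stack can
* skip software checksum verification on good frames.
*/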
1862 | static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu) |
1863 | { |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ath12k_base *ab = ar->ab;
bool ip_csum_fail, l4_csum_fail;

ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
1870 | |
1871 | msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? |
1872 | CHECKSUM_NONE : CHECKSUM_UNNECESSARY; |
1873 | } |
1874 | |
1875 | static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, |
1876 | enum hal_encrypt_type enctype) |
1877 | { |
1878 | switch (enctype) { |
1879 | case HAL_ENCRYPT_TYPE_OPEN: |
1880 | case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: |
1881 | case HAL_ENCRYPT_TYPE_TKIP_MIC: |
1882 | return 0; |
1883 | case HAL_ENCRYPT_TYPE_CCMP_128: |
1884 | return IEEE80211_CCMP_MIC_LEN; |
1885 | case HAL_ENCRYPT_TYPE_CCMP_256: |
1886 | return IEEE80211_CCMP_256_MIC_LEN; |
1887 | case HAL_ENCRYPT_TYPE_GCMP_128: |
1888 | case HAL_ENCRYPT_TYPE_AES_GCMP_256: |
1889 | return IEEE80211_GCMP_MIC_LEN; |
1890 | case HAL_ENCRYPT_TYPE_WEP_40: |
1891 | case HAL_ENCRYPT_TYPE_WEP_104: |
1892 | case HAL_ENCRYPT_TYPE_WEP_128: |
1893 | case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: |
1894 | case HAL_ENCRYPT_TYPE_WAPI: |
1895 | break; |
1896 | } |
1897 | |
ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1899 | return 0; |
1900 | } |
1901 | |
1902 | static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, |
1903 | enum hal_encrypt_type enctype) |
1904 | { |
1905 | switch (enctype) { |
1906 | case HAL_ENCRYPT_TYPE_OPEN: |
1907 | return 0; |
1908 | case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: |
1909 | case HAL_ENCRYPT_TYPE_TKIP_MIC: |
1910 | return IEEE80211_TKIP_IV_LEN; |
1911 | case HAL_ENCRYPT_TYPE_CCMP_128: |
1912 | return IEEE80211_CCMP_HDR_LEN; |
1913 | case HAL_ENCRYPT_TYPE_CCMP_256: |
1914 | return IEEE80211_CCMP_256_HDR_LEN; |
1915 | case HAL_ENCRYPT_TYPE_GCMP_128: |
1916 | case HAL_ENCRYPT_TYPE_AES_GCMP_256: |
1917 | return IEEE80211_GCMP_HDR_LEN; |
1918 | case HAL_ENCRYPT_TYPE_WEP_40: |
1919 | case HAL_ENCRYPT_TYPE_WEP_104: |
1920 | case HAL_ENCRYPT_TYPE_WEP_128: |
1921 | case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: |
1922 | case HAL_ENCRYPT_TYPE_WAPI: |
1923 | break; |
1924 | } |
1925 | |
ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1927 | return 0; |
1928 | } |
1929 | |
1930 | static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, |
1931 | enum hal_encrypt_type enctype) |
1932 | { |
1933 | switch (enctype) { |
1934 | case HAL_ENCRYPT_TYPE_OPEN: |
1935 | case HAL_ENCRYPT_TYPE_CCMP_128: |
1936 | case HAL_ENCRYPT_TYPE_CCMP_256: |
1937 | case HAL_ENCRYPT_TYPE_GCMP_128: |
1938 | case HAL_ENCRYPT_TYPE_AES_GCMP_256: |
1939 | return 0; |
1940 | case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: |
1941 | case HAL_ENCRYPT_TYPE_TKIP_MIC: |
1942 | return IEEE80211_TKIP_ICV_LEN; |
1943 | case HAL_ENCRYPT_TYPE_WEP_40: |
1944 | case HAL_ENCRYPT_TYPE_WEP_104: |
1945 | case HAL_ENCRYPT_TYPE_WEP_128: |
1946 | case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: |
1947 | case HAL_ENCRYPT_TYPE_WAPI: |
1948 | break; |
1949 | } |
1950 | |
ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1952 | return 0; |
1953 | } |
1954 | |
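/* Native-wifi decap strips the QoS control field (and any HT control)
* from the 802.11 header. Rebuild a header mac80211 can consume:
* re-add QoS control and, when the IV was not stripped, the crypto
* parameters as well.
*/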
1955 | static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, |
1956 | struct sk_buff *msdu, |
1957 | enum hal_encrypt_type enctype, |
1958 | struct ieee80211_rx_status *status) |
1959 | { |
1960 | struct ath12k_base *ab = ar->ab; |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1962 | u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; |
1963 | struct ieee80211_hdr *hdr; |
1964 | size_t hdr_len; |
1965 | u8 *crypto_hdr; |
1966 | u16 qos_ctl; |
1967 | |
1968 | /* pull decapped header */ |
1969 | hdr = (struct ieee80211_hdr *)msdu->data; |
hdr_len = ieee80211_hdrlen(hdr->frame_control);
skb_pull(msdu, hdr_len);

/* Rebuild qos header */
hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

/* Reset the order bit as the HT_Control header is stripped */
hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

qos_ctl = rxcb->tid;

if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
1982 | qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; |
1983 | |
1984 | /* TODO: Add other QoS ctl fields when required */ |
1985 | |
1986 | /* copy decap header before overwriting for reuse below */ |
1987 | memcpy(decap_hdr, hdr, hdr_len); |
1988 | |
1989 | /* Rebuild crypto header for mac80211 use */ |
1990 | if (!(status->flag & RX_FLAG_IV_STRIPPED)) { |
crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
ath12k_dp_rx_desc_get_crypto_header(ar->ab,
rxcb->rx_desc, crypto_hdr,
enctype);
1995 | } |
1996 | |
1997 | memcpy(skb_push(msdu, |
1998 | IEEE80211_QOS_CTL_LEN), &qos_ctl, |
1999 | IEEE80211_QOS_CTL_LEN); |
2000 | memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); |
2001 | } |
2002 | |
2003 | static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, |
2004 | enum hal_encrypt_type enctype, |
2005 | struct ieee80211_rx_status *status, |
2006 | bool decrypted) |
2007 | { |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;

if (!rxcb->is_first_msdu ||
!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
WARN_ON_ONCE(1);
return;
}

skb_trim(msdu, msdu->len - FCS_LEN);
2020 | |
2021 | if (!decrypted) |
2022 | return; |
2023 | |
2024 | hdr = (void *)msdu->data; |
2025 | |
2026 | /* Tail */ |
2027 | if (status->flag & RX_FLAG_IV_STRIPPED) { |
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_mic_len(ar, enctype));

skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
if (status->flag & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_mic_len(ar, enctype));

/* ICV */
if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_icv_len(ar, enctype));
}

/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);

memmove(msdu->data + crypto_len, msdu->data, hdr_len);
skb_pull(msdu, crypto_len);
2058 | } |
2059 | } |
2060 | |
2061 | static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, |
2062 | struct sk_buff *msdu, |
2063 | struct ath12k_skb_rxcb *rxcb, |
2064 | struct ieee80211_rx_status *status, |
2065 | enum hal_encrypt_type enctype) |
2066 | { |
2067 | struct hal_rx_desc *rx_desc = rxcb->rx_desc; |
2068 | struct ath12k_base *ab = ar->ab; |
2069 | size_t hdr_len, crypto_len; |
2070 | struct ieee80211_hdr *hdr; |
2071 | u16 qos_ctl; |
2072 | __le16 fc; |
2073 | u8 *crypto_hdr; |
2074 | |
2075 | if (!(status->flag & RX_FLAG_IV_STRIPPED)) { |
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
crypto_hdr = skb_push(msdu, crypto_len);
ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
}

fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
hdr_len = ieee80211_hdrlen(fc);
skb_push(msdu, hdr_len);
hdr = (struct ieee80211_hdr *)msdu->data;
hdr->frame_control = fc;

/* Get wifi header from rx_desc */
ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);

if (rxcb->is_mcbc)
status->flag &= ~RX_FLAG_PN_VALIDATED;

/* Add QOS header */
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos_ctl = rxcb->tid;
if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
2097 | qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; |
2098 | |
2099 | /* TODO: Add other QoS ctl fields when required */ |
2100 | memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN), |
2101 | &qos_ctl, IEEE80211_QOS_CTL_LEN); |
2102 | } |
2103 | } |
2104 | |
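/* Convert an ethernet-decapped frame back into an 802.11 frame with an
* RFC 1042 SNAP header so it can take the regular mac80211 rx path.
*/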
2105 | static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, |
2106 | struct sk_buff *msdu, |
2107 | enum hal_encrypt_type enctype, |
2108 | struct ieee80211_rx_status *status) |
2109 | { |
2110 | struct ieee80211_hdr *hdr; |
2111 | struct ethhdr *eth; |
2112 | u8 da[ETH_ALEN]; |
2113 | u8 sa[ETH_ALEN]; |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};

eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
ether_addr_copy(sa, eth->h_source);
rfc.snap_type = eth->h_proto;
skb_pull(msdu, sizeof(*eth));
2122 | memcpy(skb_push(msdu, sizeof(rfc)), &rfc, |
2123 | sizeof(rfc)); |
2124 | ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); |
2125 | |
2126 | /* original 802.11 header has a different DA and in |
2127 | * case of 4addr it may also have different SA |
2128 | */ |
2129 | hdr = (struct ieee80211_hdr *)msdu->data; |
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
2132 | } |
2133 | |
2134 | static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, |
2135 | struct hal_rx_desc *rx_desc, |
2136 | enum hal_encrypt_type enctype, |
2137 | struct ieee80211_rx_status *status, |
2138 | bool decrypted) |
2139 | { |
2140 | struct ath12k_base *ab = ar->ab; |
2141 | u8 decap; |
2142 | struct ethhdr *ehdr; |
2143 | |
decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2145 | |
2146 | switch (decap) { |
2147 | case DP_RX_DECAP_TYPE_NATIVE_WIFI: |
2148 | ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); |
2149 | break; |
2150 | case DP_RX_DECAP_TYPE_RAW: |
2151 | ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, |
2152 | decrypted); |
2153 | break; |
2154 | case DP_RX_DECAP_TYPE_ETHERNET2_DIX: |
2155 | ehdr = (struct ethhdr *)msdu->data; |
2156 | |
2157 | /* mac80211 allows fast path only for authorized STA */ |
2158 | if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { |
ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2160 | ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); |
2161 | break; |
2162 | } |
2163 | |
2164 | /* PN for mcast packets will be validated in mac80211; |
2165 | * remove eth header and add 802.11 header. |
2166 | */ |
if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2168 | ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); |
2169 | break; |
2170 | case DP_RX_DECAP_TYPE_8023: |
2171 | /* TODO: Handle undecap for these formats */ |
2172 | break; |
2173 | } |
2174 | } |
2175 | |
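/* Look the peer up by the peer id learnt from the rx descriptor first,
* falling back to an address lookup on mpdu_start addr2 when no valid
* id is available. Caller must hold base_lock.
*/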
2176 | struct ath12k_peer * |
2177 | ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu) |
2178 | { |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_peer *peer = NULL;

lockdep_assert_held(&ab->base_lock);

if (rxcb->peer_id)
peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);

if (peer)
return peer;

if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
return NULL;

peer = ath12k_peer_find_by_addr(ab,
ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
rx_desc));
2197 | return peer; |
2198 | } |
2199 | |
2200 | static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, |
2201 | struct sk_buff *msdu, |
2202 | struct hal_rx_desc *rx_desc, |
2203 | struct ieee80211_rx_status *rx_status) |
2204 | { |
2205 | bool fill_crypto_hdr; |
2206 | struct ath12k_base *ab = ar->ab; |
2207 | struct ath12k_skb_rxcb *rxcb; |
2208 | enum hal_encrypt_type enctype; |
2209 | bool is_decrypted = false; |
2210 | struct ieee80211_hdr *hdr; |
2211 | struct ath12k_peer *peer; |
2212 | u32 err_bitmap; |
2213 | |
2214 | /* PN for multicast packets will be checked in mac80211 */ |
rxcb = ATH12K_SKB_RXCB(msdu);
fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
rxcb->is_mcbc = fill_crypto_hdr;

if (rxcb->is_mcbc)
rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);

spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer) {
if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
enctype = peer->sec_type;
} else {
enctype = HAL_ENCRYPT_TYPE_OPEN;
}
spin_unlock_bh(&ar->ab->base_lock);

err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2237 | |
2238 | /* Clear per-MPDU flags while leaving per-PPDU flags intact */ |
2239 | rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | |
2240 | RX_FLAG_MMIC_ERROR | |
2241 | RX_FLAG_DECRYPTED | |
2242 | RX_FLAG_IV_STRIPPED | |
2243 | RX_FLAG_MMIC_STRIPPED); |
2244 | |
2245 | if (err_bitmap & HAL_RX_MPDU_ERR_FCS) |
2246 | rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; |
2247 | if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) |
2248 | rx_status->flag |= RX_FLAG_MMIC_ERROR; |
2249 | |
2250 | if (is_decrypted) { |
2251 | rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; |
2252 | |
2253 | if (fill_crypto_hdr) |
2254 | rx_status->flag |= RX_FLAG_MIC_STRIPPED | |
2255 | RX_FLAG_ICV_STRIPPED; |
2256 | else |
2257 | rx_status->flag |= RX_FLAG_IV_STRIPPED | |
2258 | RX_FLAG_PN_VALIDATED; |
2259 | } |
2260 | |
2261 | ath12k_dp_rx_h_csum_offload(ar, msdu); |
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);

if (!is_decrypted || fill_crypto_hdr)
return;

if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
2269 | DP_RX_DECAP_TYPE_ETHERNET2_DIX) { |
2270 | hdr = (void *)msdu->data; |
2271 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
2272 | } |
2273 | } |
2274 | |
2275 | static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc, |
2276 | struct ieee80211_rx_status *rx_status) |
2277 | { |
2278 | struct ath12k_base *ab = ar->ab; |
2279 | struct ieee80211_supported_band *sband; |
2280 | enum rx_msdu_start_pkt_type pkt_type; |
2281 | u8 bw; |
2282 | u8 rate_mcs, nss; |
2283 | u8 sgi; |
2284 | bool is_cck; |
2285 | |
pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
nss = ath12k_dp_rx_h_nss(ab, rx_desc);
sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);

switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
sband = &ar->mac.sbands[rx_status->band];
rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
is_cck);
break;
case RX_MSDU_START_PKT_TYPE_11N:
rx_status->encoding = RX_ENC_HT;
if (rate_mcs > ATH12K_HT_MCS_MAX) {
ath12k_warn(ar->ab,
"Received with invalid mcs in HT mode %d\n",
rate_mcs);
2306 | break; |
2307 | } |
2308 | rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); |
2309 | if (sgi) |
2310 | rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; |
2311 | rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); |
2312 | break; |
2313 | case RX_MSDU_START_PKT_TYPE_11AC: |
2314 | rx_status->encoding = RX_ENC_VHT; |
2315 | rx_status->rate_idx = rate_mcs; |
2316 | if (rate_mcs > ATH12K_VHT_MCS_MAX) { |
ath12k_warn(ar->ab,
"Received with invalid mcs in VHT mode %d\n",
rate_mcs);
2320 | break; |
2321 | } |
2322 | rx_status->nss = nss; |
2323 | if (sgi) |
2324 | rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; |
2325 | rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); |
2326 | break; |
2327 | case RX_MSDU_START_PKT_TYPE_11AX: |
2328 | rx_status->rate_idx = rate_mcs; |
2329 | if (rate_mcs > ATH12K_HE_MCS_MAX) { |
ath12k_warn(ar->ab,
"Received with invalid mcs in HE mode %d\n",
rate_mcs);
2333 | break; |
2334 | } |
2335 | rx_status->encoding = RX_ENC_HE; |
2336 | rx_status->nss = nss; |
2337 | rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); |
2338 | rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); |
2339 | break; |
2340 | } |
2341 | } |
2342 | |
2343 | void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, |
2344 | struct ieee80211_rx_status *rx_status) |
2345 | { |
2346 | struct ath12k_base *ab = ar->ab; |
2347 | u8 channel_num; |
2348 | u32 center_freq, meta_data; |
2349 | struct ieee80211_channel *channel; |
2350 | |
2351 | rx_status->freq = 0; |
2352 | rx_status->rate_idx = 0; |
2353 | rx_status->nss = 0; |
2354 | rx_status->encoding = RX_ENC_LEGACY; |
2355 | rx_status->bw = RATE_INFO_BW_20; |
2356 | rx_status->enc_flags = 0; |
2357 | |
2358 | rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; |
2359 | |
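/* The frequency metadata packs the channel number in the lower 16 bits
* and the center frequency in MHz in the upper 16 bits.
*/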
meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2361 | channel_num = meta_data; |
2362 | center_freq = meta_data >> 16; |
2363 | |
2364 | if (center_freq >= 5935 && center_freq <= 7105) { |
2365 | rx_status->band = NL80211_BAND_6GHZ; |
2366 | } else if (channel_num >= 1 && channel_num <= 14) { |
2367 | rx_status->band = NL80211_BAND_2GHZ; |
2368 | } else if (channel_num >= 36 && channel_num <= 173) { |
2369 | rx_status->band = NL80211_BAND_5GHZ; |
2370 | } else { |
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(*rx_desc));
}

rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
2385 | |
2386 | ath12k_dp_rx_h_rate(ar, rx_desc, rx_status); |
2387 | } |
2388 | |
2389 | static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, |
2390 | struct sk_buff *msdu, |
2391 | struct ieee80211_rx_status *status) |
2392 | { |
2393 | struct ath12k_base *ab = ar->ab; |
2394 | static const struct ieee80211_radiotap_he known = { |
2395 | .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | |
2396 | IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), |
2397 | .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), |
2398 | }; |
2399 | struct ieee80211_radiotap_he *he; |
2400 | struct ieee80211_rx_status *rx_status; |
2401 | struct ieee80211_sta *pubsta; |
2402 | struct ath12k_peer *peer; |
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2404 | u8 decap = DP_RX_DECAP_TYPE_RAW; |
2405 | bool is_mcbc = rxcb->is_mcbc; |
2406 | bool is_eapol = rxcb->is_eapol; |
2407 | |
2408 | if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) && |
2409 | !(status->flag & RX_FLAG_SKIP_MONITOR)) { |
he = skb_push(msdu, sizeof(known));
2411 | memcpy(he, &known, sizeof(known)); |
2412 | status->flag |= RX_FLAG_RADIOTAP_HE; |
2413 | } |
2414 | |
2415 | if (!(status->flag & RX_FLAG_ONLY_MONITOR)) |
decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);

spin_lock_bh(&ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ab, msdu);

pubsta = peer ? peer->sta : NULL;

spin_unlock_bh(&ab->base_lock);
2424 | |
2425 | ath12k_dbg(ab, ATH12K_DBG_DATA, |
2426 | "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n" , |
2427 | msdu, |
2428 | msdu->len, |
2429 | peer ? peer->addr : NULL, |
2430 | rxcb->tid, |
2431 | is_mcbc ? "mcast" : "ucast" , |
2432 | ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc), |
2433 | (status->encoding == RX_ENC_LEGACY) ? "legacy" : "" , |
2434 | (status->encoding == RX_ENC_HT) ? "ht" : "" , |
2435 | (status->encoding == RX_ENC_VHT) ? "vht" : "" , |
2436 | (status->encoding == RX_ENC_HE) ? "he" : "" , |
2437 | (status->bw == RATE_INFO_BW_40) ? "40" : "" , |
2438 | (status->bw == RATE_INFO_BW_80) ? "80" : "" , |
2439 | (status->bw == RATE_INFO_BW_160) ? "160" : "" , |
2440 | (status->bw == RATE_INFO_BW_320) ? "320" : "" , |
2441 | status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "" , |
2442 | status->rate_idx, |
2443 | status->nss, |
2444 | status->freq, |
2445 | status->band, status->flag, |
2446 | !!(status->flag & RX_FLAG_FAILED_FCS_CRC), |
2447 | !!(status->flag & RX_FLAG_MMIC_ERROR), |
2448 | !!(status->flag & RX_FLAG_AMSDU_MORE)); |
2449 | |
ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);

rx_status = IEEE80211_SKB_RXCB(msdu);
2454 | *rx_status = *status; |
2455 | |
2456 | /* TODO: trace rx packet */ |
2457 | |
/* PN for multicast packets is not validated in HW,
* so skip the 802.3 rx path.
* Also, fast_rx expects the STA to be authorized, hence
* eapol packets are sent in slow path.
*/
2463 | if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && |
2464 | !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) |
2465 | rx_status->flag |= RX_FLAG_8023; |
2466 | |
ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
2468 | } |
2469 | |
2470 | static int ath12k_dp_rx_process_msdu(struct ath12k *ar, |
2471 | struct sk_buff *msdu, |
2472 | struct sk_buff_head *msdu_list, |
2473 | struct ieee80211_rx_status *rx_status) |
2474 | { |
2475 | struct ath12k_base *ab = ar->ab; |
2476 | struct hal_rx_desc *rx_desc, *lrx_desc; |
2477 | struct ath12k_skb_rxcb *rxcb; |
2478 | struct sk_buff *last_buf; |
2479 | u8 l3_pad_bytes; |
2480 | u16 msdu_len; |
2481 | int ret; |
2482 | u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; |
2483 | |
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
ath12k_warn(ab,
"No valid Rx buffer to access MSDU_END tlv\n");
2488 | ret = -EIO; |
2489 | goto free_out; |
2490 | } |
2491 | |
2492 | rx_desc = (struct hal_rx_desc *)msdu->data; |
2493 | lrx_desc = (struct hal_rx_desc *)last_buf->data; |
if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2496 | ret = -EIO; |
2497 | goto free_out; |
2498 | } |
2499 | |
rxcb = ATH12K_SKB_RXCB(msdu);
rxcb->rx_desc = rx_desc;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);

if (rxcb->is_frag) {
skb_pull(msdu, hal_rx_desc_sz);
} else if (!rxcb->is_continuation) {
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
ret = -EINVAL;
ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
sizeof(*rx_desc));
goto free_out;
}
skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
} else {
ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
msdu, last_buf,
l3_pad_bytes, msdu_len);
if (ret) {
ath12k_warn(ab,
"failed to coalesce msdu rx buffer%d\n", ret);
2524 | goto free_out; |
2525 | } |
2526 | } |
2527 | |
2528 | ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status); |
2529 | ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); |
2530 | |
2531 | rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; |
2532 | |
2533 | return 0; |
2534 | |
2535 | free_out: |
2536 | return ret; |
2537 | } |
2538 | |
2539 | static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, |
2540 | struct napi_struct *napi, |
2541 | struct sk_buff_head *msdu_list, |
2542 | int ring_id) |
2543 | { |
2544 | struct ieee80211_rx_status rx_status = {0}; |
2545 | struct ath12k_skb_rxcb *rxcb; |
2546 | struct sk_buff *msdu; |
2547 | struct ath12k *ar; |
2548 | u8 mac_id, pdev_id; |
2549 | int ret; |
2550 | |
if (skb_queue_empty(msdu_list))
2552 | return; |
2553 | |
2554 | rcu_read_lock(); |
2555 | |
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
mac_id = rxcb->mac_id;
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}

if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}

ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
continue;
}

ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2580 | } |
2581 | |
2582 | rcu_read_unlock(); |
2583 | } |
2584 | |
2585 | int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, |
2586 | struct napi_struct *napi, int budget) |
2587 | { |
2588 | struct ath12k_rx_desc_info *desc_info; |
2589 | struct ath12k_dp *dp = &ab->dp; |
2590 | struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; |
2591 | struct hal_reo_dest_ring *desc; |
2592 | int num_buffs_reaped = 0; |
2593 | struct sk_buff_head msdu_list; |
2594 | struct ath12k_skb_rxcb *rxcb; |
2595 | int total_msdu_reaped = 0; |
2596 | struct hal_srng *srng; |
2597 | struct sk_buff *msdu; |
2598 | bool done = false; |
2599 | int mac_id; |
2600 | u64 desc_va; |
2601 | |
__skb_queue_head_init(&msdu_list);

srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

spin_lock_bh(&srng->lock);
2607 | |
2608 | try_again: |
2609 | ath12k_hal_srng_access_begin(ab, srng); |
2610 | |
2611 | while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { |
2612 | enum hal_reo_dest_ring_push_reason push_reason; |
2613 | u32 cookie; |
2614 | |
cookie = le32_get_bits(desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);

mac_id = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2620 | |
2621 | desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | |
2622 | le32_to_cpu(desc->buf_va_lo)); |
2623 | desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); |
2624 | |
2625 | /* retry manual desc retrieval */ |
2626 | if (!desc_info) { |
2627 | desc_info = ath12k_dp_get_rx_desc(ab, cookie); |
2628 | if (!desc_info) { |
ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
2630 | continue; |
2631 | } |
2632 | } |
2633 | |
2634 | if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) |
ath12k_warn(ab, "Check HW CC implementation");
2636 | |
2637 | msdu = desc_info->skb; |
2638 | desc_info->skb = NULL; |
2639 | |
spin_lock_bh(&dp->rx_desc_lock);
list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);

rxcb = ATH12K_SKB_RXCB(msdu);
2645 | dma_unmap_single(ab->dev, rxcb->paddr, |
2646 | msdu->len + skb_tailroom(msdu), |
2647 | DMA_FROM_DEVICE); |
2648 | |
2649 | num_buffs_reaped++; |
2650 | |
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
2656 | ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; |
2657 | continue; |
2658 | } |
2659 | |
2660 | rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & |
2661 | RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); |
2662 | rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & |
2663 | RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); |
2664 | rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) & |
2665 | RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); |
2666 | rxcb->mac_id = mac_id; |
rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
RX_MPDU_DESC_META_DATA_PEER_ID);
rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
RX_MPDU_DESC_INFO0_TID);

__skb_queue_tail(&msdu_list, msdu);
2673 | |
2674 | if (!rxcb->is_continuation) { |
2675 | total_msdu_reaped++; |
2676 | done = true; |
2677 | } else { |
2678 | done = false; |
2679 | } |
2680 | |
2681 | if (total_msdu_reaped >= budget) |
2682 | break; |
2683 | } |
2684 | |
2685 | /* Hw might have updated the head pointer after we cached it. |
2686 | * In this case, even though there are entries in the ring we'll |
2687 | * get rx_desc NULL. Give the read another try with updated cached |
2688 | * head pointer so that we can reap complete MPDU in the current |
2689 | * rx processing. |
2690 | */ |
if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2692 | ath12k_hal_srng_access_end(ab, srng); |
2693 | goto try_again; |
2694 | } |
2695 | |
2696 | ath12k_hal_srng_access_end(ab, srng); |
2697 | |
spin_unlock_bh(&srng->lock);
2699 | |
2700 | if (!total_msdu_reaped) |
2701 | goto exit; |
2702 | |
ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);

ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
2707 | |
2708 | exit: |
2709 | return total_msdu_reaped; |
2710 | } |
2711 | |
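/* Fragment reassembly timeout: if the fragment sequence for this TID
* is still incomplete when the timer fires, drop whatever fragments
* were collected so far.
*/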
2712 | static void ath12k_dp_rx_frag_timer(struct timer_list *timer) |
2713 | { |
2714 | struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); |
2715 | |
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
spin_unlock_bh(&rx_tid->ab->base_lock);
return;
}
ath12k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&rx_tid->ab->base_lock);
2724 | } |
2725 | |
2726 | int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) |
2727 | { |
2728 | struct ath12k_base *ab = ar->ab; |
2729 | struct crypto_shash *tfm; |
2730 | struct ath12k_peer *peer; |
2731 | struct ath12k_dp_rx_tid *rx_tid; |
2732 | int i; |
2733 | |
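/* Allocate the Michael MIC transform before taking base_lock, since
* crypto_alloc_shash() may sleep while base_lock is a BH spinlock.
*/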
tfm = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);

spin_lock_bh(&ab->base_lock);

peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2744 | return -ENOENT; |
2745 | } |
2746 | |
2747 | for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { |
2748 | rx_tid = &peer->rx_tid[i]; |
2749 | rx_tid->ab = ab; |
2750 | timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); |
skb_queue_head_init(&rx_tid->rx_frags);
2752 | } |
2753 | |
2754 | peer->tfm_mmic = tfm; |
2755 | peer->dp_setup_done = true; |
spin_unlock_bh(&ab->base_lock);
2757 | |
2758 | return 0; |
2759 | } |
2760 | |
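/* Compute the TKIP Michael MIC over the payload plus the pseudo-header
* defined in IEEE 802.11: DA (6 bytes), SA (6 bytes), priority
* (1 byte) and three zero padding bytes.
*/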
2761 | static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, |
2762 | struct ieee80211_hdr *hdr, u8 *data, |
2763 | size_t data_len, u8 *mic) |
2764 | { |
2765 | SHASH_DESC_ON_STACK(desc, tfm); |
2766 | u8 mic_hdr[16] = {0}; |
2767 | u8 tid = 0; |
2768 | int ret; |
2769 | |
2770 | if (!tfm) |
2771 | return -EINVAL; |
2772 | |
2773 | desc->tfm = tfm; |
2774 | |
ret = crypto_shash_setkey(tfm, key, 8);
2776 | if (ret) |
2777 | goto out; |
2778 | |
2779 | ret = crypto_shash_init(desc); |
2780 | if (ret) |
2781 | goto out; |
2782 | |
2783 | /* TKIP MIC header */ |
2784 | memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); |
2785 | memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); |
if (ieee80211_is_data_qos(hdr->frame_control))
2787 | tid = ieee80211_get_tid(hdr); |
2788 | mic_hdr[12] = tid; |
2789 | |
ret = crypto_shash_update(desc, mic_hdr, 16);
if (ret)
goto out;
ret = crypto_shash_update(desc, data, data_len);
if (ret)
goto out;
ret = crypto_shash_final(desc, mic);
2797 | out: |
2798 | shash_desc_zero(desc); |
2799 | return ret; |
2800 | } |
2801 | |
2802 | static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, |
2803 | struct sk_buff *msdu) |
2804 | { |
2805 | struct ath12k_base *ab = ar->ab; |
2806 | struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; |
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2808 | struct ieee80211_key_conf *key_conf; |
2809 | struct ieee80211_hdr *hdr; |
2810 | u8 mic[IEEE80211_CCMP_MIC_LEN]; |
2811 | int head_len, tail_len, ret; |
2812 | size_t data_len; |
2813 | u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; |
2814 | u8 *key, *data; |
2815 | u8 key_idx; |
2816 | |
if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2818 | return 0; |
2819 | |
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
2822 | head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; |
2823 | tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; |
2824 | |
if (!is_multicast_ether_addr(hdr->addr1))
2826 | key_idx = peer->ucast_keyidx; |
2827 | else |
2828 | key_idx = peer->mcast_keyidx; |
2829 | |
2830 | key_conf = peer->keys[key_idx]; |
2831 | |
2832 | data = msdu->data + head_len; |
2833 | data_len = msdu->len - head_len - tail_len; |
2834 | key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; |
2835 | |
ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2838 | goto mic_fail; |
2839 | |
2840 | return 0; |
2841 | |
2842 | mic_fail: |
(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;

rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);

ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
2854 | return -EINVAL; |
2855 | } |
2856 | |
2857 | static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, |
2858 | enum hal_encrypt_type enctype, u32 flags) |
2859 | { |
2860 | struct ieee80211_hdr *hdr; |
2861 | size_t hdr_len; |
2862 | size_t crypto_len; |
2863 | u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; |
2864 | |
2865 | if (!flags) |
2866 | return; |
2867 | |
2868 | hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); |
2869 | |
if (flags & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_mic_len(ar, enctype));

if (flags & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_icv_len(ar, enctype));

if (flags & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);

memmove(msdu->data + hal_rx_desc_sz + crypto_len,
msdu->data + hal_rx_desc_sz, hdr_len);
skb_pull(msdu, crypto_len);
2885 | } |
2886 | } |
2887 | |
2888 | static int ath12k_dp_rx_h_defrag(struct ath12k *ar, |
2889 | struct ath12k_peer *peer, |
2890 | struct ath12k_dp_rx_tid *rx_tid, |
2891 | struct sk_buff **defrag_skb) |
2892 | { |
2893 | struct ath12k_base *ab = ar->ab; |
2894 | struct hal_rx_desc *rx_desc; |
2895 | struct sk_buff *skb, *first_frag, *last_frag; |
2896 | struct ieee80211_hdr *hdr; |
2897 | enum hal_encrypt_type enctype; |
2898 | bool is_decrypted = false; |
2899 | int msdu_len = 0; |
int extra_space;
2901 | u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; |
2902 | |
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
2905 | |
2906 | skb_queue_walk(&rx_tid->rx_frags, skb) { |
2907 | flags = 0; |
2908 | rx_desc = (struct hal_rx_desc *)skb->data; |
2909 | hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); |
2910 | |
enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
if (enctype != HAL_ENCRYPT_TYPE_OPEN)
is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
rx_desc);
2915 | |
2916 | if (is_decrypted) { |
2917 | if (skb != first_frag) |
2918 | flags |= RX_FLAG_IV_STRIPPED; |
2919 | if (skb != last_frag) |
2920 | flags |= RX_FLAG_ICV_STRIPPED | |
2921 | RX_FLAG_MIC_STRIPPED; |
2922 | } |
2923 | |
2924 | /* RX fragments are always raw packets */ |
2925 | if (skb != last_frag) |
skb_trim(skb, skb->len - FCS_LEN);
ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

if (skb != first_frag)
skb_pull(skb, hal_rx_desc_sz +
ieee80211_hdrlen(hdr->frame_control));
2932 | msdu_len += skb->len; |
2933 | } |
2934 | |
extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
if (extra_space > 0 &&
(pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
2938 | return -ENOMEM; |
2939 | |
__skb_unlink(first_frag, &rx_tid->rx_frags);
while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
skb_put_data(first_frag, skb->data, skb->len);
dev_kfree_skb_any(skb);
2944 | } |
2945 | |
2946 | hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); |
2947 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); |
ATH12K_SKB_RXCB(first_frag)->is_frag = 1;

if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
2951 | first_frag = NULL; |
2952 | |
2953 | *defrag_skb = first_frag; |
2954 | return 0; |
2955 | } |
2956 | |
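/* Hand a successfully reassembled MPDU back to the hardware: reuse the
* REO destination descriptor saved with the first fragment to build a
* REO entrance ring entry, so the defragmented frame flows through the
* regular REO path again.
*/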
2957 | static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, |
2958 | struct ath12k_dp_rx_tid *rx_tid, |
2959 | struct sk_buff *defrag_skb) |
2960 | { |
2961 | struct ath12k_base *ab = ar->ab; |
2962 | struct ath12k_dp *dp = &ab->dp; |
2963 | struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; |
2964 | struct hal_reo_entrance_ring *reo_ent_ring; |
2965 | struct hal_reo_dest_ring *reo_dest_ring; |
2966 | struct dp_link_desc_bank *link_desc_banks; |
2967 | struct hal_rx_msdu_link *msdu_link; |
2968 | struct hal_rx_msdu_details *msdu0; |
2969 | struct hal_srng *srng; |
2970 | dma_addr_t link_paddr, buf_paddr; |
2971 | u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; |
2972 | u32 cookie, hal_rx_desc_sz, dest_ring_info0; |
2973 | int ret; |
2974 | struct ath12k_rx_desc_info *desc_info; |
2975 | u8 dst_ind; |
2976 | |
2977 | hal_rx_desc_sz = ab->hal.hal_desc_sz; |
2978 | link_desc_banks = dp->link_desc_banks; |
2979 | reo_dest_ring = rx_tid->dst_ring_desc; |
2980 | |
ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
&link_paddr, &cookie);
desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
2984 | |
2985 | msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + |
2986 | (link_paddr - link_desc_banks[desc_bank].paddr)); |
2987 | msdu0 = &msdu_link->msdu_link[0]; |
2988 | msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); |
dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
2990 | |
2991 | memset(msdu0, 0, sizeof(*msdu0)); |
2992 | |
msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
3000 | msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); |
3001 | msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); |
3002 | |
3003 | /* change msdu len in hal rx desc */ |
ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3005 | |
3006 | buf_paddr = dma_map_single(ab->dev, defrag_skb->data, |
3007 | defrag_skb->len + skb_tailroom(defrag_skb), |
3008 | DMA_FROM_DEVICE); |
if (dma_mapping_error(ab->dev, buf_paddr))
3010 | return -ENOMEM; |
3011 | |
spin_lock_bh(&dp->rx_desc_lock);
3013 | desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, |
3014 | struct ath12k_rx_desc_info, |
3015 | list); |
3016 | if (!desc_info) { |
spin_unlock_bh(&dp->rx_desc_lock);
ath12k_warn(ab, "failed to find rx desc for reinject\n");
3019 | ret = -ENOMEM; |
3020 | goto err_unmap_dma; |
3021 | } |
3022 | |
3023 | desc_info->skb = defrag_skb; |
3024 | |
list_del(&desc_info->list);
list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);

ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;

ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
desc_info->cookie,
HAL_RX_BUF_RBM_SW3_BM);
3034 | |
3035 | /* Fill mpdu details into reo entrance ring */ |
3036 | srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; |
3037 | |
spin_lock_bh(&srng->lock);
3039 | ath12k_hal_srng_access_begin(ab, srng); |
3040 | |
3041 | reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); |
3042 | if (!reo_ent_ring) { |
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
3045 | ret = -ENOSPC; |
3046 | goto err_free_desc; |
3047 | } |
3048 | memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); |
3049 | |
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);

mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3059 | |
3060 | reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); |
3061 | reo_ent_ring->rx_mpdu_info.peer_meta_data = |
3062 | reo_dest_ring->rx_mpdu_info.peer_meta_data; |
3063 | |
/* Firmware expects the physical address to be filled in queue_addr_lo
* in the MLO scenario, while for non-MLO the peer meta data needs to
* be filled instead.
* TODO: Need to handle the MLO scenario.
*/
3069 | reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; |
reo_ent_ring->info0 = le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);

reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3077 | reo_ent_ring->info2 = |
3078 | cpu_to_le32(u32_get_bits(dest_ring_info0, |
3079 | HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); |
3080 | |
3081 | ath12k_hal_srng_access_end(ab, srng); |
spin_unlock_bh(&srng->lock);
3083 | |
3084 | return 0; |
3085 | |
3086 | err_free_desc: |
spin_lock_bh(&dp->rx_desc_lock);
list_del(&desc_info->list);
list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
desc_info->skb = NULL;
spin_unlock_bh(&dp->rx_desc_lock);
3092 | err_unmap_dma: |
3093 | dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), |
3094 | DMA_FROM_DEVICE); |
3095 | return ret; |
3096 | } |
3097 | |
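/* Fragments can arrive out of order; keep the per-TID fragment list
* sorted by fragment number via ordered insertion.
*/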
3098 | static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, |
3099 | struct sk_buff *a, struct sk_buff *b) |
3100 | { |
3101 | int frag1, frag2; |
3102 | |
frag1 = ath12k_dp_rx_h_frag_no(ab, a);
frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3105 | |
3106 | return frag1 - frag2; |
3107 | } |
3108 | |
3109 | static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, |
3110 | struct sk_buff_head *frag_list, |
3111 | struct sk_buff *cur_frag) |
3112 | { |
3113 | struct sk_buff *skb; |
3114 | int cmp; |
3115 | |
3116 | skb_queue_walk(frag_list, skb) { |
cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
if (cmp < 0)
continue;
__skb_queue_before(frag_list, skb, cur_frag);
return;
}
__skb_queue_tail(frag_list, cur_frag);
3124 | } |
3125 | |
3126 | static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) |
3127 | { |
3128 | struct ieee80211_hdr *hdr; |
3129 | u64 pn = 0; |
3130 | u8 *ehdr; |
3131 | u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; |
3132 | |
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3135 | |
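/* Assemble the 48-bit PN from the CCMP/GCMP IV: PN0 and PN1 sit in
* bytes 0-1, bytes 2-3 carry a reserved byte and the key id, and
* PN2-PN5 follow in bytes 4-7.
*/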
3136 | pn = ehdr[0]; |
3137 | pn |= (u64)ehdr[1] << 8; |
3138 | pn |= (u64)ehdr[4] << 16; |
3139 | pn |= (u64)ehdr[5] << 24; |
3140 | pn |= (u64)ehdr[6] << 32; |
3141 | pn |= (u64)ehdr[7] << 40; |
3142 | |
3143 | return pn; |
3144 | } |
3145 | |
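/* For CCMP/GCMP ciphers the PN must increase by exactly one from one
* fragment to the next within an MPDU; anything else suggests a
* replayed or spoofed fragment, so the whole sequence is rejected.
*/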
3146 | static bool |
3147 | ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) |
3148 | { |
3149 | struct ath12k_base *ab = ar->ab; |
3150 | enum hal_encrypt_type encrypt_type; |
3151 | struct sk_buff *first_frag, *skb; |
3152 | struct hal_rx_desc *desc; |
3153 | u64 last_pn; |
3154 | u64 cur_pn; |
3155 | |
first_frag = skb_peek(&rx_tid->rx_frags);
3157 | desc = (struct hal_rx_desc *)first_frag->data; |
3158 | |
3159 | encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); |
3160 | if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && |
3161 | encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && |
3162 | encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && |
3163 | encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) |
3164 | return true; |
3165 | |
last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3167 | skb_queue_walk(&rx_tid->rx_frags, skb) { |
3168 | if (skb == first_frag) |
3169 | continue; |
3170 | |
3171 | cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); |
3172 | if (cur_pn != last_pn + 1) |
3173 | return false; |
3174 | last_pn = cur_pn; |
3175 | } |
3176 | return true; |
3177 | } |
3178 | |
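/* Core defragmentation handler: validate the fragment, queue it on the
 * per-TID fragment list in order, keep the destination ring descriptor of
 * fragment 0 for the later reinjection and arm the fragment timeout timer.
 * Once all fragments have arrived, validate the PN sequence, reassemble
 * the MSDU and reinject it into the REO ring.
 */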
static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
				    struct sk_buff *msdu,
				    struct hal_reo_dest_ring *ring_desc)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	struct sk_buff *defrag_skb = NULL;
	u32 peer_id;
	u16 seqno, frag_no;
	u8 tid;
	int ret = 0;
	bool more_frags;

	rx_desc = (struct hal_rx_desc *)msdu->data;
	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
	tid = ath12k_dp_rx_h_tid(ab, rx_desc);
	seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
	frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
	more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);

	if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
	    !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
	    tid > IEEE80211_NUM_TIDS)
		return -EINVAL;

	/* Received an unfragmented packet in the REO exception ring;
	 * this should not happen since such packets normally arrive
	 * via the reo2sw srngs.
	 */
	if (WARN_ON_ONCE(!frag_no && !more_frags))
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
			    peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}

	if (!peer->dp_setup_done) {
		ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
			    peer->addr, peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}

	rx_tid = &peer->rx_tid[tid];

	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
	    skb_queue_empty(&rx_tid->rx_frags)) {
		/* Flush stored fragments and start a new sequence */
		ath12k_dp_rx_frags_cleanup(rx_tid, true);
		rx_tid->cur_sn = seqno;
	}

	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
		/* Fragment already present */
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
		__skb_queue_tail(&rx_tid->rx_frags, msdu);
	else
		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);

	rx_tid->rx_frag_bitmap |= BIT(frag_no);
	if (!more_frags)
		rx_tid->last_frag_no = frag_no;

	if (frag_no == 0) {
		rx_tid->dst_ring_desc = kmemdup(ring_desc,
						sizeof(*rx_tid->dst_ring_desc),
						GFP_ATOMIC);
		if (!rx_tid->dst_ring_desc) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else {
		ath12k_dp_rx_link_desc_return(ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	if (!rx_tid->last_frag_no ||
	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
		mod_timer(&rx_tid->frag_timer, jiffies +
			  ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
		goto out_unlock;
	}

	spin_unlock_bh(&ab->base_lock);
	del_timer_sync(&rx_tid->frag_timer);
	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (!peer)
		goto err_frags_cleanup;

	if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
		goto err_frags_cleanup;

	if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
		goto err_frags_cleanup;

	if (!defrag_skb)
		goto err_frags_cleanup;

	if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
		goto err_frags_cleanup;

	ath12k_dp_rx_frags_cleanup(rx_tid, false);
	goto out_unlock;

err_frags_cleanup:
	dev_kfree_skb_any(defrag_skb);
	ath12k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
	spin_unlock_bh(&ab->base_lock);
	return ret;
}

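/* Recover the SW rx descriptor of an errored buffer from its virtual
 * address (falling back to a cookie based lookup), unmap the DMA buffer
 * and either drop the MSDU or hand it to the defragmentation handler.
 */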
static int
ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
			     bool drop, u32 cookie)
{
	struct ath12k_base *ab = ar->ab;
	struct sk_buff *msdu;
	struct ath12k_skb_rxcb *rxcb;
	struct hal_rx_desc *rx_desc;
	u16 msdu_len;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
	struct ath12k_rx_desc_info *desc_info;
	u64 desc_va;

	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
		   le32_to_cpu(desc->buf_va_lo));
	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);

	/* retry manual desc retrieval */
	if (!desc_info) {
		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
		if (!desc_info) {
			ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
			return -EINVAL;
		}
	}

	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
		ath12k_warn(ab, "RX exception, check HW CC implementation");

	msdu = desc_info->skb;
	desc_info->skb = NULL;
	spin_lock_bh(&ab->dp.rx_desc_lock);
	list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
	spin_unlock_bh(&ab->dp.rx_desc_lock);

	rxcb = ATH12K_SKB_RXCB(msdu);
	dma_unmap_single(ar->ab->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
		ath12k_warn(ar->ab, "invalid msdu len %u", msdu_len);
		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
				sizeof(*rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, hal_rx_desc_sz + msdu_len);

	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
		dev_kfree_skb_any(msdu);
		ath12k_dp_rx_link_desc_return(ar->ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}

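/* Reap the REO exception ring. Only rx fragments with one MSDU per link
 * descriptor are processed further; everything else is dropped and its
 * buffers are returned to the WBM idle list.
 */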
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	struct hal_rx_msdu_link *link_desc_va;
	int tot_n_bufs_reaped, quota, ret, i;
	struct hal_reo_dest_ring *reo_desc;
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath12k_dp *dp;
	int mac_id;
	struct ath12k *ar;
	dma_addr_t paddr;
	bool is_frag;
	bool drop = false;
	int pdev_id;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		ab->soc_stats.err_ring_pkts++;
		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath12k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
			ab->soc_stats.invalid_rbm++;
			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath12k_dp_rx_link_desc_return(ab, reo_desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
			     RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc
		 * below, and drop MSDUs flagged for error reasons.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = true;
			/* Return the link desc back to wbm idle list */
			ath12k_dp_rx_link_desc_return(ab, reo_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			mac_id = le32_get_bits(reo_desc->info0,
					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);

			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
			ar = ab->pdevs[pdev_id].ar;

			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
							  msdu_cookies[i]))
				tot_n_bufs_reaped++;
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	rx_ring = &dp->rx_refill_buf_ring;

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);

	return tot_n_bufs_reaped;
}

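/* Drop the remaining scatter-gather buffers of an oversized MSDU that
 * failed with a NULL queue descriptor error; @msdu_len covers the length
 * still spread over the buffers in @msdu_list.
 */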
static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath12k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

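/* Handle an MSDU that hit the REO "descriptor address zero" (NULL queue)
 * error: valid frames are fixed up for delivery to mac80211, invalid ones
 * are reported back to the caller for dropping.
 */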
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	/* Even after the sg buffers in the msdu list are cleaned up by the
	 * check above, any msdu received with the continuation flag set must
	 * be dropped as invalid. This protects against stray error frames
	 * carrying the continuation flag.
	 */
	if (rxcb->is_continuation)
		return -EINVAL;

	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
		ath12k_warn(ar->ab,
			    "msdu_done bit not set in null_q_desc processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically
	 * happens if a packet is received on a QoS-enabled TID before the
	 * ADDBA negotiation for that TID has set up the TID queue. It may
	 * also happen for MC/BC frames if they are not routed to the
	 * non-QoS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination and a WBM
	 * release ring.
	 */

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	ath12k_dp_rx_h_ppdu(ar, desc, status);

	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);

	/* Note that the caller gets access to the msdu and completes the rx
	 * with mac80211, so there is no need to clean up the msdu list here.
	 */

	return 0;
}

static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is better to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and report them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

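/* Prepare an MSDU that failed the TKIP MIC check for delivery to mac80211:
 * undecap the frame and flag it with RX_FLAG_MMIC_ERROR so that mac80211
 * can run its TKIP countermeasures.
 */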
static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);

	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	ath12k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath12k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

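/* Map an RXDMA error code to an action: TKIP MIC failures are passed on to
 * mac80211, everything else is dropped.
 */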
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	bool drop = false;
	u32 err_bitmap;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
			break;
		}
		fallthrough;
	default:
		/* TODO: Review the other rxdma error codes to check if
		 * anything is worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}

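/* Reap errored MSDUs from the WBM error release ring, replenish the rx
 * buffers and dispatch each MSDU to the REO or RXDMA error handler based
 * on the module that released it.
 */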
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list;
	struct ath12k_skb_rxcb *rxcb;
	void *rx_desc;
	u8 mac_id;
	int num_buffs_reaped = 0;
	struct ath12k_rx_desc_info *desc_info;
	int ret, pdev_id;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
	rx_ring = &dp->rx_refill_buf_ring;

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath12k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		desc_info = err_info.rx_desc;

		/* retry manual desc retrieval if hw cc is not done */
		if (!desc_info) {
			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
			if (!desc_info) {
				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
				continue;
			}
		}

		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "WBM RX err, check HW CC implementation");

		msdu = desc_info->skb;
		desc_info->skb = NULL;

		spin_lock_bh(&dp->rx_desc_lock);
		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
		spin_unlock_bh(&dp->rx_desc_lock);

		rxcb = ATH12K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped++;

		if (!err_info.continuation)
			budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;

		__skb_queue_tail(&msdu_list, msdu);

		rxcb->is_first_msdu = err_info.first_msdu;
		rxcb->is_last_msdu = err_info.last_msdu;
		rxcb->is_continuation = err_info.continuation;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!num_buffs_reaped)
		goto done;

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);

	rcu_read_lock();
	while ((msdu = __skb_dequeue(&msdu_list))) {
		mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
							(struct hal_rx_desc *)msdu->data);
		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
		ar = ab->pdevs[pdev_id].ar;

		if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}
		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
	}
	rcu_read_unlock();
done:
	return num_buffs_reaped;
}

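/* Reap the REO status ring and complete the matching command from the
 * pending reo_cmd_list by invoking its completion handler.
 */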
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tlv_64_hdr *hdr;
	struct hal_srng *srng;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	bool found = false;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath12k_hal_reo_status_queue_stats(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath12k_hal_reo_flush_queue_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath12k_hal_reo_flush_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath12k_hal_reo_unblk_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
								  &reo_status);
			break;
		default:
			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
	ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}

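/* Configure the RXDMA ring TLV filter for QCN9274: subscribe only to the
 * TLVs needed by the datapath and, where supported, to the compacted
 * (word-masked) mpdu_start/msdu_end TLVs.
 */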
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		tlv_filter.rx_mpdu_start_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
		tlv_filter.rx_msdu_end_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
			   tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
	}

	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
					       HAL_RXDMA_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);

	return ret;
}

int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret = 0;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
	int i;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	/* TODO: Selectively subscribe to the required qwords within
	 * msdu_end and mpdu_start, set up the mask in the message below
	 * and modify the rx_desc struct accordingly.
	 */

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mac_buf_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
						       HAL_RXDMA_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
		/* Stop on the first failure instead of silently overwriting
		 * ret; ret is pre-initialized to 0 for the zero-ring case.
		 */
		if (ret)
			break;
	}

	return ret;
}

int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 ring_id;
	int i, ret;

	/* TODO: Need to verify the HTT setup for QCN9224 */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
							  i, HAL_RXDMA_BUF);
			if (ret) {
				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  i, HAL_RXDMA_DST);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}

		ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_TX_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure tx_mon_buf_ring %d\n",
				    ret);
			return ret;
		}
	}

	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i, ret;

	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);

	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);

	ret = ath12k_dp_srng_setup(ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0, 0,
				   DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
			ret = ath12k_dp_srng_setup(ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   i, DP_RX_MAC_BUF_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_srng_setup(ab,
					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
					   HAL_RXDMA_MONITOR_BUF, 0, 0,
					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}

		ret = ath12k_dp_srng_setup(ab,
					   &dp->tx_mon_buf_ring.refill_buf_ring,
					   HAL_TX_MONITOR_BUF, 0, 0,
					   DP_TX_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_TX_MONITOR_BUF\n");
			return ret;
		}
	}

	ret = ath12k_dp_rxdma_buf_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;
	struct ath12k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	if (!ab->hw_params->rxdma1_enable)
		goto out;

	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_RXDMA_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure rxdma_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}

		ring_id = dp->tx_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_TX_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure tx_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}
	}
out:
	return 0;
}

static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret = 0;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* if rxdma1_enable is false, there is no need to set up
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}

int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));

	return 0;
}

int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath12k_dp_purge_mon_ring(ab);
	if (ret) {
		ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}