// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

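/* Encode a queue depth given in bytes into the compact exponent/factor
 * format used in the tx queue state shared with the firmware. The byte
 * count is first scaled down to 128-byte units (count >> 7); each step of
 * the exponent then scales the factor down by a further factor of 8.
 * Worked example: 65536 bytes -> factor 512 -> factor 8 with exp 2, i.e.
 * roughly 8 * 128 * 8^2 = 65536. Depths too large to encode saturate at
 * 0xff.
 */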
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

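/* Update the host-resident tx queue state polled by the firmware in
 * push-pull mode: count[tid][peer_id] holds the encoded queue depth and
 * map[tid][] is a per-TID bitmap, one bit per peer (32 peers per u32
 * word), flagging which queues currently hold data.
 */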
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, NULL, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
		   peer_id, tid, count);
}

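/* Commit the queue state to the device: bump the sequence number so the
 * firmware can tell it is looking at a new snapshot, then stream the whole
 * structure out over DMA.
 */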
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

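/* num_pending_tx gates the data path: once it reaches max_num_pending_tx
 * the mac80211 queues are stopped with ATH10K_TX_PAUSE_Q_FULL and started
 * again as soon as the count drops back below the limit; a count of zero
 * additionally wakes anyone waiting for tx to drain.
 */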
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

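/* Probe responses are throttled separately: on hardware that defines
 * max_probe_resp_desc_thres, a new probe response is refused with -EBUSY
 * once the number of in-flight management frames exceeds that threshold.
 */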
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

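/* MSDU ids double as indices into the per-device tx descriptor arrays, so
 * they are allocated from an IDR bounded by max_num_pending_tx; the IDR
 * also maps each id back to its skb for completion handling.
 */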
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

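/* The tx queue state is a plain kzalloc'd structure streamed to the device
 * with dma_map_single()/dma_sync_single_for_device() rather than a coherent
 * allocation, as the transfer is strictly host-to-device (DMA_TO_DEVICE).
 */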
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

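/* The txdone fifo buffers tx completion reports until they are processed;
 * it must be able to hold one entry per pending frame, and kfifo sizes
 * must be a power of two, hence the roundup.
 */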
static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
	ath10k_htc_stop_hl(htt->ar);
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	ath10k_htt_flush_tx_queue(htt);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
	queue_work(ar->workqueue, &ar->bundle_tx_work);
}

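/* HTC send-done callback for HTT frames. When tx completions are disabled
 * (htt->disable_tx_comp) the firmware does not report data frame
 * completions, so the frame is completed here instead: the HTT headers are
 * inspected before the skb is freed and, unless the descriptor requested a
 * real completion via HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE, the frame is
 * reported to mac80211 as acked.
 */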
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};
	struct htt_cmd_hdr *htt_hdr;
	struct htt_data_tx_desc *desc_hdr = NULL;
	u16 flags1 = 0;
	u16 msdu_id = 0;
	u8 msg_type = 0;

	if (htt->disable_tx_comp) {
		htt_hdr = (struct htt_cmd_hdr *)skb->data;
		msg_type = htt_hdr->msg_type;

		if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
			desc_hdr = (struct htt_data_tx_desc *)
				(skb->data + sizeof(*htt_hdr));
			flags1 = __le16_to_cpu(desc_hdr->flags1);
			/* Read the msdu id now: desc_hdr points into the skb
			 * data, which is freed below.
			 */
			msdu_id = __le16_to_cpu(desc_hdr->id);
			skb_pull(skb, sizeof(struct htt_cmd_hdr));
			skb_pull(skb, sizeof(struct htt_data_tx_desc));
		}
	}

	dev_kfree_skb_any(skb);

	if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
		return;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx complete msdu id:%u, flags1:%x\n",
		   msdu_id, flags1);

	if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
		return;

	tx_done.status = HTT_TX_COMPL_STATE_ACK;
	tx_done.msdu_id = msdu_id;
	ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

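/* Request firmware statistics. The 64-bit cookie is echoed back in the
 * response so callers can match it; on the wire it is carried as two
 * little-endian 32-bit halves.
 */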
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only masks up to 24 bits, so there is no need
	 * to worry about endianness
	 */
	memcpy(req->upload_types, &mask, 3);
	memcpy(req->reset_types, &reset_mask, 3);
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw,
					      struct htt_rx_ring_setup_ring32 *rx_ring)
{
	ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
}

static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw,
					      struct htt_rx_ring_setup_ring64 *rx_ring)
{
	ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf_v2 *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf_v2);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf_v2;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

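/* Pick the vdev a frame is transmitted on: off-channel frames use the scan
 * vdev, frames with a vif use that vif's vdev, and anything else falls back
 * to the monitor vdev (when active) or vdev 0.
 */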
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

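/* Management frames use the dedicated management TID, QoS data frames carry
 * their TID in skb->priority, and everything else goes out as
 * non-QoS/multicast/broadcast.
 */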
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	const u8 *peer_addr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			skb_put(msdu, sizeof(struct ieee80211_mmie_16));
		} else {
			if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
				skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
			else
				skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

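/* High-latency tx prepends the HTT command header, the tx descriptor and
 * the HTC header to the frame in place, so the skb needs at least this much
 * headroom.
 */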
#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;
	u16 msdu_id = 0;

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		    ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		if (htt->disable_tx_comp)
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	if (ar->bus_param.hl_msdu_ids) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
		res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
		if (res < 0) {
			ath10k_err(ar, "msdu_id allocation failed %d\n", res);
			goto out;
		}
		msdu_id = res;
	}

	/* As the msdu is freed both by mac80211 (in ieee80211_tx_status())
	 * and by ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
	 * its reference count by one to avoid a use-after-free and a double
	 * free.
	 */
	skb_get(msdu);

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = __cpu_to_le16(msdu_id);
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT the
	 * reinjection path.
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		    ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
			   txmode == ATH10K_HW_TXRX_RAW &&
			   ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler is ignored by
	 * setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
			__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
			__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
			__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	dma_addr_t txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		    ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
			   txmode == ATH10K_HW_TXRX_RAW &&
			   ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler is ignored by
	 * setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
			__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
			__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
			__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
	.htt_flush_tx = ath10k_htt_flush_tx_queue,
};

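/* Select the tx ops for the detected target: high-latency (e.g. SDIO/USB)
 * devices use the HL ops, 64-bit targets the 64-bit descriptor ops, and
 * everything else the 32-bit ops.
 */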
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}