1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
2 | /* Copyright(c) 2020 Realtek Corporation |
3 | */ |
4 | |
5 | #include <linux/pci.h> |
6 | |
7 | #include "mac.h" |
8 | #include "pci.h" |
9 | #include "reg.h" |
10 | #include "ser.h" |
11 | |
12 | static bool rtw89_pci_disable_clkreq; |
13 | static bool rtw89_pci_disable_aspm_l1; |
14 | static bool rtw89_pci_disable_l1ss; |
15 | module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); |
16 | module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); |
17 | module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); |
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
21 | |
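/* Reset the PCIe BD RAM: set the self-clearing B_AX_RST_BDRAM bit, then
 * busy-poll (one read per microsecond, bounded by
 * RTW89_PCI_POLL_BDRAM_RST_CNT) until hardware clears the bit again.
 */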
22 | static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev) |
23 | { |
24 | u32 val; |
25 | int ret; |
26 | |
27 | rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM); |
28 | |
29 | ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), |
30 | 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, |
31 | rtwdev, R_AX_PCIE_INIT_CFG1); |
32 | |
33 | return ret; |
34 | } |
35 | |
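/* Compute how many descriptors the hardware consumed since the last check,
 * i.e. the circular distance between the index read back from the ring
 * register and the cached one. Worked example: with len = 256, rp = 250
 * and cur_rp = 3 the distance wraps, so
 * cnt = len - (rp - cur_rp) = 256 - 247 = 9 completed descriptors.
 * On chips where an equal read/write pointer means "full" rather than
 * "empty" (rx_ring_eq_is_full), the host keeps one slot of slack on RX,
 * hence the wp + 1 below.
 */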
36 | static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, |
37 | struct rtw89_pci_dma_ring *bd_ring, |
38 | u32 cur_idx, bool tx) |
39 | { |
40 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
41 | u32 cnt, cur_rp, wp, rp, len; |
42 | |
43 | rp = bd_ring->rp; |
44 | wp = bd_ring->wp; |
45 | len = bd_ring->len; |
46 | |
47 | cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); |
48 | if (tx) { |
49 | cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); |
50 | } else { |
51 | if (info->rx_ring_eq_is_full) |
52 | wp += 1; |
53 | |
54 | cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); |
55 | } |
56 | |
57 | bd_ring->rp = cur_rp; |
58 | |
59 | return cnt; |
60 | } |
61 | |
62 | static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, |
63 | struct rtw89_pci_tx_ring *tx_ring) |
64 | { |
65 | struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; |
66 | u32 addr_idx = bd_ring->addr.idx; |
67 | u32 cnt, idx; |
68 | |
	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
71 | |
72 | return cnt; |
73 | } |
74 | |
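/* Release H2C (fwcmd) skbs in two stages: completed entries first move from
 * h2c_queue to h2c_release_queue, and unless release_all is set the newest
 * RTW89_PCI_MULTITAG entries stay mapped a little longer -- presumably so
 * buffers the firmware may still be reading are not unmapped too early --
 * before being DMA-unmapped and freed.
 */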
75 | static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, |
76 | struct rtw89_pci *rtwpci, |
77 | u32 cnt, bool release_all) |
78 | { |
79 | struct rtw89_pci_tx_data *tx_data; |
80 | struct sk_buff *skb; |
81 | u32 qlen; |
82 | |
83 | while (cnt--) { |
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
100 | return; |
101 | } |
102 | tx_data = RTW89_PCI_TX_SKB_CB(skb); |
103 | dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, |
104 | DMA_TO_DEVICE); |
105 | dev_kfree_skb_any(skb); |
106 | } |
107 | } |
108 | |
109 | static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, |
110 | struct rtw89_pci *rtwpci) |
111 | { |
112 | struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; |
113 | u32 cnt; |
114 | |
115 | cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); |
116 | if (!cnt) |
117 | return; |
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
119 | } |
120 | |
121 | static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, |
122 | struct rtw89_pci_rx_ring *rx_ring) |
123 | { |
124 | struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; |
125 | u32 addr_idx = bd_ring->addr.idx; |
126 | u32 cnt, idx; |
127 | |
	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
130 | |
131 | return cnt; |
132 | } |
133 | |
134 | static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, |
135 | struct sk_buff *skb) |
136 | { |
137 | struct rtw89_pci_rx_info *rx_info; |
138 | dma_addr_t dma; |
139 | |
140 | rx_info = RTW89_PCI_RX_SKB_CB(skb); |
141 | dma = rx_info->dma; |
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
144 | } |
145 | |
146 | static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, |
147 | struct sk_buff *skb) |
148 | { |
149 | struct rtw89_pci_rx_info *rx_info; |
150 | dma_addr_t dma; |
151 | |
152 | rx_info = RTW89_PCI_RX_SKB_CB(skb); |
153 | dma = rx_info->dma; |
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
156 | } |
157 | |
158 | static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, |
159 | struct sk_buff *skb) |
160 | { |
161 | struct rtw89_pci_rxbd_info *rxbd_info; |
162 | struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); |
163 | |
164 | rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; |
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
169 | } |
170 | |
171 | static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, |
172 | struct rtw89_pci_rx_ring *rx_ring, |
173 | struct sk_buff *skb) |
174 | { |
175 | struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); |
176 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
177 | u32 target_rx_tag; |
178 | |
179 | if (!info->check_rx_tag) |
180 | return 0; |
181 | |
182 | /* valid range is 1 ~ 0x1FFF */ |
183 | if (rx_ring->target_rx_tag == 0) |
184 | target_rx_tag = 1; |
185 | else |
186 | target_rx_tag = rx_ring->target_rx_tag; |
187 | |
188 | if (rx_info->tag != target_rx_tag) { |
189 | rtw89_debug(rtwdev, mask: RTW89_DBG_UNEXP, fmt: "mismatch RX tag 0x%x 0x%x\n" , |
190 | rx_info->tag, target_rx_tag); |
191 | return -EAGAIN; |
192 | } |
193 | |
194 | return 0; |
195 | } |
196 | |
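/* The rxbd_info header lives in DMA memory written by the device, so a
 * just-advanced BD may not be visible to the CPU yet. Re-sync and re-parse
 * up to rx_tag_retry times until the RX tag matches the expected sequence
 * number; a persistent mismatch falls through with -EAGAIN.
 */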
197 | static |
198 | int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, |
199 | struct rtw89_pci_rx_ring *rx_ring, |
200 | struct sk_buff *skb) |
201 | { |
202 | struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); |
203 | int rx_tag_retry = 100; |
204 | int ret; |
205 | |
206 | do { |
207 | rtw89_pci_sync_skb_for_cpu(rtwdev, skb); |
208 | rtw89_pci_rxbd_info_update(rtwdev, skb); |
209 | |
210 | ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb); |
211 | if (ret != -EAGAIN) |
212 | break; |
213 | } while (rx_tag_retry--); |
214 | |
215 | /* update target rx_tag for next RX */ |
216 | rx_ring->target_rx_tag = rx_info->tag + 1; |
217 | |
218 | return ret; |
219 | } |
220 | |
221 | static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable) |
222 | { |
223 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
224 | const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; |
225 | const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2; |
226 | |
227 | if (enable) { |
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
235 | } |
236 | } |
237 | |
238 | static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable) |
239 | { |
240 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
241 | const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; |
242 | |
243 | if (enable) |
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
247 | } |
248 | |
249 | static bool |
250 | rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, |
251 | struct sk_buff *new, |
252 | const struct sk_buff *skb, u32 offset, |
253 | const struct rtw89_pci_rx_info *rx_info, |
254 | const struct rtw89_rx_desc_info *desc_info) |
255 | { |
256 | u32 copy_len = rx_info->len - offset; |
257 | |
258 | if (unlikely(skb_tailroom(new) < copy_len)) { |
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
264 | /* length of a single segment skb is desc_info->pkt_size */ |
265 | if (fs && ls) { |
266 | copy_len = desc_info->pkt_size; |
267 | } else { |
268 | rtw89_info(rtwdev, "drop rx data due to invalid length\n" ); |
269 | return false; |
270 | } |
271 | } |
272 | |
	skb_put_data(new, skb->data + offset, copy_len);
274 | |
275 | return true; |
276 | } |
277 | |
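/* Map the ring write pointer to the buffer slot holding the next packet.
 * On chips where idx == wp means "ring full" (rx_ring_eq_is_full) the host
 * deliberately lags one slot behind, so the slot to process is wp + 1
 * modulo the ring length.
 */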
278 | static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev, |
279 | struct rtw89_pci_dma_ring *bd_ring) |
280 | { |
281 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
282 | u32 wp = bd_ring->wp; |
283 | |
284 | if (!info->rx_ring_eq_is_full) |
285 | return wp; |
286 | |
287 | if (++wp >= bd_ring->len) |
288 | wp = 0; |
289 | |
290 | return wp; |
291 | } |
292 | |
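/* Deliver the payload of one RX buffer descriptor. A packet may span
 * several BDs: the first segment (fs) carries the RX descriptor and
 * triggers allocation of the target skb, later segments contribute data
 * after their per-BD rxbd_info header, and the last segment (ls) hands the
 * reassembled skb to rtw89_core_rx().
 */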
293 | static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, |
294 | struct rtw89_pci_rx_ring *rx_ring) |
295 | { |
296 | struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; |
297 | struct rtw89_pci_rx_info *rx_info; |
298 | struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; |
299 | struct sk_buff *new = rx_ring->diliver_skb; |
300 | struct sk_buff *skb; |
301 | u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); |
302 | u32 skb_idx; |
303 | u32 offset; |
304 | u32 cnt = 1; |
305 | bool fs, ls; |
306 | int ret; |
307 | |
308 | skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); |
309 | skb = rx_ring->buf[skb_idx]; |
310 | |
311 | ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); |
312 | if (ret) { |
313 | rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n" , |
314 | bd_ring->wp, ret); |
315 | goto err_sync_device; |
316 | } |
317 | |
318 | rx_info = RTW89_PCI_RX_SKB_CB(skb); |
319 | fs = rx_info->fs; |
320 | ls = rx_info->ls; |
321 | |
322 | if (fs) { |
323 | if (new) { |
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
326 | goto err_sync_device; |
327 | } |
328 | if (desc_info->ready) { |
329 | rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n" ); |
330 | goto err_sync_device; |
331 | } |
332 | |
		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
336 | if (!new) |
337 | goto err_sync_device; |
338 | |
339 | rx_ring->diliver_skb = new; |
340 | |
341 | /* first segment has RX desc */ |
342 | offset = desc_info->offset + desc_info->rxd_len; |
343 | } else { |
344 | offset = sizeof(struct rtw89_pci_rxbd_info); |
345 | if (!new) { |
346 | rtw89_debug(rtwdev, mask: RTW89_DBG_UNEXP, fmt: "no last skb\n" ); |
347 | goto err_sync_device; |
348 | } |
349 | } |
350 | if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) |
351 | goto err_sync_device; |
352 | rtw89_pci_sync_skb_for_device(rtwdev, skb); |
	rtw89_pci_rxbd_increase(rx_ring, 1);
354 | |
355 | if (!desc_info->ready) { |
356 | rtw89_warn(rtwdev, "no rx desc information\n" ); |
357 | goto err_free_resource; |
358 | } |
359 | if (ls) { |
360 | rtw89_core_rx(rtwdev, desc_info, skb: new); |
361 | rx_ring->diliver_skb = NULL; |
362 | desc_info->ready = false; |
363 | } |
364 | |
365 | return cnt; |
366 | |
367 | err_sync_device: |
368 | rtw89_pci_sync_skb_for_device(rtwdev, skb); |
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
373 | rx_ring->diliver_skb = NULL; |
374 | desc_info->ready = false; |
375 | |
376 | return cnt; |
377 | } |
378 | |
379 | static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, |
380 | struct rtw89_pci_rx_ring *rx_ring, |
381 | u32 cnt) |
382 | { |
383 | struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; |
384 | u32 rx_cnt; |
385 | |
386 | while (cnt && rtwdev->napi_budget_countdown > 0) { |
387 | rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); |
388 | if (!rx_cnt) { |
389 | rtw89_err(rtwdev, "failed to deliver RXBD skb\n" ); |
390 | |
391 | /* skip the rest RXBD bufs */ |
392 | rtw89_pci_rxbd_increase(rx_ring, cnt); |
393 | break; |
394 | } |
395 | |
396 | cnt -= rx_cnt; |
397 | } |
398 | |
	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
400 | } |
401 | |
402 | static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, |
403 | struct rtw89_pci *rtwpci, int budget) |
404 | { |
405 | struct rtw89_pci_rx_ring *rx_ring; |
406 | int countdown = rtwdev->napi_budget_countdown; |
407 | u32 cnt; |
408 | |
409 | rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; |
410 | |
411 | cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); |
412 | if (!cnt) |
413 | return 0; |
414 | |
415 | cnt = min_t(u32, budget, cnt); |
416 | |
417 | rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); |
418 | |
	/* The countdown may go negative when flushing pending SKBs. */
420 | if (rtwdev->napi_budget_countdown <= 0) |
421 | return budget; |
422 | |
423 | return budget - countdown; |
424 | } |
425 | |
426 | static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, |
427 | struct rtw89_pci_tx_ring *tx_ring, |
428 | struct sk_buff *skb, u8 tx_status) |
429 | { |
430 | struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); |
431 | struct ieee80211_tx_info *info; |
432 | |
	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
434 | |
435 | info = IEEE80211_SKB_CB(skb); |
436 | ieee80211_tx_info_clear_status(info); |
437 | |
438 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) |
439 | info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; |
440 | if (tx_status == RTW89_TX_DONE) { |
441 | info->flags |= IEEE80211_TX_STAT_ACK; |
442 | tx_ring->tx_acked++; |
443 | } else { |
444 | if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) |
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
447 | switch (tx_status) { |
448 | case RTW89_TX_RETRY_LIMIT: |
449 | tx_ring->tx_retry_lmt++; |
450 | break; |
451 | case RTW89_TX_LIFE_TIME: |
452 | tx_ring->tx_life_time++; |
453 | break; |
454 | case RTW89_TX_MACID_DROP: |
455 | tx_ring->tx_mac_id_drop++; |
456 | break; |
457 | default: |
458 | rtw89_warn(rtwdev, "invalid TX status %x\n" , tx_status); |
459 | break; |
460 | } |
461 | } |
462 | |
	ieee80211_tx_status_ni(rtwdev->hw, skb);
464 | } |
465 | |
466 | static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) |
467 | { |
468 | struct rtw89_pci_tx_wd *txwd; |
469 | u32 cnt; |
470 | |
471 | cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); |
472 | while (cnt--) { |
473 | txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); |
474 | if (!txwd) { |
475 | rtw89_warn(rtwdev, "No busy txwd pages available\n" ); |
476 | break; |
477 | } |
478 | |
		list_del_init(&txwd->list);
480 | |
481 | /* this skb has been freed by RPP */ |
		if (skb_queue_len(&txwd->queue) == 0)
483 | rtw89_pci_enqueue_txwd(tx_ring, txwd); |
484 | } |
485 | } |
486 | |
487 | static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, |
488 | struct rtw89_pci_tx_ring *tx_ring) |
489 | { |
490 | struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; |
491 | struct rtw89_pci_tx_wd *txwd; |
492 | int i; |
493 | |
494 | for (i = 0; i < wd_ring->page_num; i++) { |
495 | txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); |
496 | if (!txwd) |
497 | break; |
498 | |
		list_del_init(&txwd->list);
500 | } |
501 | } |
502 | |
503 | static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, |
504 | struct rtw89_pci_tx_ring *tx_ring, |
505 | struct rtw89_pci_tx_wd *txwd, u16 seq, |
506 | u8 tx_status) |
507 | { |
508 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
509 | struct rtw89_pci_tx_data *tx_data; |
510 | struct sk_buff *skb, *tmp; |
511 | u8 txch = tx_ring->txch; |
512 | |
	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, RPP can arrive before the TX BD is
		 * updated. In normal mode this should not happen, so warn
		 * about it.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
520 | txch, seq); |
521 | } |
522 | |
523 | skb_queue_walk_safe(&txwd->queue, skb, tmp) { |
		skb_unlink(skb, &txwd->queue);
525 | |
526 | tx_data = RTW89_PCI_TX_SKB_CB(skb); |
527 | dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, |
528 | DMA_TO_DEVICE); |
529 | |
530 | rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); |
531 | } |
532 | |
	if (list_empty(&txwd->list))
534 | rtw89_pci_enqueue_txwd(tx_ring, txwd); |
535 | } |
536 | |
537 | static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, |
538 | struct rtw89_pci_rpp_fmt *rpp) |
539 | { |
540 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
541 | struct rtw89_pci_tx_ring *tx_ring; |
542 | struct rtw89_pci_tx_wd_ring *wd_ring; |
543 | struct rtw89_pci_tx_wd *txwd; |
544 | u16 seq; |
545 | u8 qsel, tx_status, txch; |
546 | |
	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
550 | txch = rtw89_core_get_ch_dma(rtwdev, qsel); |
551 | |
552 | if (txch == RTW89_TXCH_CH12) { |
553 | rtw89_warn(rtwdev, "should no fwcmd release report\n" ); |
554 | return; |
555 | } |
556 | |
557 | tx_ring = &rtwpci->tx_rings[txch]; |
558 | wd_ring = &tx_ring->wd_ring; |
559 | txwd = &wd_ring->pages[seq]; |
560 | |
561 | rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); |
562 | } |
563 | |
564 | static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, |
565 | struct rtw89_pci_tx_ring *tx_ring) |
566 | { |
567 | struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; |
568 | struct rtw89_pci_tx_wd *txwd; |
569 | int i; |
570 | |
571 | for (i = 0; i < wd_ring->page_num; i++) { |
572 | txwd = &wd_ring->pages[i]; |
573 | |
		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
578 | } |
579 | } |
580 | |
581 | static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, |
582 | struct rtw89_pci_rx_ring *rx_ring, |
583 | u32 max_cnt) |
584 | { |
585 | struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; |
586 | struct rtw89_pci_rx_info *rx_info; |
587 | struct rtw89_pci_rpp_fmt *rpp; |
588 | struct rtw89_rx_desc_info desc_info = {}; |
589 | struct sk_buff *skb; |
590 | u32 cnt = 0; |
591 | u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); |
592 | u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); |
593 | u32 skb_idx; |
594 | u32 offset; |
595 | int ret; |
596 | |
597 | skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); |
598 | skb = rx_ring->buf[skb_idx]; |
599 | |
600 | ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); |
601 | if (ret) { |
602 | rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n" , |
603 | bd_ring->wp, ret); |
604 | goto err_sync_device; |
605 | } |
606 | |
607 | rx_info = RTW89_PCI_RX_SKB_CB(skb); |
608 | if (!rx_info->fs || !rx_info->ls) { |
609 | rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n" ); |
610 | return cnt; |
611 | } |
612 | |
613 | rtw89_chip_query_rxdesc(rtwdev, desc_info: &desc_info, data: skb->data, data_offset: rxinfo_size); |
614 | |
615 | /* first segment has RX desc */ |
616 | offset = desc_info.offset + desc_info.rxd_len; |
617 | for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { |
618 | rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); |
619 | rtw89_pci_release_rpp(rtwdev, rpp); |
620 | } |
621 | |
622 | rtw89_pci_sync_skb_for_device(rtwdev, skb); |
	rtw89_pci_rxbd_increase(rx_ring, 1);
624 | cnt++; |
625 | |
626 | return cnt; |
627 | |
628 | err_sync_device: |
629 | rtw89_pci_sync_skb_for_device(rtwdev, skb); |
630 | return 0; |
631 | } |
632 | |
633 | static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, |
634 | struct rtw89_pci_rx_ring *rx_ring, |
635 | u32 cnt) |
636 | { |
637 | struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; |
638 | u32 release_cnt; |
639 | |
640 | while (cnt) { |
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD bufs */
646 | rtw89_pci_rxbd_increase(rx_ring, cnt); |
647 | break; |
648 | } |
649 | |
650 | cnt -= release_cnt; |
651 | } |
652 | |
	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
654 | } |
655 | |
656 | static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, |
657 | struct rtw89_pci *rtwpci, int budget) |
658 | { |
659 | struct rtw89_pci_rx_ring *rx_ring; |
660 | u32 cnt; |
661 | int work_done; |
662 | |
663 | rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; |
664 | |
	spin_lock_bh(&rtwpci->trx_lock);
666 | |
667 | cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); |
668 | if (cnt == 0) |
669 | goto out_unlock; |
670 | |
671 | rtw89_pci_release_tx(rtwdev, rx_ring, cnt); |
672 | |
673 | out_unlock: |
	spin_unlock_bh(&rtwpci->trx_lock);
675 | |
676 | /* always release all RPQ */ |
677 | work_done = min_t(int, cnt, budget); |
678 | rtwdev->napi_budget_countdown -= work_done; |
679 | |
680 | return work_done; |
681 | } |
682 | |
683 | static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, |
684 | struct rtw89_pci *rtwpci) |
685 | { |
686 | struct rtw89_pci_rx_ring *rx_ring; |
687 | struct rtw89_pci_dma_ring *bd_ring; |
688 | u32 reg_idx; |
689 | u16 hw_idx, hw_idx_next, host_idx; |
690 | int i; |
691 | |
692 | for (i = 0; i < RTW89_RXCH_NUM; i++) { |
693 | rx_ring = &rtwpci->rx_rings[i]; |
694 | bd_ring = &rx_ring->bd_ring; |
695 | |
		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
697 | hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); |
698 | host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); |
699 | hw_idx_next = (hw_idx + 1) % bd_ring->len; |
700 | |
701 | if (hw_idx_next == host_idx) |
702 | rtw89_debug(rtwdev, mask: RTW89_DBG_UNEXP, fmt: "%d RXD unavailable\n" , i); |
703 | |
704 | rtw89_debug(rtwdev, mask: RTW89_DBG_TXRX, |
705 | fmt: "%d RXD unavailable, idx=0x%08x, len=%d\n" , |
706 | i, reg_idx, bd_ring->len); |
707 | } |
708 | } |
709 | |
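/* Interrupt recognition is read-mask-ack: read each interrupt status
 * register, mask it with the interrupts we enabled, and write the result
 * back, which acknowledges exactly the bits about to be handled (the
 * status registers behave as write-1-to-clear).
 */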
710 | void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, |
711 | struct rtw89_pci *rtwpci, |
712 | struct rtw89_pci_isrs *isrs) |
713 | { |
714 | isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; |
715 | isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; |
716 | isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; |
717 | |
	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
721 | } |
722 | EXPORT_SYMBOL(rtw89_pci_recognize_intrs); |
723 | |
724 | void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, |
725 | struct rtw89_pci *rtwpci, |
726 | struct rtw89_pci_isrs *isrs) |
727 | { |
728 | isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; |
729 | isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? |
730 | rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; |
731 | isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? |
732 | rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; |
733 | isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? |
734 | rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; |
735 | |
736 | if (isrs->halt_c2h_isrs) |
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
742 | } |
743 | EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); |
744 | |
745 | void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev, |
746 | struct rtw89_pci *rtwpci, |
747 | struct rtw89_pci_isrs *isrs) |
748 | { |
749 | isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; |
750 | isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? |
751 | rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; |
752 | isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ? |
753 | rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0; |
754 | isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; |
755 | |
756 | if (isrs->halt_c2h_isrs) |
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
763 | } |
764 | EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2); |
765 | |
766 | void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) |
767 | { |
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
771 | } |
772 | EXPORT_SYMBOL(rtw89_pci_enable_intr); |
773 | |
774 | void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) |
775 | { |
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
779 | } |
780 | EXPORT_SYMBOL(rtw89_pci_disable_intr); |
781 | |
782 | void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) |
783 | { |
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
788 | } |
789 | EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); |
790 | |
791 | void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) |
792 | { |
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
794 | } |
795 | EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); |
796 | |
797 | void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) |
798 | { |
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
803 | } |
804 | EXPORT_SYMBOL(rtw89_pci_enable_intr_v2); |
805 | |
806 | void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) |
807 | { |
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
810 | } |
811 | EXPORT_SYMBOL(rtw89_pci_disable_intr_v2); |
812 | |
813 | static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) |
814 | { |
815 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
816 | unsigned long flags; |
817 | |
818 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
819 | rtw89_chip_disable_intr(rtwdev, rtwpci); |
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
823 | } |
824 | |
825 | static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) |
826 | { |
827 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
828 | unsigned long flags; |
829 | |
830 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
831 | rtw89_chip_disable_intr(rtwdev, rtwpci); |
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
835 | } |
836 | |
837 | static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) |
838 | { |
839 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
840 | int budget = NAPI_POLL_WEIGHT; |
841 | |
	/* Prevent the RXQ from getting stuck by running out of budget. */
843 | rtwdev->napi_budget_countdown = budget; |
844 | |
845 | rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); |
846 | rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); |
847 | } |
848 | |
849 | static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) |
850 | { |
851 | struct rtw89_dev *rtwdev = dev; |
852 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
853 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
854 | const struct rtw89_pci_gen_def *gen_def = info->gen_def; |
855 | struct rtw89_pci_isrs isrs; |
856 | unsigned long flags; |
857 | |
858 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
861 | |
862 | if (unlikely(isrs.isrs[0] & gen_def->isr_rdu)) |
863 | rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); |
864 | |
865 | if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h)) |
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
870 | |
871 | if (unlikely(rtwpci->under_recovery)) |
872 | goto enable_intr; |
873 | |
874 | if (unlikely(rtwpci->low_power)) { |
875 | rtw89_pci_low_power_interrupt_handler(rtwdev); |
876 | goto enable_intr; |
877 | } |
878 | |
879 | if (likely(rtwpci->running)) { |
880 | local_bh_disable(); |
		napi_schedule(&rtwdev->napi);
882 | local_bh_enable(); |
883 | } |
884 | |
885 | return IRQ_HANDLED; |
886 | |
887 | enable_intr: |
888 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
889 | if (likely(rtwpci->running)) |
890 | rtw89_chip_enable_intr(rtwdev, rtwpci); |
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
892 | return IRQ_HANDLED; |
893 | } |
894 | |
895 | static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) |
896 | { |
897 | struct rtw89_dev *rtwdev = dev; |
898 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
899 | unsigned long flags; |
900 | irqreturn_t irqret = IRQ_WAKE_THREAD; |
901 | |
902 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
903 | |
	/* An interrupt event already in flight can still trigger this
	 * handler even after pci_stop() has turned off the IMR.
	 */
907 | if (unlikely(!rtwpci->running)) { |
908 | irqret = IRQ_HANDLED; |
909 | goto exit; |
910 | } |
911 | |
912 | rtw89_chip_disable_intr(rtwdev, rtwpci); |
913 | exit: |
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
915 | |
916 | return irqret; |
917 | } |
918 | |
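/* The DEF_*CHADDRS macros build channel address entries by token-pasting
 * the register generation, channel name and an optional version suffix.
 * For example, DEF_TXCHADDRS(info, ACH0) expands to:
 *
 *	[RTW89_TXCH_ACH0] = {
 *		.num = R_AX_ACH0_TXBD_NUM,
 *		.idx = R_AX_ACH0_TXBD_IDX,
 *		.bdram = R_AX_ACH0_BDRAM_CTRL,
 *		.desa_l = R_AX_ACH0_TXBD_DESA_L,
 *		.desa_h = R_AX_ACH0_TXBD_DESA_H,
 *	}
 */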
919 | #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \ |
920 | [RTW89_TXCH_##ch_idx] = { \ |
921 | .num = R_##gen##_##txch##_TXBD_NUM ##v, \ |
922 | .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ |
923 | .bdram = 0, \ |
924 | .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \ |
925 | .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \ |
926 | } |
927 | |
928 | #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ |
929 | [RTW89_TXCH_##txch] = { \ |
930 | .num = R_AX_##txch##_TXBD_NUM ##v, \ |
931 | .idx = R_AX_##txch##_TXBD_IDX ##v, \ |
932 | .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ |
933 | .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ |
934 | .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ |
935 | } |
936 | |
937 | #define DEF_TXCHADDRS(info, txch, v...) \ |
938 | [RTW89_TXCH_##txch] = { \ |
939 | .num = R_AX_##txch##_TXBD_NUM, \ |
940 | .idx = R_AX_##txch##_TXBD_IDX, \ |
941 | .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ |
942 | .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ |
943 | .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ |
944 | } |
945 | |
946 | #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \ |
947 | [RTW89_RXCH_##ch_idx] = { \ |
948 | .num = R_##gen##_##rxch##_RXBD_NUM ##v, \ |
949 | .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \ |
950 | .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \ |
951 | .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \ |
952 | } |
953 | |
954 | const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { |
955 | .tx = { |
956 | DEF_TXCHADDRS(info, ACH0), |
957 | DEF_TXCHADDRS(info, ACH1), |
958 | DEF_TXCHADDRS(info, ACH2), |
959 | DEF_TXCHADDRS(info, ACH3), |
960 | DEF_TXCHADDRS(info, ACH4), |
961 | DEF_TXCHADDRS(info, ACH5), |
962 | DEF_TXCHADDRS(info, ACH6), |
963 | DEF_TXCHADDRS(info, ACH7), |
964 | DEF_TXCHADDRS(info, CH8), |
965 | DEF_TXCHADDRS(info, CH9), |
966 | DEF_TXCHADDRS_TYPE1(info, CH10), |
967 | DEF_TXCHADDRS_TYPE1(info, CH11), |
968 | DEF_TXCHADDRS(info, CH12), |
969 | }, |
970 | .rx = { |
971 | DEF_RXCHADDRS(AX, RXQ, RXQ), |
972 | DEF_RXCHADDRS(AX, RPQ, RPQ), |
973 | }, |
974 | }; |
975 | EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); |
976 | |
977 | const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { |
978 | .tx = { |
979 | DEF_TXCHADDRS(info, ACH0, _V1), |
980 | DEF_TXCHADDRS(info, ACH1, _V1), |
981 | DEF_TXCHADDRS(info, ACH2, _V1), |
982 | DEF_TXCHADDRS(info, ACH3, _V1), |
983 | DEF_TXCHADDRS(info, ACH4, _V1), |
984 | DEF_TXCHADDRS(info, ACH5, _V1), |
985 | DEF_TXCHADDRS(info, ACH6, _V1), |
986 | DEF_TXCHADDRS(info, ACH7, _V1), |
987 | DEF_TXCHADDRS(info, CH8, _V1), |
988 | DEF_TXCHADDRS(info, CH9, _V1), |
989 | DEF_TXCHADDRS_TYPE1(info, CH10, _V1), |
990 | DEF_TXCHADDRS_TYPE1(info, CH11, _V1), |
991 | DEF_TXCHADDRS(info, CH12, _V1), |
992 | }, |
993 | .rx = { |
994 | DEF_RXCHADDRS(AX, RXQ, RXQ, _V1), |
995 | DEF_RXCHADDRS(AX, RPQ, RPQ, _V1), |
996 | }, |
997 | }; |
998 | EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); |
999 | |
1000 | const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = { |
1001 | .tx = { |
1002 | DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1), |
1003 | DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1), |
1004 | DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1), |
1005 | DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1), |
1006 | DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1), |
1007 | DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1), |
1008 | DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1), |
1009 | DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1), |
1010 | DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1), |
1011 | DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1), |
1012 | DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1), |
1013 | DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1), |
1014 | DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1), |
1015 | }, |
1016 | .rx = { |
1017 | DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1), |
1018 | DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1), |
1019 | }, |
1020 | }; |
1021 | EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be); |
1022 | |
1023 | #undef DEF_TXCHADDRS_TYPE1 |
1024 | #undef DEF_TXCHADDRS |
1025 | #undef DEF_RXCHADDRS |
1026 | |
1027 | static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, |
1028 | enum rtw89_tx_channel txch, |
1029 | const struct rtw89_pci_ch_dma_addr **addr) |
1030 | { |
1031 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1032 | |
1033 | if (txch >= RTW89_TXCH_NUM) |
1034 | return -EINVAL; |
1035 | |
1036 | *addr = &info->dma_addr_set->tx[txch]; |
1037 | |
1038 | return 0; |
1039 | } |
1040 | |
1041 | static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, |
1042 | enum rtw89_rx_channel rxch, |
1043 | const struct rtw89_pci_ch_dma_addr **addr) |
1044 | { |
1045 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1046 | |
1047 | if (rxch >= RTW89_RXCH_NUM) |
1048 | return -EINVAL; |
1049 | |
1050 | *addr = &info->dma_addr_set->rx[rxch]; |
1051 | |
1052 | return 0; |
1053 | } |
1054 | |
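/* Worked example of the reserved-slot convention used below: with
 * len = 128 and wp == rp the ring is empty and 127 BDs are available;
 * with wp = 5 and rp = 3 it is 128 - (5 - 3) - 1 = 125.
 */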
1055 | static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) |
1056 | { |
1057 | struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; |
1058 | |
	/* one descriptor is reserved so a full ring can be told from an empty one */
1060 | if (bd_ring->rp > bd_ring->wp) |
1061 | return bd_ring->rp - bd_ring->wp - 1; |
1062 | |
1063 | return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; |
1064 | } |
1065 | |
1066 | static |
1067 | u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) |
1068 | { |
1069 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1070 | struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; |
1071 | u32 cnt; |
1072 | |
	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);
1077 | |
1078 | return cnt; |
1079 | } |
1080 | |
1081 | static |
1082 | u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, |
1083 | u8 txch) |
1084 | { |
1085 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1086 | struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; |
1087 | struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; |
1088 | u32 cnt; |
1089 | |
	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);
1094 | |
1095 | return cnt; |
1096 | } |
1097 | |
1098 | static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, |
1099 | u8 txch) |
1100 | { |
1101 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1102 | struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; |
1103 | struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; |
1104 | const struct rtw89_chip_info *chip = rtwdev->chip; |
1105 | u32 bd_cnt, wd_cnt, min_cnt = 0; |
1106 | struct rtw89_pci_rx_ring *rx_ring; |
1107 | enum rtw89_debug_mask debug_mask; |
1108 | u32 cnt; |
1109 | |
1110 | rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; |
1111 | |
	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1114 | wd_cnt = wd_ring->curr_num; |
1115 | |
1116 | if (wd_cnt == 0 || bd_cnt == 0) { |
1117 | cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); |
1118 | if (cnt) |
1119 | rtw89_pci_release_tx(rtwdev, rx_ring, cnt); |
1120 | else if (wd_cnt == 0) |
1121 | goto out_unlock; |
1122 | |
		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1124 | if (bd_cnt == 0) |
1125 | rtw89_pci_reclaim_txbd(rtwdev, tx_ring); |
1126 | } |
1127 | |
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1129 | wd_cnt = wd_ring->curr_num; |
1130 | min_cnt = min(bd_cnt, wd_cnt); |
1131 | if (min_cnt == 0) { |
		/* This message can show up frequently in low power mode or
		 * under high traffic with small-FIFO chips; it is recognized
		 * as normal behavior, so print it with mask RTW89_DBG_TXRX in
		 * those situations.
		 */
1136 | if (rtwpci->low_power || chip->small_fifo_size) |
1137 | debug_mask = RTW89_DBG_TXRX; |
1138 | else |
1139 | debug_mask = RTW89_DBG_UNEXP; |
1140 | |
		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
1143 | wd_cnt, bd_cnt); |
1144 | } |
1145 | |
1146 | out_unlock: |
	spin_unlock_bh(&rtwpci->trx_lock);
1148 | |
1149 | return min_cnt; |
1150 | } |
1151 | |
1152 | static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, |
1153 | u8 txch) |
1154 | { |
1155 | if (rtwdev->hci.paused) |
1156 | return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); |
1157 | |
1158 | if (txch == RTW89_TXCH_CH12) |
1159 | return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); |
1160 | |
1161 | return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); |
1162 | } |
1163 | |
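/* Kicking off TX is a doorbell write: publish the ring's host write
 * pointer to the channel's idx register so the DMA engine starts fetching
 * the newly queued BDs.
 */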
1164 | static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) |
1165 | { |
1166 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1167 | struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; |
1168 | u32 host_idx, addr; |
1169 | |
	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
1177 | } |
1178 | |
1179 | static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, |
1180 | int n_txbd) |
1181 | { |
1182 | struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; |
1183 | u32 host_idx, len; |
1184 | |
1185 | len = bd_ring->len; |
1186 | host_idx = bd_ring->wp + n_txbd; |
1187 | host_idx = host_idx < len ? host_idx : host_idx - len; |
1188 | |
1189 | bd_ring->wp = host_idx; |
1190 | } |
1191 | |
1192 | static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) |
1193 | { |
1194 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1195 | struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; |
1196 | |
1197 | if (rtwdev->hci.paused) { |
		set_bit(txch, rtwpci->kick_map);
1199 | return; |
1200 | } |
1201 | |
1202 | __rtw89_pci_tx_kick_off(rtwdev, tx_ring); |
1203 | } |
1204 | |
1205 | static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) |
1206 | { |
1207 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1208 | struct rtw89_pci_tx_ring *tx_ring; |
1209 | int txch; |
1210 | |
1211 | for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { |
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
1213 | continue; |
1214 | |
1215 | tx_ring = &rtwpci->tx_rings[txch]; |
1216 | __rtw89_pci_tx_kick_off(rtwdev, tx_ring); |
1217 | } |
1218 | } |
1219 | |
1220 | static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) |
1221 | { |
1222 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1223 | struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; |
1224 | struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; |
1225 | u32 cur_idx, cur_rp; |
1226 | u8 i; |
1227 | |
	/* Because the time taken by the I/O varies, it's hard to define a
	 * reasonable fixed total timeout for the read_poll_timeout* helpers.
	 * Instead, ensure a reasonable number of polling attempts by using a
	 * plain for loop with udelay here.
	 */
1233 | for (i = 0; i < 60; i++) { |
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
1235 | cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); |
1236 | if (cur_rp == bd_ring->wp) |
1237 | return; |
1238 | |
1239 | udelay(1); |
1240 | } |
1241 | |
1242 | if (!drop) |
1243 | rtw89_info(rtwdev, "timed out to flush pci txch: %d\n" , txch); |
1244 | } |
1245 | |
1246 | static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, |
1247 | bool drop) |
1248 | { |
1249 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1250 | u8 i; |
1251 | |
1252 | for (i = 0; i < RTW89_TXCH_NUM; i++) { |
1253 | /* It may be unnecessary to flush FWCMD queue. */ |
1254 | if (i == RTW89_TXCH_CH12) |
1255 | continue; |
1256 | if (info->tx_dma_ch_mask & BIT(i)) |
1257 | continue; |
1258 | |
1259 | if (txchs & BIT(i)) |
			__pci_flush_txch(rtwdev, i, drop);
1261 | } |
1262 | } |
1263 | |
1264 | static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, |
1265 | bool drop) |
1266 | { |
1267 | __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); |
1268 | } |
1269 | |
1270 | u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, |
1271 | void *txaddr_info_addr, u32 total_len, |
1272 | dma_addr_t dma, u8 *add_info_nr) |
1273 | { |
1274 | struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; |
1275 | |
1276 | txaddr_info->length = cpu_to_le16(total_len); |
1277 | txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | |
1278 | RTW89_PCI_ADDR_NUM(1)); |
1279 | txaddr_info->dma = cpu_to_le32(dma); |
1280 | |
1281 | *add_info_nr = 1; |
1282 | |
1283 | return sizeof(*txaddr_info); |
1284 | } |
1285 | EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); |
1286 | |
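/* The v1 address-info layout packs a length/option word plus a split
 * 32-bit DMA address per entry; buffers longer than
 * TXADDR_INFO_LENTHG_V1_MAX bytes are chopped into up to
 * RTW89_TXADDR_INFO_NR_V1 chunks, with the LS (last segment) bit set only
 * on the final one.
 */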
1287 | u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, |
1288 | void *txaddr_info_addr, u32 total_len, |
1289 | dma_addr_t dma, u8 *add_info_nr) |
1290 | { |
1291 | struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; |
1292 | u32 remain = total_len; |
1293 | u32 len; |
1294 | u16 length_option; |
1295 | int n; |
1296 | |
1297 | for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { |
1298 | len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? |
1299 | TXADDR_INFO_LENTHG_V1_MAX : remain; |
1300 | remain -= len; |
1301 | |
1302 | length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | |
1303 | FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | |
1304 | FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); |
1305 | txaddr_info->length_opt = cpu_to_le16(length_option); |
1306 | txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); |
1307 | txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); |
1308 | |
1309 | dma += len; |
1310 | txaddr_info++; |
1311 | } |
1312 | |
1313 | WARN_ONCE(remain, "length overflow remain=%u total_len=%u" , |
1314 | remain, total_len); |
1315 | |
1316 | *add_info_nr = n; |
1317 | |
1318 | return n * sizeof(*txaddr_info); |
1319 | } |
1320 | EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); |
1321 | |
1322 | static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, |
1323 | struct rtw89_pci_tx_ring *tx_ring, |
1324 | struct rtw89_pci_tx_wd *txwd, |
1325 | struct rtw89_core_tx_request *tx_req) |
1326 | { |
1327 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1328 | const struct rtw89_chip_info *chip = rtwdev->chip; |
1329 | struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; |
1330 | struct rtw89_pci_tx_wp_info *txwp_info; |
1331 | void *txaddr_info_addr; |
1332 | struct pci_dev *pdev = rtwpci->pdev; |
1333 | struct sk_buff *skb = tx_req->skb; |
1334 | struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); |
1335 | struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); |
1336 | bool en_wd_info = desc_info->en_wd_info; |
1337 | u32 txwd_len; |
1338 | u32 txwp_len; |
1339 | u32 txaddr_info_len; |
1340 | dma_addr_t dma; |
1341 | int ret; |
1342 | |
1343 | dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); |
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
1346 | ret = -EBUSY; |
1347 | goto err; |
1348 | } |
1349 | |
1350 | tx_data->dma = dma; |
1351 | rcu_assign_pointer(skb_data->wait, NULL); |
1352 | |
1353 | txwp_len = sizeof(*txwp_info); |
1354 | txwd_len = chip->txwd_body_size; |
1355 | txwd_len += en_wd_info ? chip->txwd_info_size : 0; |
1356 | |
1357 | txwp_info = txwd->vaddr + txwd_len; |
1358 | txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); |
1359 | txwp_info->seq1 = 0; |
1360 | txwp_info->seq2 = 0; |
1361 | txwp_info->seq3 = 0; |
1362 | |
1363 | tx_ring->tx_cnt++; |
1364 | txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; |
1365 | txaddr_info_len = |
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);
1368 | |
1369 | txwd->len = txwd_len + txwp_len + txaddr_info_len; |
1370 | |
	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1372 | |
1373 | skb_queue_tail(list: &txwd->queue, newsk: skb); |
1374 | |
1375 | return 0; |
1376 | |
1377 | err: |
1378 | return ret; |
1379 | } |
1380 | |
1381 | static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, |
1382 | struct rtw89_pci_tx_ring *tx_ring, |
1383 | struct rtw89_pci_tx_bd_32 *txbd, |
1384 | struct rtw89_core_tx_request *tx_req) |
1385 | { |
1386 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1387 | const struct rtw89_chip_info *chip = rtwdev->chip; |
1388 | struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; |
1389 | void *txdesc; |
1390 | int txdesc_size = chip->h2c_desc_size; |
1391 | struct pci_dev *pdev = rtwpci->pdev; |
1392 | struct sk_buff *skb = tx_req->skb; |
1393 | struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); |
1394 | dma_addr_t dma; |
1395 | |
	txdesc = skb_push(skb, txdesc_size);
1397 | memset(txdesc, 0, txdesc_size); |
1398 | rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); |
1399 | |
1400 | dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); |
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1403 | return -EBUSY; |
1404 | } |
1405 | |
1406 | tx_data->dma = dma; |
1407 | txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); |
1408 | txbd->length = cpu_to_le16(skb->len); |
1409 | txbd->dma = cpu_to_le32(tx_data->dma); |
1410 | skb_queue_tail(list: &rtwpci->h2c_queue, newsk: skb); |
1411 | |
	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1413 | |
1414 | return 0; |
1415 | } |
1416 | |
1417 | static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, |
1418 | struct rtw89_pci_tx_ring *tx_ring, |
1419 | struct rtw89_pci_tx_bd_32 *txbd, |
1420 | struct rtw89_core_tx_request *tx_req) |
1421 | { |
1422 | struct rtw89_pci_tx_wd *txwd; |
1423 | int ret; |
1424 | |
1425 | /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD |
1426 | * buffer with WD BODY only. So here we don't need to check the free |
1427 | * pages of the wd ring. |
1428 | */ |
1429 | if (tx_ring->txch == RTW89_TXCH_CH12) |
1430 | return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); |
1431 | |
1432 | txwd = rtw89_pci_dequeue_txwd(tx_ring); |
1433 | if (!txwd) { |
1434 | rtw89_err(rtwdev, "no available TXWD\n" ); |
1435 | ret = -ENOSPC; |
1436 | goto err; |
1437 | } |
1438 | |
1439 | ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); |
1440 | if (ret) { |
1441 | rtw89_err(rtwdev, "failed to submit TXWD %d\n" , txwd->seq); |
1442 | goto err_enqueue_wd; |
1443 | } |
1444 | |
	list_add_tail(&txwd->list, &tx_ring->busy_pages);
1446 | |
1447 | txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); |
1448 | txbd->length = cpu_to_le16(txwd->len); |
1449 | txbd->dma = cpu_to_le32(txwd->paddr); |
1450 | |
	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1452 | |
1453 | return 0; |
1454 | |
1455 | err_enqueue_wd: |
1456 | rtw89_pci_enqueue_txwd(tx_ring, txwd); |
1457 | err: |
1458 | return ret; |
1459 | } |
1460 | |
1461 | static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, |
1462 | u8 txch) |
1463 | { |
1464 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1465 | struct rtw89_pci_tx_ring *tx_ring; |
1466 | struct rtw89_pci_tx_bd_32 *txbd; |
1467 | u32 n_avail_txbd; |
1468 | int ret = 0; |
1469 | |
1470 | /* check the tx type and dma channel for fw cmd queue */ |
1471 | if ((txch == RTW89_TXCH_CH12 || |
1472 | tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && |
1473 | (txch != RTW89_TXCH_CH12 || |
1474 | tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { |
1475 | rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n" ); |
1476 | return -EINVAL; |
1477 | } |
1478 | |
1479 | tx_ring = &rtwpci->tx_rings[txch]; |
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
1485 | ret = -ENOSPC; |
1486 | goto err_unlock; |
1487 | } |
1488 | |
1489 | txbd = rtw89_pci_get_next_txbd(tx_ring); |
1490 | ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); |
1491 | if (ret) { |
1492 | rtw89_err(rtwdev, "failed to submit TXBD\n" ); |
1493 | goto err_unlock; |
1494 | } |
1495 | |
1496 | spin_unlock_bh(lock: &rtwpci->trx_lock); |
1497 | return 0; |
1498 | |
1499 | err_unlock: |
1500 | spin_unlock_bh(lock: &rtwpci->trx_lock); |
1501 | return ret; |
1502 | } |
1503 | |
1504 | static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) |
1505 | { |
1506 | struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; |
1507 | int ret; |
1508 | |
	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
1512 | return ret; |
1513 | } |
1514 | |
1515 | return 0; |
1516 | } |
1517 | |
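/* The BD RAM tables carve the shared on-chip descriptor RAM into
 * per-channel regions: start_idx is the first cell, max_num the most a
 * channel may occupy, and min_num its guaranteed share. The "dual" table
 * spreads 64 cells over thirteen channels; the "single" variant serves
 * chips with fewer DMA channels (32 cells over seven).
 */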
1518 | const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = { |
1519 | [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, |
1520 | [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, |
1521 | [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, |
1522 | [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, |
1523 | [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, |
1524 | [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, |
1525 | [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, |
1526 | [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, |
1527 | [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, |
1528 | [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, |
1529 | [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, |
1530 | [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, |
1531 | [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, |
1532 | }; |
1533 | EXPORT_SYMBOL(rtw89_bd_ram_table_dual); |
1534 | |
1535 | const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { |
1536 | [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, |
1537 | [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, |
1538 | [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, |
1539 | [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, |
1540 | [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1}, |
1541 | [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1}, |
1542 | [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1}, |
1543 | }; |
1544 | EXPORT_SYMBOL(rtw89_bd_ram_table_single); |
1545 | |
1546 | static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) |
1547 | { |
1548 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1549 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1550 | const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table; |
1551 | struct rtw89_pci_tx_ring *tx_ring; |
1552 | struct rtw89_pci_rx_ring *rx_ring; |
1553 | struct rtw89_pci_dma_ring *bd_ring; |
1554 | const struct rtw89_pci_bd_ram *bd_ram; |
1555 | u32 addr_num; |
1556 | u32 addr_idx; |
1557 | u32 addr_bdram; |
1558 | u32 addr_desa_l; |
1559 | u32 val32; |
1560 | int i; |
1561 | |
1562 | for (i = 0; i < RTW89_TXCH_NUM; i++) { |
1563 | if (info->tx_dma_ch_mask & BIT(i)) |
1564 | continue; |
1565 | |
1566 | tx_ring = &rtwpci->tx_rings[i]; |
1567 | bd_ring = &tx_ring->bd_ring; |
1568 | bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL; |
1569 | addr_num = bd_ring->addr.num; |
1570 | addr_bdram = bd_ring->addr.bdram; |
1571 | addr_desa_l = bd_ring->addr.desa_l; |
1572 | bd_ring->wp = 0; |
1573 | bd_ring->rp = 0; |
1574 | |
		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		if (addr_bdram && bd_ram) {
			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

			rtw89_write32(rtwdev, addr_bdram, val32);
		}
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1584 | } |
1585 | |
1586 | for (i = 0; i < RTW89_RXCH_NUM; i++) { |
1587 | rx_ring = &rtwpci->rx_rings[i]; |
1588 | bd_ring = &rx_ring->bd_ring; |
1589 | addr_num = bd_ring->addr.num; |
1590 | addr_idx = bd_ring->addr.idx; |
1591 | addr_desa_l = bd_ring->addr.desa_l; |
1592 | if (info->rx_ring_eq_is_full) |
1593 | bd_ring->wp = bd_ring->len - 1; |
1594 | else |
1595 | bd_ring->wp = 0; |
1596 | bd_ring->rp = 0; |
1597 | rx_ring->diliver_skb = NULL; |
1598 | rx_ring->diliver_desc.ready = false; |
1599 | rx_ring->target_rx_tag = 0; |
1600 | |
		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);

		if (info->rx_ring_eq_is_full)
			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
1606 | } |
1607 | } |
1608 | |
1609 | static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, |
1610 | struct rtw89_pci_tx_ring *tx_ring) |
1611 | { |
1612 | rtw89_pci_release_busy_txwd(rtwdev, tx_ring); |
1613 | rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); |
1614 | } |
1615 | |
1616 | void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) |
1617 | { |
1618 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1619 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1620 | int txch; |
1621 | |
1622 | rtw89_pci_reset_trx_rings(rtwdev); |
1623 | |
	spin_lock_bh(&rtwpci->trx_lock);
1625 | for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { |
1626 | if (info->tx_dma_ch_mask & BIT(txch)) |
1627 | continue; |
1628 | if (txch == RTW89_TXCH_CH12) { |
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
1631 | continue; |
1632 | } |
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
1636 | } |
1637 | |
1638 | static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) |
1639 | { |
1640 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1641 | unsigned long flags; |
1642 | |
1643 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
1644 | rtwpci->running = true; |
1645 | rtw89_chip_enable_intr(rtwdev, rtwpci); |
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1647 | } |
1648 | |
1649 | static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) |
1650 | { |
1651 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1652 | unsigned long flags; |
1653 | |
1654 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
1655 | rtwpci->running = false; |
1656 | rtw89_chip_disable_intr(rtwdev, rtwpci); |
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1658 | } |
1659 | |
1660 | static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) |
1661 | { |
1662 | rtw89_core_napi_start(rtwdev); |
1663 | rtw89_pci_enable_intr_lock(rtwdev); |
1664 | |
1665 | return 0; |
1666 | } |
1667 | |
1668 | static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) |
1669 | { |
1670 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1671 | struct pci_dev *pdev = rtwpci->pdev; |
1672 | |
1673 | rtw89_pci_disable_intr_lock(rtwdev); |
	synchronize_irq(pdev->irq);
1675 | rtw89_core_napi_stop(rtwdev); |
1676 | } |
1677 | |
1678 | static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) |
1679 | { |
1680 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1681 | struct pci_dev *pdev = rtwpci->pdev; |
1682 | |
1683 | if (pause) { |
1684 | rtw89_pci_disable_intr_lock(rtwdev); |
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
1688 | } else { |
1689 | rtw89_pci_enable_intr_lock(rtwdev); |
1690 | rtw89_pci_tx_kick_off_pending(rtwdev); |
1691 | } |
1692 | } |
1693 | |
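/* In low power mode the hardware exposes the BD index registers at an
 * alternate address set, so repoint every ring's idx register at the
 * proper location for the requested mode.
 */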
1694 | static |
1695 | void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) |
1696 | { |
1697 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1698 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1699 | const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; |
1700 | const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; |
1701 | struct rtw89_pci_tx_ring *tx_ring; |
1702 | struct rtw89_pci_rx_ring *rx_ring; |
1703 | int i; |
1704 | |
	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1706 | return; |
1707 | |
1708 | for (i = 0; i < RTW89_TXCH_NUM; i++) { |
1709 | tx_ring = &rtwpci->tx_rings[i]; |
1710 | tx_ring->bd_ring.addr.idx = low_power ? |
1711 | bd_idx_addr->tx_bd_addrs[i] : |
1712 | dma_addr_set->tx[i].idx; |
1713 | } |
1714 | |
1715 | for (i = 0; i < RTW89_RXCH_NUM; i++) { |
1716 | rx_ring = &rtwpci->rx_rings[i]; |
1717 | rx_ring->bd_ring.addr.idx = low_power ? |
1718 | bd_idx_addr->rx_bd_addrs[i] : |
1719 | dma_addr_set->rx[i].idx; |
1720 | } |
1721 | } |
1722 | |
1723 | static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) |
1724 | { |
1725 | enum rtw89_pci_intr_mask_cfg cfg; |
1726 | |
	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1728 | |
1729 | cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; |
1730 | rtw89_chip_config_intr_mask(rtwdev, cfg); |
1731 | rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); |
1732 | } |
1733 | |
1734 | static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); |
1735 | |
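/* CMAC registers read back as RTW89_R32_DEAD while the CMAC clocks are
 * gated. Retry a bounded number of times, enabling all CMAC clocks in
 * between, before giving up and returning the dead value.
 */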
1736 | static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) |
1737 | { |
1738 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
1751 | } |
1752 | |
1753 | return val; |
1754 | } |
1755 | |
1756 | static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) |
1757 | { |
1758 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1759 | u32 addr32, val32, shift; |
1760 | |
1761 | if (!ACCESS_CMAC(addr)) |
		return readb(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1767 | return val32 >> shift; |
1768 | } |
1769 | |
1770 | static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) |
1771 | { |
1772 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1773 | u32 addr32, val32, shift; |
1774 | |
1775 | if (!ACCESS_CMAC(addr)) |
		return readw(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1781 | return val32 >> shift; |
1782 | } |
1783 | |
1784 | static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) |
1785 | { |
1786 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1787 | |
1788 | if (!ACCESS_CMAC(addr)) |
		return readl(rtwpci->mmap + addr);
1790 | |
1791 | return rtw89_pci_ops_read32_cmac(rtwdev, addr); |
1792 | } |
1793 | |
1794 | static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) |
1795 | { |
1796 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1797 | |
	writeb(data, rtwpci->mmap + addr);
1799 | } |
1800 | |
1801 | static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) |
1802 | { |
1803 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1804 | |
	writew(data, rtwpci->mmap + addr);
1806 | } |
1807 | |
1808 | static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) |
1809 | { |
1810 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
1811 | |
	writel(data, rtwpci->mmap + addr);
1813 | } |
1814 | |
1815 | static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) |
1816 | { |
1817 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1818 | |
	if (enable)
		rtw89_write32_set(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
	else
		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
1825 | } |
1826 | |
1827 | static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) |
1828 | { |
1829 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
1830 | const struct rtw89_reg_def *reg = &info->dma_io_stop; |
1831 | |
	if (enable)
		rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
	else
		rtw89_write32_set(rtwdev, reg->addr, reg->mask);
1836 | } |
1837 | |
1838 | void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) |
1839 | { |
1840 | rtw89_pci_ctrl_dma_io(rtwdev, enable); |
1841 | rtw89_pci_ctrl_dma_trx(rtwdev, enable); |
1842 | } |
1843 | |
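/* Issue one MDIO transaction toward the PCIe PHY: program the register
 * address, pick the register page from the address range and PHY rate
 * (gen1/gen2), kick the read or write flag given in rw_bit, then poll
 * until the hardware clears that flag.
 */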
1844 | static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) |
1845 | { |
1846 | u16 val; |
1847 | |
	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1849 | |
1850 | val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); |
1851 | switch (speed) { |
1852 | case PCIE_PHY_GEN1: |
1853 | if (addr < 0x20) |
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1870 | |
1871 | return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, |
1872 | false, rtwdev, R_AX_MDIO_CFG); |
1873 | } |
1874 | |
1875 | static int |
1876 | rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) |
1877 | { |
1878 | int ret; |
1879 | |
1880 | ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); |
1881 | if (ret) { |
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1883 | return ret; |
1884 | } |
1885 | *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); |
1886 | |
1887 | return 0; |
1888 | } |
1889 | |
1890 | static int |
1891 | rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) |
1892 | { |
1893 | int ret; |
1894 | |
1895 | rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); |
1896 | ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); |
1897 | if (ret) { |
		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1899 | return ret; |
1900 | } |
1901 | |
1902 | return 0; |
1903 | } |
1904 | |
1905 | static int |
1906 | rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) |
1907 | { |
1908 | u32 shift; |
1909 | int ret; |
1910 | u16 val; |
1911 | |
	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;

	shift = __ffs(mask);
	val &= ~mask;
	val |= ((data << shift) & mask);

	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1921 | if (ret) |
1922 | return ret; |
1923 | |
1924 | return 0; |
1925 | } |
1926 | |
1927 | static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) |
1928 | { |
1929 | int ret; |
1930 | u16 val; |
1931 | |
	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1936 | if (ret) |
1937 | return ret; |
1938 | |
1939 | return 0; |
1940 | } |
1941 | |
1942 | static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) |
1943 | { |
1944 | int ret; |
1945 | u16 val; |
1946 | |
	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1951 | if (ret) |
1952 | return ret; |
1953 | |
1954 | return 0; |
1955 | } |
1956 | |
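/* DBI accessors: a side-band window into the device's PCI config space
 * through MAC registers, used below as a fallback when the host's
 * pci_{read,write}_config_byte() fails on certain chips.
 */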
1957 | static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) |
1958 | { |
1959 | u16 addr_2lsb = addr & B_AX_DBI_2LSB; |
1960 | u16 write_addr; |
1961 | u8 flag; |
1962 | int ret; |
1963 | |
1964 | write_addr = addr & B_AX_DBI_ADDR_MSK; |
1965 | write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); |
1966 | rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); |
	rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
1968 | rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); |
1969 | |
1970 | ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, |
1971 | 10 * RTW89_PCI_WR_RETRY_CNT, false, |
1972 | rtwdev, R_AX_DBI_FLAG + 2); |
1973 | if (ret) |
		rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
1975 | addr); |
1976 | |
1977 | return ret; |
1978 | } |
1979 | |
1980 | static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) |
1981 | { |
1982 | u16 read_addr = addr & B_AX_DBI_ADDR_MSK; |
1983 | u8 flag; |
1984 | int ret; |
1985 | |
	rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
1987 | rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); |
1988 | |
1989 | ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, |
1990 | 10 * RTW89_PCI_WR_RETRY_CNT, false, |
1991 | rtwdev, R_AX_DBI_FLAG + 2); |
1992 | if (ret) { |
		rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
1994 | addr); |
1995 | return ret; |
1996 | } |
1997 | |
1998 | read_addr = R_AX_DBI_RDATA + (addr & 3); |
	*value = rtw89_read8(rtwdev, read_addr);
2000 | |
2001 | return 0; |
2002 | } |
2003 | |
2004 | static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, |
2005 | u8 data) |
2006 | { |
2007 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
2008 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2009 | struct pci_dev *pdev = rtwpci->pdev; |
2010 | int ret; |
2011 | |
	ret = pci_write_config_byte(pdev, addr, data);
2013 | if (!ret) |
2014 | return 0; |
2015 | |
2016 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) |
2017 | ret = rtw89_dbi_write8(rtwdev, addr, data); |
2018 | |
2019 | return ret; |
2020 | } |
2021 | |
2022 | static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, |
2023 | u8 *value) |
2024 | { |
2025 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
2026 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2027 | struct pci_dev *pdev = rtwpci->pdev; |
2028 | int ret; |
2029 | |
	ret = pci_read_config_byte(pdev, addr, value);
2031 | if (!ret) |
2032 | return 0; |
2033 | |
2034 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) |
2035 | ret = rtw89_dbi_read8(rtwdev, addr, value); |
2036 | |
2037 | return ret; |
2038 | } |
2039 | |
2040 | static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, |
2041 | u8 bit) |
2042 | { |
2043 | u8 value; |
2044 | int ret; |
2045 | |
	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value |= bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2052 | |
2053 | return ret; |
2054 | } |
2055 | |
2056 | static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, |
2057 | u8 bit) |
2058 | { |
2059 | u8 value; |
2060 | int ret; |
2061 | |
	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value &= ~bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2068 | |
2069 | return ret; |
2070 | } |
2071 | |
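/* Pulse the reference-clock calibration counter once and read back the
 * 12-bit target count; an all-zero or all-one result is treated as a
 * failed measurement.
 */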
2072 | static int |
2073 | __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) |
2074 | { |
2075 | u16 val, tar; |
2076 | int ret; |
2077 | |
2078 | /* Enable counter */ |
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	fsleep(300);

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	tar = tar & 0x0FFF;
	if (tar == 0 || tar == 0x0FFF) {
		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
2104 | return -EINVAL; |
2105 | } |
2106 | |
2107 | *target = tar; |
2108 | |
2109 | return 0; |
2110 | } |
2111 | |
2112 | static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) |
2113 | { |
2114 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2115 | int ret; |
2116 | |
2117 | if (chip_id != RTL8852B && chip_id != RTL8851B) |
2118 | return 0; |
2119 | |
	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
2122 | return ret; |
2123 | } |
2124 | |
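/* Auto reference-clock calibration for RTL8852B/RTL8851B: temporarily
 * clear the L1 control bit (restored at "end:"), measure the target
 * count, derive a divider and margin from it, program both into the
 * PHY, then re-enable the calibration function and zero the CLK delay.
 */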
2125 | static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) |
2126 | { |
2127 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2128 | enum rtw89_pcie_phy phy_rate; |
2129 | u16 val16, mgn_set, div_set, tar; |
2130 | u8 val8, bdr_ori; |
2131 | bool l1_flag = false; |
2132 | int ret = 0; |
2133 | |
2134 | if (chip_id != RTL8852B && chip_id != RTL8851B) |
2135 | return 0; |
2136 | |
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
			  RTW89_PCIE_PHY_RATE);
		return ret;
	}

	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
		phy_rate = PCIE_PHY_GEN1;
	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
		phy_rate = PCIE_PHY_GEN2;
	} else {
		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
		return -EOPNOTSUPP;
	}
	/* Disable L1BD */
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2156 | return ret; |
2157 | } |
2158 | |
2159 | if (bdr_ori & RTW89_PCIE_BIT_L1) { |
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori & ~RTW89_PCIE_BIT_L1);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2164 | RTW89_PCIE_L1_CTRL); |
2165 | return ret; |
2166 | } |
2167 | l1_flag = true; |
2168 | } |
2169 | |
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	if (val16 & B_AX_CALIB_EN) {
		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
					 val16 & ~B_AX_CALIB_EN, phy_rate);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2181 | goto end; |
2182 | } |
2183 | } |
2184 | |
2185 | if (!autook_en) |
2186 | goto end; |
2187 | /* Set div */ |
	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	/* Obtain div and margin */
	ret = __get_target(rtwdev, &tar, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2198 | goto end; |
2199 | } |
2200 | |
2201 | mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; |
2202 | |
2203 | if (mgn_set >= 128) { |
2204 | div_set = 0x0003; |
2205 | mgn_set = 0x000F; |
2206 | } else if (mgn_set >= 64) { |
2207 | div_set = 0x0003; |
2208 | mgn_set >>= 3; |
2209 | } else if (mgn_set >= 32) { |
2210 | div_set = 0x0002; |
2211 | mgn_set >>= 2; |
2212 | } else if (mgn_set >= 16) { |
2213 | div_set = 0x0001; |
2214 | mgn_set >>= 1; |
2215 | } else if (mgn_set == 0) { |
		rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
2217 | goto end; |
2218 | } else { |
2219 | div_set = 0x0000; |
2220 | } |
2221 | |
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	val16 |= u16_encode_bits(div_set, B_AX_DIV);

	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	ret = __get_target(rtwdev, &tar, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
		goto end;
	}

	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
		    tar, div_set, mgn_set);
	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2248 | goto end; |
2249 | } |
2250 | |
2251 | /* Enable function */ |
	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	/* CLK delay = 0 */
	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
					  PCIE_CLKDLY_HW_0);

end:
	/* Set L1BD to ori */
	if (l1_flag) {
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2269 | RTW89_PCIE_L1_CTRL); |
2270 | return ret; |
2271 | } |
2272 | } |
2273 | |
2274 | return ret; |
2275 | } |
2276 | |
2277 | static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) |
2278 | { |
2279 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2280 | int ret; |
2281 | |
2282 | if (chip_id == RTL8852A) { |
		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
					     PCIE_PHY_GEN1);
		if (ret)
			return ret;
		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
					     PCIE_PHY_GEN2);
2289 | if (ret) |
2290 | return ret; |
2291 | } else if (chip_id == RTL8852C) { |
2292 | rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, |
2293 | B_AX_DEGLITCH); |
2294 | rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, |
2295 | B_AX_DEGLITCH); |
2296 | } |
2297 | |
2298 | return 0; |
2299 | } |
2300 | |
2301 | static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) |
2302 | { |
2303 | if (rtwdev->chip->chip_id != RTL8852A) |
2304 | return; |
2305 | |
2306 | rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); |
2307 | } |
2308 | |
2309 | static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) |
2310 | { |
2311 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2312 | |
2313 | if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) |
2314 | return; |
2315 | |
2316 | rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); |
2317 | } |
2318 | |
2319 | static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) |
2320 | { |
2321 | int ret; |
2322 | |
2323 | if (rtwdev->chip->chip_id != RTL8852A) |
2324 | return 0; |
2325 | |
	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
				     PCIE_PHY_GEN1);
	if (ret)
		return ret;

	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
				     PCIE_PHY_GEN2);
2333 | if (ret) |
2334 | return ret; |
2335 | |
2336 | return 0; |
2337 | } |
2338 | |
2339 | static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) |
2340 | { |
2341 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2342 | |
2343 | if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) |
2344 | return; |
2345 | |
2346 | rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); |
2347 | } |
2348 | |
2349 | static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) |
2350 | { |
2351 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2352 | |
2353 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { |
2354 | rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, |
2355 | B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); |
2356 | rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, |
2357 | B_AX_PCIE_DIS_WLSUS_AFT_PDN); |
2358 | } else if (rtwdev->chip->chip_id == RTL8852C) { |
2359 | rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, |
2360 | B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); |
2361 | } |
2362 | } |
2363 | |
2364 | static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) |
2365 | { |
2366 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2367 | |
2368 | if (chip_id != RTL8852B && chip_id != RTL8851B) |
2369 | return 0; |
2370 | |
	return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2373 | } |
2374 | |
2375 | static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) |
2376 | { |
2377 | if (pwr_up) |
2378 | rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); |
2379 | else |
2380 | rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); |
2381 | } |
2382 | |
2383 | static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) |
2384 | { |
2385 | if (rtwdev->chip->chip_id != RTL8852C) |
2386 | return; |
2387 | |
2388 | rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); |
2389 | rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); |
2390 | } |
2391 | |
2392 | static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) |
2393 | { |
2394 | if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) |
2395 | return; |
2396 | |
2397 | rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); |
2398 | } |
2399 | |
2400 | static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) |
2401 | { |
2402 | if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) |
2403 | return; |
2404 | |
2405 | rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, |
2406 | B_AX_SYSON_DIS_PMCR_AX_WRMSK); |
2407 | rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); |
2408 | rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, |
2409 | B_AX_SYSON_DIS_PMCR_AX_WRMSK); |
2410 | } |
2411 | |
2412 | static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) |
2413 | { |
2414 | if (rtwdev->chip->chip_id != RTL8852C) |
2415 | return; |
2416 | |
2417 | rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); |
2418 | } |
2419 | |
2420 | static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) |
2421 | { |
2422 | if (rtwdev->chip->chip_id != RTL8852C) |
2423 | return; |
2424 | |
2425 | rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); |
2426 | } |
2427 | |
2428 | static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) |
2429 | { |
2430 | if (rtwdev->chip->chip_id == RTL8852C) |
2431 | return; |
2432 | |
2433 | rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, |
2434 | B_AX_SIC_EN_FORCE_CLKREQ); |
2435 | } |
2436 | |
2437 | static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) |
2438 | { |
2439 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2440 | u32 lbc; |
2441 | |
2442 | if (rtwdev->chip->chip_id == RTL8852C) |
2443 | return; |
2444 | |
2445 | lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); |
2446 | if (info->lbc_en == MAC_AX_PCIE_ENABLE) { |
		lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
		lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
		rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
	} else {
		lbc &= ~B_AX_LBC_EN;
	}
	rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2454 | } |
2455 | |
2456 | static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) |
2457 | { |
2458 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2459 | u32 val32; |
2460 | |
2461 | if (rtwdev->chip->chip_id != RTL8852C) |
2462 | return; |
2463 | |
2464 | if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { |
2465 | val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, |
2466 | info->io_rcy_tmr); |
		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2470 | |
2471 | rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); |
2472 | rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); |
2473 | rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); |
2474 | } else { |
2475 | rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); |
2476 | rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); |
2477 | rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); |
2478 | } |
2479 | |
2480 | rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); |
2481 | } |
2482 | |
2483 | static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) |
2484 | { |
2485 | if (rtwdev->chip->chip_id == RTL8852C) |
2486 | return; |
2487 | |
2488 | rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, |
2489 | B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); |
2490 | |
2491 | if (rtwdev->chip->chip_id == RTL8852A) |
2492 | rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, |
2493 | B_AX_EN_CHKDSC_NO_RX_STUCK); |
2494 | } |
2495 | |
2496 | static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) |
2497 | { |
2498 | if (rtwdev->chip->chip_id == RTL8852C) |
2499 | return; |
2500 | |
2501 | rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, |
2502 | B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); |
2503 | } |
2504 | |
2505 | static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) |
2506 | { |
2507 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2508 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2509 | u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | |
2510 | B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | |
2511 | B_AX_CLR_CH12_IDX; |
2512 | u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; |
2513 | u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; |
2514 | |
2515 | if (chip_id == RTL8852A || chip_id == RTL8852C) |
2516 | val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | |
2517 | B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; |
2518 | /* clear DMA indexes */ |
	rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
	if (chip_id == RTL8852A || chip_id == RTL8852C)
		rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
				  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
	rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
			  B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2525 | } |
2526 | |
2527 | static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) |
2528 | { |
2529 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2530 | u32 ret, check, dma_busy; |
2531 | u32 dma_busy1 = info->dma_busy1.addr; |
2532 | u32 dma_busy2 = info->dma_busy2_reg; |
2533 | |
2534 | check = info->dma_busy1.mask; |
2535 | |
2536 | ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, |
2537 | 10, 100, false, rtwdev, dma_busy1); |
2538 | if (ret) |
2539 | return ret; |
2540 | |
2541 | if (!dma_busy2) |
2542 | return 0; |
2543 | |
2544 | check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; |
2545 | |
2546 | ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, |
2547 | 10, 100, false, rtwdev, dma_busy2); |
2548 | if (ret) |
2549 | return ret; |
2550 | |
2551 | return 0; |
2552 | } |
2553 | |
2554 | static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) |
2555 | { |
2556 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2557 | u32 ret, check, dma_busy; |
2558 | u32 dma_busy3 = info->dma_busy3_reg; |
2559 | |
2560 | check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; |
2561 | |
2562 | ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, |
2563 | 10, 100, false, rtwdev, dma_busy3); |
2564 | if (ret) |
2565 | return ret; |
2566 | |
2567 | return 0; |
2568 | } |
2569 | |
2570 | static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) |
2571 | { |
2572 | u32 ret; |
2573 | |
2574 | ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); |
2575 | if (ret) { |
		rtw89_err(rtwdev, "txdma ch busy\n");
2577 | return ret; |
2578 | } |
2579 | |
2580 | ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); |
2581 | if (ret) { |
		rtw89_err(rtwdev, "rxdma ch busy\n");
2583 | return ret; |
2584 | } |
2585 | |
2586 | return 0; |
2587 | } |
2588 | |
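/* Program the DMA operating mode from the per-chip rtw89_pci_info:
 * TX/RX BD truncation, RX BD mode (packet vs. separation), burst
 * sizes, tag mode, WD DMA intervals and the address-info width.
 */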
2589 | static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) |
2590 | { |
2591 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2592 | enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; |
2593 | enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; |
2594 | enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; |
2595 | enum mac_ax_tag_mode tag_mode = info->tag_mode; |
2596 | enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; |
2597 | enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; |
2598 | enum mac_ax_tx_burst tx_burst = info->tx_burst; |
2599 | enum mac_ax_rx_burst rx_burst = info->rx_burst; |
2600 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2601 | u8 cv = rtwdev->hal.cv; |
2602 | u32 val32; |
2603 | |
2604 | if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { |
2605 | if (chip_id == RTL8852A && cv == CHIP_CBV) |
2606 | rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); |
2607 | } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { |
2608 | if (chip_id == RTL8852A || chip_id == RTL8852B) |
2609 | rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); |
2610 | } |
2611 | |
2612 | if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { |
2613 | if (chip_id == RTL8852A && cv == CHIP_CBV) |
2614 | rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); |
2615 | } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { |
2616 | if (chip_id == RTL8852A || chip_id == RTL8852B) |
2617 | rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); |
2618 | } |
2619 | |
2620 | if (rxbd_mode == MAC_AX_RXBD_PKT) { |
		rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
	} else if (rxbd_mode == MAC_AX_RXBD_SEP) {
		rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);

		if (chip_id == RTL8852A || chip_id == RTL8852B)
			rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
					   B_AX_PCIE_RX_APPLEN_MASK, 0);
2628 | } |
2629 | |
2630 | if (chip_id == RTL8852A || chip_id == RTL8852B) { |
		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
	} else if (chip_id == RTL8852C) {
		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
	}

	if (chip_id == RTL8852A || chip_id == RTL8852B) {
		if (tag_mode == MAC_AX_TAG_SGL) {
			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
				~B_AX_LATENCY_CONTROL;
			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
		} else if (tag_mode == MAC_AX_TAG_MULTI) {
			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
				B_AX_LATENCY_CONTROL;
			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
		}
	}

	rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
			   info->multi_tag_num);
2652 | |
2653 | if (chip_id == RTL8852A || chip_id == RTL8852B) { |
		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
				   wd_dma_idle_intvl);
		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
				   wd_dma_act_intvl);
	} else if (chip_id == RTL8852C) {
		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
				   wd_dma_idle_intvl);
		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
				   wd_dma_act_intvl);
2663 | } |
2664 | |
2665 | if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { |
2666 | rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, |
2667 | B_AX_HOST_ADDR_INFO_8B_SEL); |
2668 | rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); |
2669 | } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { |
2670 | rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, |
2671 | B_AX_HOST_ADDR_INFO_8B_SEL); |
2672 | rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); |
2673 | } |
2674 | |
2675 | return 0; |
2676 | } |
2677 | |
2678 | static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) |
2679 | { |
2680 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2681 | |
2682 | if (rtwdev->chip->chip_id == RTL8852A) { |
2683 | /* ltr sw trigger */ |
2684 | rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); |
2685 | } |
2686 | info->ltr_set(rtwdev, false); |
	rtw89_pci_ctrl_dma_all(rtwdev, false);
2688 | rtw89_pci_clr_idx_all(rtwdev); |
2689 | |
2690 | return 0; |
2691 | } |
2692 | |
2693 | static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) |
2694 | { |
2695 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2696 | int ret; |
2697 | |
2698 | rtw89_pci_rxdma_prefth(rtwdev); |
2699 | rtw89_pci_l1off_pwroff(rtwdev); |
2700 | rtw89_pci_deglitch_setting(rtwdev); |
2701 | ret = rtw89_pci_l2_rxen_lat(rtwdev); |
2702 | if (ret) { |
		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2704 | return ret; |
2705 | } |
2706 | |
2707 | rtw89_pci_aphy_pwrcut(rtwdev); |
2708 | rtw89_pci_hci_ldo(rtwdev); |
2709 | rtw89_pci_dphy_delay(rtwdev); |
2710 | |
2711 | ret = rtw89_pci_autok_x(rtwdev); |
2712 | if (ret) { |
		rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
		return ret;
	}

	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
		return ret;
	}

	rtw89_pci_power_wake(rtwdev, true);
2724 | rtw89_pci_autoload_hang(rtwdev); |
2725 | rtw89_pci_l12_vmain(rtwdev); |
2726 | rtw89_pci_gen2_force_ib(rtwdev); |
2727 | rtw89_pci_l1_ent_lat(rtwdev); |
2728 | rtw89_pci_wd_exit_l1(rtwdev); |
2729 | rtw89_pci_set_sic(rtwdev); |
2730 | rtw89_pci_set_lbc(rtwdev); |
2731 | rtw89_pci_set_io_rcy(rtwdev); |
2732 | rtw89_pci_set_dbg(rtwdev); |
2733 | rtw89_pci_set_keep_reg(rtwdev); |
2734 | |
	rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);

	/* stop DMA activities */
	rtw89_pci_ctrl_dma_all(rtwdev, false);

	ret = rtw89_pci_poll_dma_all_idle(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
		return ret;
	}

	rtw89_pci_clr_idx_all(rtwdev);
	rtw89_pci_mode_op(rtwdev);

	/* fill TRX BD indexes */
	rtw89_pci_ops_reset(rtwdev);

	ret = rtw89_pci_rst_bdram_ax(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "reset bdram busy\n");
		return ret;
	}

	/* disable all channels except the FW CMD channel, to download firmware */
	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
	rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);

	/* start DMA activities */
	rtw89_pci_ctrl_dma_all(rtwdev, true);
2764 | |
2765 | return 0; |
2766 | } |
2767 | |
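/* Configure PCIe LTR (Latency Tolerance Reporting): sanity-check that
 * the LTR registers are readable, then enable hardware-driven LTR with
 * a 500us space index, a 3.2ms idle timer, RX thresholds, and idle/
 * active latency values that appear to be fixed for this generation.
 */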
2768 | int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) |
2769 | { |
2770 | u32 val; |
2771 | |
2772 | if (!en) |
2773 | return 0; |
2774 | |
2775 | val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); |
2776 | if (rtw89_pci_ltr_is_err_reg_val(val)) |
2777 | return -EINVAL; |
2778 | val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); |
2779 | if (rtw89_pci_ltr_is_err_reg_val(val)) |
2780 | return -EINVAL; |
2781 | val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); |
2782 | if (rtw89_pci_ltr_is_err_reg_val(val)) |
2783 | return -EINVAL; |
2784 | val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); |
2785 | if (rtw89_pci_ltr_is_err_reg_val(val)) |
2786 | return -EINVAL; |
2787 | |
2788 | rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | |
2789 | B_AX_LTR_WD_NOEMP_CHK); |
2790 | rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, |
2791 | PCI_LTR_SPC_500US); |
2792 | rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, |
2793 | PCI_LTR_IDLE_TIMER_3_2MS); |
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2798 | |
2799 | return 0; |
2800 | } |
2801 | EXPORT_SYMBOL(rtw89_pci_ltr_set); |
2802 | |
2803 | int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) |
2804 | { |
2805 | u32 dec_ctrl; |
2806 | u32 val32; |
2807 | |
2808 | val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); |
	if (rtw89_pci_ltr_is_err_reg_val(val32))
		return -EINVAL;
	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
	if (rtw89_pci_ltr_is_err_reg_val(val32))
		return -EINVAL;
	dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
		return -EINVAL;
	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
	if (rtw89_pci_ltr_is_err_reg_val(val32))
		return -EINVAL;
	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
	if (rtw89_pci_ltr_is_err_reg_val(val32))
		return -EINVAL;
2822 | return -EINVAL; |
2823 | |
2824 | if (!en) { |
2825 | dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); |
2826 | dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | |
2827 | B_AX_LTR_REQ_DRV; |
2828 | } else { |
2829 | dec_ctrl |= B_AX_LTR_HW_DEC_EN; |
2830 | } |
2831 | |
2832 | dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; |
2833 | dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); |
2834 | |
2835 | if (en) |
2836 | rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, |
2837 | B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); |
2838 | rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, |
2839 | PCI_LTR_IDLE_TIMER_3_2MS); |
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
	rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
2845 | |
2846 | return 0; |
2847 | } |
2848 | EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); |
2849 | |
2850 | static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) |
2851 | { |
2852 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2853 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
2854 | int ret; |
2855 | |
2856 | ret = info->ltr_set(rtwdev, true); |
2857 | if (ret) { |
		rtw89_err(rtwdev, "pci ltr set fail\n");
2859 | return ret; |
2860 | } |
2861 | if (chip_id == RTL8852A) { |
2862 | /* ltr sw trigger */ |
2863 | rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); |
2864 | } |
2865 | if (chip_id == RTL8852A || chip_id == RTL8852B) { |
2866 | /* ADDR info 8-byte mode */ |
2867 | rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, |
2868 | B_AX_HOST_ADDR_INFO_8B_SEL); |
2869 | rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); |
2870 | } |
2871 | |
2872 | /* enable DMA for all queues */ |
	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
2874 | |
2875 | /* Release PCI IO */ |
	rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
2877 | B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); |
2878 | |
2879 | return 0; |
2880 | } |
2881 | |
2882 | static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, |
2883 | struct pci_dev *pdev) |
2884 | { |
2885 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
2886 | int ret; |
2887 | |
	ret = pci_enable_device(pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
2896 | |
2897 | rtwpci->pdev = pdev; |
2898 | |
2899 | return 0; |
2900 | } |
2901 | |
2902 | static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, |
2903 | struct pci_dev *pdev) |
2904 | { |
	pci_disable_device(pdev);
2906 | } |
2907 | |
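/* Map the MMIO BAR (BAR 2 on these devices) and restrict streaming and
 * coherent DMA to 32 bits, since the buffer descriptors carry 32-bit
 * DMA addresses.
 */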
2908 | static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, |
2909 | struct pci_dev *pdev) |
2910 | { |
2911 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
2912 | unsigned long resource_len; |
2913 | u8 bar_id = 2; |
2914 | int ret; |
2915 | |
2916 | ret = pci_request_regions(pdev, KBUILD_MODNAME); |
2917 | if (ret) { |
		rtw89_err(rtwdev, "failed to request pci regions\n");
		goto err;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
		goto err_release_regions;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
		goto err_release_regions;
	}

	resource_len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
	if (!rtwpci->mmap) {
		rtw89_err(rtwdev, "failed to map pci io\n");
2938 | ret = -EIO; |
2939 | goto err_release_regions; |
2940 | } |
2941 | |
2942 | return 0; |
2943 | |
2944 | err_release_regions: |
2945 | pci_release_regions(pdev); |
2946 | err: |
2947 | return ret; |
2948 | } |
2949 | |
2950 | static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, |
2951 | struct pci_dev *pdev) |
2952 | { |
2953 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
2954 | |
2955 | if (rtwpci->mmap) { |
		pci_iounmap(pdev, rtwpci->mmap);
2957 | pci_release_regions(pdev); |
2958 | } |
2959 | } |
2960 | |
2961 | static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, |
2962 | struct pci_dev *pdev, |
2963 | struct rtw89_pci_tx_ring *tx_ring) |
2964 | { |
2965 | struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; |
2966 | u8 *head = wd_ring->head; |
2967 | dma_addr_t dma = wd_ring->dma; |
2968 | u32 page_size = wd_ring->page_size; |
2969 | u32 page_num = wd_ring->page_num; |
2970 | u32 ring_sz = page_size * page_num; |
2971 | |
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2973 | wd_ring->head = NULL; |
2974 | } |
2975 | |
2976 | static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, |
2977 | struct pci_dev *pdev, |
2978 | struct rtw89_pci_tx_ring *tx_ring) |
2979 | { |
2980 | int ring_sz; |
2981 | u8 *head; |
2982 | dma_addr_t dma; |
2983 | |
2984 | head = tx_ring->bd_ring.head; |
2985 | dma = tx_ring->bd_ring.dma; |
2986 | ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; |
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2988 | |
2989 | tx_ring->bd_ring.head = NULL; |
2990 | } |
2991 | |
2992 | static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, |
2993 | struct pci_dev *pdev) |
2994 | { |
2995 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
2996 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
2997 | struct rtw89_pci_tx_ring *tx_ring; |
2998 | int i; |
2999 | |
3000 | for (i = 0; i < RTW89_TXCH_NUM; i++) { |
3001 | if (info->tx_dma_ch_mask & BIT(i)) |
3002 | continue; |
3003 | tx_ring = &rtwpci->tx_rings[i]; |
3004 | rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); |
3005 | rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); |
3006 | } |
3007 | } |
3008 | |
3009 | static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, |
3010 | struct pci_dev *pdev, |
3011 | struct rtw89_pci_rx_ring *rx_ring) |
3012 | { |
3013 | struct rtw89_pci_rx_info *rx_info; |
3014 | struct sk_buff *skb; |
3015 | dma_addr_t dma; |
3016 | u32 buf_sz; |
3017 | u8 *head; |
3018 | int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; |
3019 | int i; |
3020 | |
3021 | buf_sz = rx_ring->buf_sz; |
3022 | for (i = 0; i < rx_ring->bd_ring.len; i++) { |
3023 | skb = rx_ring->buf[i]; |
3024 | if (!skb) |
3025 | continue; |
3026 | |
3027 | rx_info = RTW89_PCI_RX_SKB_CB(skb); |
3028 | dma = rx_info->dma; |
3029 | dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); |
3030 | dev_kfree_skb(skb); |
3031 | rx_ring->buf[i] = NULL; |
3032 | } |
3033 | |
3034 | head = rx_ring->bd_ring.head; |
3035 | dma = rx_ring->bd_ring.dma; |
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3037 | |
3038 | rx_ring->bd_ring.head = NULL; |
3039 | } |
3040 | |
3041 | static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, |
3042 | struct pci_dev *pdev) |
3043 | { |
3044 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3045 | struct rtw89_pci_rx_ring *rx_ring; |
3046 | int i; |
3047 | |
3048 | for (i = 0; i < RTW89_RXCH_NUM; i++) { |
3049 | rx_ring = &rtwpci->rx_rings[i]; |
3050 | rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); |
3051 | } |
3052 | } |
3053 | |
3054 | static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, |
3055 | struct pci_dev *pdev) |
3056 | { |
3057 | rtw89_pci_free_rx_rings(rtwdev, pdev); |
3058 | rtw89_pci_free_tx_rings(rtwdev, pdev); |
3059 | } |
3060 | |
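/* Attach one RX skb to a buffer descriptor: DMA-map the buffer and
 * publish its size and 32-bit DMA address in the BD for the hardware.
 */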
3061 | static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, |
3062 | struct rtw89_pci_rx_ring *rx_ring, |
3063 | struct sk_buff *skb, int buf_sz, u32 idx) |
3064 | { |
3065 | struct rtw89_pci_rx_info *rx_info; |
3066 | struct rtw89_pci_rx_bd_32 *rx_bd; |
3067 | dma_addr_t dma; |
3068 | |
3069 | if (!skb) |
3070 | return -EINVAL; |
3071 | |
3072 | dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); |
	if (dma_mapping_error(&pdev->dev, dma))
3074 | return -EBUSY; |
3075 | |
3076 | rx_info = RTW89_PCI_RX_SKB_CB(skb); |
3077 | rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); |
3078 | |
3079 | memset(rx_bd, 0, sizeof(*rx_bd)); |
3080 | rx_bd->buf_size = cpu_to_le16(buf_sz); |
3081 | rx_bd->dma = cpu_to_le32(dma); |
3082 | rx_info->dma = dma; |
3083 | |
3084 | return 0; |
3085 | } |
3086 | |
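/* Allocate the TX WD (WiFi descriptor) ring: one coherent DMA block
 * split into fixed-size pages, each tracked by an rtw89_pci_tx_wd and
 * parked on the free list. The FW command channel (CH12) does not use
 * WD pages and is skipped.
 */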
3087 | static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, |
3088 | struct pci_dev *pdev, |
3089 | struct rtw89_pci_tx_ring *tx_ring, |
3090 | enum rtw89_tx_channel txch) |
3091 | { |
3092 | struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; |
3093 | struct rtw89_pci_tx_wd *txwd; |
3094 | dma_addr_t dma; |
3095 | dma_addr_t cur_paddr; |
3096 | u8 *head; |
3097 | u8 *cur_vaddr; |
3098 | u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; |
3099 | u32 page_num = RTW89_PCI_TXWD_NUM_MAX; |
3100 | u32 ring_sz = page_size * page_num; |
3101 | u32 page_offset; |
3102 | int i; |
3103 | |
3104 | /* FWCMD queue doesn't use txwd as pages */ |
3105 | if (txch == RTW89_TXCH_CH12) |
3106 | return 0; |
3107 | |
	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head)
		return -ENOMEM;

	INIT_LIST_HEAD(&wd_ring->free_pages);
	wd_ring->head = head;
	wd_ring->dma = dma;
	wd_ring->page_size = page_size;
	wd_ring->page_num = page_num;

	page_offset = 0;
	for (i = 0; i < page_num; i++) {
		txwd = &wd_ring->pages[i];
		cur_paddr = dma + page_offset;
		cur_vaddr = head + page_offset;

		skb_queue_head_init(&txwd->queue);
		INIT_LIST_HEAD(&txwd->list);
3126 | txwd->paddr = cur_paddr; |
3127 | txwd->vaddr = cur_vaddr; |
3128 | txwd->len = page_size; |
3129 | txwd->seq = i; |
3130 | rtw89_pci_enqueue_txwd(tx_ring, txwd); |
3131 | |
3132 | page_offset += page_size; |
3133 | } |
3134 | |
3135 | return 0; |
3136 | } |
3137 | |
3138 | static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, |
3139 | struct pci_dev *pdev, |
3140 | struct rtw89_pci_tx_ring *tx_ring, |
3141 | u32 desc_size, u32 len, |
3142 | enum rtw89_tx_channel txch) |
3143 | { |
3144 | const struct rtw89_pci_ch_dma_addr *txch_addr; |
3145 | int ring_sz = desc_size * len; |
3146 | u8 *head; |
3147 | dma_addr_t dma; |
3148 | int ret; |
3149 | |
3150 | ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); |
3151 | if (ret) { |
		rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
		goto err;
	}

	ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
	if (ret) {
		rtw89_err(rtwdev, "failed to get address of txch %d", txch);
		goto err_free_wd_ring;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		ret = -ENOMEM;
		goto err_free_wd_ring;
	}

	INIT_LIST_HEAD(&tx_ring->busy_pages);
3169 | tx_ring->bd_ring.head = head; |
3170 | tx_ring->bd_ring.dma = dma; |
3171 | tx_ring->bd_ring.len = len; |
3172 | tx_ring->bd_ring.desc_size = desc_size; |
3173 | tx_ring->bd_ring.addr = *txch_addr; |
3174 | tx_ring->bd_ring.wp = 0; |
3175 | tx_ring->bd_ring.rp = 0; |
3176 | tx_ring->txch = txch; |
3177 | |
3178 | return 0; |
3179 | |
3180 | err_free_wd_ring: |
3181 | rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); |
3182 | err: |
3183 | return ret; |
3184 | } |
3185 | |
3186 | static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, |
3187 | struct pci_dev *pdev) |
3188 | { |
3189 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3190 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3191 | struct rtw89_pci_tx_ring *tx_ring; |
3192 | u32 desc_size; |
3193 | u32 len; |
3194 | u32 i, tx_allocated; |
3195 | int ret; |
3196 | |
3197 | for (i = 0; i < RTW89_TXCH_NUM; i++) { |
3198 | if (info->tx_dma_ch_mask & BIT(i)) |
3199 | continue; |
3200 | tx_ring = &rtwpci->tx_rings[i]; |
3201 | desc_size = sizeof(struct rtw89_pci_tx_bd_32); |
3202 | len = RTW89_PCI_TXBD_NUM_MAX; |
3203 | ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, |
					      desc_size, len, i);
3205 | if (ret) { |
3206 | rtw89_err(rtwdev, "failed to alloc tx ring %d\n" , i); |
3207 | goto err_free; |
3208 | } |
3209 | } |
3210 | |
3211 | return 0; |
3212 | |
3213 | err_free: |
3214 | tx_allocated = i; |
3215 | for (i = 0; i < tx_allocated; i++) { |
3216 | tx_ring = &rtwpci->tx_rings[i]; |
3217 | rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); |
3218 | } |
3219 | |
3220 | return ret; |
3221 | } |
3222 | |
3223 | static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, |
3224 | struct pci_dev *pdev, |
3225 | struct rtw89_pci_rx_ring *rx_ring, |
3226 | u32 desc_size, u32 len, u32 rxch) |
3227 | { |
3228 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3229 | const struct rtw89_pci_ch_dma_addr *rxch_addr; |
3230 | struct sk_buff *skb; |
3231 | u8 *head; |
3232 | dma_addr_t dma; |
3233 | int ring_sz = desc_size * len; |
3234 | int buf_sz = RTW89_PCI_RX_BUF_SIZE; |
3235 | int i, allocated; |
3236 | int ret; |
3237 | |
	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3239 | if (ret) { |
3240 | rtw89_err(rtwdev, "failed to get address of rxch %d" , rxch); |
3241 | return ret; |
3242 | } |
3243 | |
	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3245 | if (!head) { |
3246 | ret = -ENOMEM; |
3247 | goto err; |
3248 | } |
3249 | |
3250 | rx_ring->bd_ring.head = head; |
3251 | rx_ring->bd_ring.dma = dma; |
3252 | rx_ring->bd_ring.len = len; |
3253 | rx_ring->bd_ring.desc_size = desc_size; |
3254 | rx_ring->bd_ring.addr = *rxch_addr; |
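	/* Chips with rx_ring_eq_is_full treat equal read/write indices as a
	 * full ring rather than an empty one, so the initial write pointer
	 * is held one slot back.
	 */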
3255 | if (info->rx_ring_eq_is_full) |
3256 | rx_ring->bd_ring.wp = len - 1; |
3257 | else |
3258 | rx_ring->bd_ring.wp = 0; |
3259 | rx_ring->bd_ring.rp = 0; |
3260 | rx_ring->buf_sz = buf_sz; |
3261 | rx_ring->diliver_skb = NULL; |
3262 | rx_ring->diliver_desc.ready = false; |
3263 | rx_ring->target_rx_tag = 0; |
3264 | |
3265 | for (i = 0; i < len; i++) { |
		skb = dev_alloc_skb(buf_sz);
3267 | if (!skb) { |
3268 | ret = -ENOMEM; |
3269 | goto err_free; |
3270 | } |
3271 | |
3272 | memset(skb->data, 0, buf_sz); |
3273 | rx_ring->buf[i] = skb; |
3274 | ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, |
					   buf_sz, i);
3276 | if (ret) { |
3277 | rtw89_err(rtwdev, "failed to init rx buf %d\n" , i); |
3278 | dev_kfree_skb_any(skb); |
3279 | rx_ring->buf[i] = NULL; |
3280 | goto err_free; |
3281 | } |
3282 | } |
3283 | |
3284 | return 0; |
3285 | |
3286 | err_free: |
3287 | allocated = i; |
3288 | for (i = 0; i < allocated; i++) { |
3289 | skb = rx_ring->buf[i]; |
3290 | if (!skb) |
3291 | continue; |
3292 | dma = *((dma_addr_t *)skb->cb); |
3293 | dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); |
3294 | dev_kfree_skb(skb); |
3295 | rx_ring->buf[i] = NULL; |
3296 | } |
3297 | |
3298 | head = rx_ring->bd_ring.head; |
3299 | dma = rx_ring->bd_ring.dma; |
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3301 | |
3302 | rx_ring->bd_ring.head = NULL; |
3303 | err: |
3304 | return ret; |
3305 | } |
3306 | |
3307 | static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, |
3308 | struct pci_dev *pdev) |
3309 | { |
3310 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3311 | struct rtw89_pci_rx_ring *rx_ring; |
3312 | u32 desc_size; |
3313 | u32 len; |
3314 | int i, rx_allocated; |
3315 | int ret; |
3316 | |
3317 | for (i = 0; i < RTW89_RXCH_NUM; i++) { |
3318 | rx_ring = &rtwpci->rx_rings[i]; |
3319 | desc_size = sizeof(struct rtw89_pci_rx_bd_32); |
3320 | len = RTW89_PCI_RXBD_NUM_MAX; |
3321 | ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, |
					      desc_size, len, i);
3323 | if (ret) { |
3324 | rtw89_err(rtwdev, "failed to alloc rx ring %d\n" , i); |
3325 | goto err_free; |
3326 | } |
3327 | } |
3328 | |
3329 | return 0; |
3330 | |
3331 | err_free: |
3332 | rx_allocated = i; |
3333 | for (i = 0; i < rx_allocated; i++) { |
3334 | rx_ring = &rtwpci->rx_rings[i]; |
3335 | rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); |
3336 | } |
3337 | |
3338 | return ret; |
3339 | } |
3340 | |
3341 | static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, |
3342 | struct pci_dev *pdev) |
3343 | { |
3344 | int ret; |
3345 | |
3346 | ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); |
3347 | if (ret) { |
3348 | rtw89_err(rtwdev, "failed to alloc dma tx rings\n" ); |
3349 | goto err; |
3350 | } |
3351 | |
3352 | ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); |
3353 | if (ret) { |
3354 | rtw89_err(rtwdev, "failed to alloc dma rx rings\n" ); |
3355 | goto err_free_tx_rings; |
3356 | } |
3357 | |
3358 | return 0; |
3359 | |
3360 | err_free_tx_rings: |
3361 | rtw89_pci_free_tx_rings(rtwdev, pdev); |
3362 | err: |
3363 | return ret; |
3364 | } |
3365 | |
3366 | static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, |
3367 | struct rtw89_pci *rtwpci) |
3368 | { |
	skb_queue_head_init(&rtwpci->h2c_queue);
	skb_queue_head_init(&rtwpci->h2c_release_queue);
3371 | } |
3372 | |
3373 | static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, |
3374 | struct pci_dev *pdev) |
3375 | { |
3376 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3377 | int ret; |
3378 | |
3379 | ret = rtw89_pci_setup_mapping(rtwdev, pdev); |
3380 | if (ret) { |
3381 | rtw89_err(rtwdev, "failed to setup pci mapping\n" ); |
3382 | goto err; |
3383 | } |
3384 | |
3385 | ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); |
3386 | if (ret) { |
3387 | rtw89_err(rtwdev, "failed to alloc pci trx rings\n" ); |
3388 | goto err_pci_unmap; |
3389 | } |
3390 | |
3391 | rtw89_pci_h2c_init(rtwdev, rtwpci); |
3392 | |
3393 | spin_lock_init(&rtwpci->irq_lock); |
3394 | spin_lock_init(&rtwpci->trx_lock); |
3395 | |
3396 | return 0; |
3397 | |
3398 | err_pci_unmap: |
3399 | rtw89_pci_clear_mapping(rtwdev, pdev); |
3400 | err: |
3401 | return ret; |
3402 | } |
3403 | |
3404 | static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, |
3405 | struct pci_dev *pdev) |
3406 | { |
3407 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3408 | |
3409 | rtw89_pci_free_trx_rings(rtwdev, pdev); |
3410 | rtw89_pci_clear_mapping(rtwdev, pdev); |
3411 | rtw89_pci_release_fwcmd(rtwdev, rtwpci, |
				skb_queue_len(&rtwpci->h2c_queue), true);
3413 | } |
3414 | |
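/* Build the interrupt masks for the current driver state: while SER
 * recovery is underway only the HS0 indication interrupt stays armed;
 * otherwise the full TX/RX DMA interrupt set is enabled.
 */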
3415 | void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) |
3416 | { |
3417 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3418 | const struct rtw89_chip_info *chip = rtwdev->chip; |
3419 | u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; |
3420 | |
3421 | if (chip->chip_id == RTL8851B) |
3422 | hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; |
3423 | |
	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
3425 | |
3426 | if (rtwpci->under_recovery) { |
3427 | rtwpci->intrs[0] = hs0isr_ind_int_en; |
3428 | rtwpci->intrs[1] = 0; |
3429 | } else { |
3430 | rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | |
3431 | B_AX_RXDMA_INT_EN | |
3432 | B_AX_RXP1DMA_INT_EN | |
3433 | B_AX_RPQDMA_INT_EN | |
3434 | B_AX_RXDMA_STUCK_INT_EN | |
3435 | B_AX_RDU_INT_EN | |
3436 | B_AX_RPQBD_FULL_INT_EN | |
3437 | hs0isr_ind_int_en; |
3438 | |
3439 | rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; |
3440 | } |
3441 | } |
3442 | EXPORT_SYMBOL(rtw89_pci_config_intr_mask); |
3443 | |
3444 | static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) |
3445 | { |
3446 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3447 | |
3448 | rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; |
3449 | rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; |
3450 | rtwpci->intrs[0] = 0; |
3451 | rtwpci->intrs[1] = 0; |
3452 | } |
3453 | |
3454 | static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) |
3455 | { |
3456 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3457 | |
3458 | rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | |
3459 | B_AX_HS1ISR_IND_INT_EN | |
3460 | B_AX_HS0ISR_IND_INT_EN; |
3461 | rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; |
3462 | rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | |
3463 | B_AX_RXDMA_INT_EN | |
3464 | B_AX_RXP1DMA_INT_EN | |
3465 | B_AX_RPQDMA_INT_EN | |
3466 | B_AX_RXDMA_STUCK_INT_EN | |
3467 | B_AX_RDU_INT_EN | |
3468 | B_AX_RPQBD_FULL_INT_EN; |
3469 | rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; |
3470 | } |
3471 | |
3472 | static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) |
3473 | { |
3474 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3475 | |
3476 | rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | |
3477 | B_AX_HS0ISR_IND_INT_EN; |
3478 | rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; |
3479 | rtwpci->intrs[0] = 0; |
3480 | rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; |
3481 | } |
3482 | |
3483 | void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) |
3484 | { |
3485 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3486 | |
3487 | if (rtwpci->under_recovery) |
3488 | rtw89_pci_recovery_intr_mask_v1(rtwdev); |
3489 | else if (rtwpci->low_power) |
3490 | rtw89_pci_low_power_intr_mask_v1(rtwdev); |
3491 | else |
3492 | rtw89_pci_default_intr_mask_v1(rtwdev); |
3493 | } |
3494 | EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); |
3495 | |
3496 | static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) |
3497 | { |
3498 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3499 | |
3500 | rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; |
3501 | rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; |
3502 | rtwpci->intrs[0] = 0; |
3503 | rtwpci->intrs[1] = 0; |
3504 | } |
3505 | |
3506 | static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) |
3507 | { |
3508 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3509 | |
3510 | rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | |
3511 | B_BE_HS0_IND_INT_EN0; |
3512 | rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; |
3513 | rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | |
3514 | B_BE_RDU_CH0_INT_IMR_V1; |
3515 | rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | |
3516 | B_BE_PCIE_RX_RPQ0_IMR0_V1; |
3517 | } |
3518 | |
3519 | static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) |
3520 | { |
3521 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3522 | |
3523 | rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | |
3524 | B_BE_HS1_IND_INT_EN0; |
3525 | rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; |
3526 | rtwpci->intrs[0] = 0; |
3527 | rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | |
3528 | B_BE_PCIE_RX_RPQ0_IMR0_V1; |
3529 | } |
3530 | |
3531 | void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) |
3532 | { |
3533 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3534 | |
3535 | if (rtwpci->under_recovery) |
3536 | rtw89_pci_recovery_intr_mask_v2(rtwdev); |
3537 | else if (rtwpci->low_power) |
3538 | rtw89_pci_low_power_intr_mask_v2(rtwdev); |
3539 | else |
3540 | rtw89_pci_default_intr_mask_v2(rtwdev); |
3541 | } |
3542 | EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); |
3543 | |
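/* Allocate a single MSI vector (falling back to legacy INTx) and install
 * the threaded interrupt handler, then program the default reset-time
 * interrupt mask.
 */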
3544 | static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, |
3545 | struct pci_dev *pdev) |
3546 | { |
3547 | unsigned long flags = 0; |
3548 | int ret; |
3549 | |
3550 | flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; |
	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3552 | if (ret < 0) { |
3553 | rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n" , ret); |
3554 | goto err; |
3555 | } |
3556 | |
	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw89_pci_interrupt_handler,
					rtw89_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
3561 | if (ret) { |
3562 | rtw89_err(rtwdev, "failed to request threaded irq\n" ); |
3563 | goto err_free_vector; |
3564 | } |
3565 | |
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
3567 | |
3568 | return 0; |
3569 | |
3570 | err_free_vector: |
	pci_free_irq_vectors(pdev);
3572 | err: |
3573 | return ret; |
3574 | } |
3575 | |
3576 | static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, |
3577 | struct pci_dev *pdev) |
3578 | { |
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
3581 | } |
3582 | |
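/* Decode a Gray-coded register field: each output bit is the input bit
 * XORed with the next-higher input bit, with the top bit passed through
 * unchanged (e.g. for bit_num = 4, 0b0110 maps to 0b0101).
 */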
3583 | static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) |
3584 | { |
3585 | u16 bin = 0, gray_bit; |
3586 | u32 bit_idx; |
3587 | |
3588 | for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { |
3589 | gray_bit = (gray_code >> bit_idx) & 0x1; |
3590 | if (bit_num - bit_idx > 1) |
3591 | gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; |
3592 | bin |= (gray_bit << bit_idx); |
3593 | } |
3594 | |
3595 | return bin; |
3596 | } |
3597 | |
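/* RTL8852C only: when the PCIe link trained to Gen2 and ASPM L1 is not
 * already in control, read back the equalizer filter-out value (stored
 * Gray-coded in the RAC analog registers), rewrite it as plain binary,
 * and enable the PHY power-save bits.
 */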
3598 | static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) |
3599 | { |
3600 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3601 | struct pci_dev *pdev = rtwpci->pdev; |
3602 | u16 val16, filter_out_val; |
3603 | u32 val, phy_offset; |
3604 | int ret; |
3605 | |
3606 | if (rtwdev->chip->chip_id != RTL8852C) |
3607 | return 0; |
3608 | |
3609 | val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); |
3610 | if (val == B_AX_ASPM_CTRL_L1) |
3611 | return 0; |
3612 | |
	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
3614 | if (ret) |
3615 | return ret; |
3616 | |
3617 | val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); |
3618 | if (val == RTW89_PCIE_GEN1_SPEED) { |
3619 | phy_offset = R_RAC_DIRECT_OFFSET_G1; |
3620 | } else if (val == RTW89_PCIE_GEN2_SPEED) { |
3621 | phy_offset = R_RAC_DIRECT_OFFSET_G2; |
		val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
				  val16 | B_PCIE_BIT_PINOUT_DIS);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
				  val16 & ~B_PCIE_BIT_RD_SEL);
3627 | |
		val16 = rtw89_read16_mask(rtwdev,
					  phy_offset + RAC_ANA1F * RAC_MULT,
					  FILTER_OUT_EQ_MASK);
		val16 = gray_code_to_bin(val16, hweight16(val16));
		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
					      RAC_MULT);
3634 | filter_out_val &= ~REG_FILTER_OUT_MASK; |
3635 | filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); |
3636 | |
		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
			      filter_out_val);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
				  B_BAC_EQ_SEL);
3641 | rtw89_write16_set(rtwdev, |
3642 | R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, |
3643 | B_PCIE_BIT_PSAVE); |
3644 | } else { |
3645 | return -EOPNOTSUPP; |
3646 | } |
	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
			  B_PCIE_BIT_PSAVE);
3649 | |
3650 | return 0; |
3651 | } |
3652 | |
3653 | static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) |
3654 | { |
3655 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3656 | const struct rtw89_pci_gen_def *gen_def = info->gen_def; |
3657 | |
3658 | if (rtw89_pci_disable_clkreq) |
3659 | return; |
3660 | |
3661 | gen_def->clkreq_set(rtwdev, enable); |
3662 | } |
3663 | |
3664 | static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) |
3665 | { |
3666 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
3667 | int ret; |
3668 | |
	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
					  PCIE_CLKDLY_HW_30US);
3671 | if (ret) |
3672 | rtw89_err(rtwdev, "failed to set CLKREQ Delay\n" ); |
3673 | |
3674 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { |
3675 | if (enable) |
3676 | ret = rtw89_pci_config_byte_set(rtwdev, |
3677 | RTW89_PCIE_L1_CTRL, |
3678 | RTW89_PCIE_BIT_CLK); |
3679 | else |
3680 | ret = rtw89_pci_config_byte_clr(rtwdev, |
3681 | RTW89_PCIE_L1_CTRL, |
3682 | RTW89_PCIE_BIT_CLK); |
3683 | if (ret) |
3684 | rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d" , |
3685 | enable ? "set" : "unset" , ret); |
3686 | } else if (chip_id == RTL8852C) { |
3687 | rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, |
3688 | B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); |
3689 | if (enable) |
3690 | rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, |
3691 | B_AX_CLK_REQ_N); |
3692 | else |
3693 | rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, |
3694 | B_AX_CLK_REQ_N); |
3695 | } |
3696 | } |
3697 | |
3698 | static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) |
3699 | { |
3700 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3701 | const struct rtw89_pci_gen_def *gen_def = info->gen_def; |
3702 | |
3703 | if (rtw89_pci_disable_aspm_l1) |
3704 | return; |
3705 | |
3706 | gen_def->aspm_set(rtwdev, enable); |
3707 | } |
3708 | |
3709 | static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) |
3710 | { |
3711 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
3712 | u8 value = 0; |
3713 | int ret; |
3714 | |
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3716 | if (ret) |
3717 | rtw89_warn(rtwdev, "failed to read ASPM Delay\n" ); |
3718 | |
	u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
	u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
3721 | |
	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3723 | if (ret) |
3724 | rtw89_warn(rtwdev, "failed to read ASPM Delay\n" ); |
3725 | |
3726 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { |
3727 | if (enable) |
3728 | ret = rtw89_pci_config_byte_set(rtwdev, |
3729 | RTW89_PCIE_L1_CTRL, |
3730 | RTW89_PCIE_BIT_L1); |
3731 | else |
3732 | ret = rtw89_pci_config_byte_clr(rtwdev, |
3733 | RTW89_PCIE_L1_CTRL, |
3734 | RTW89_PCIE_BIT_L1); |
3735 | } else if (chip_id == RTL8852C) { |
3736 | if (enable) |
3737 | rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, |
3738 | B_AX_ASPM_CTRL_L1); |
3739 | else |
3740 | rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, |
3741 | B_AX_ASPM_CTRL_L1); |
3742 | } |
3743 | if (ret) |
3744 | rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d" , |
3745 | enable ? "set" : "unset" , ret); |
3746 | } |
3747 | |
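/* Interrupt mitigation: once TX or RX traffic reaches the HIGH level,
 * coalesce RX interrupts (on AX chips, fire after RXBD_NUM_MAX / 2
 * packets or a 2048 us timeout in 64 us units); while scanning or under
 * light traffic, leave mitigation off so RX latency stays low.
 */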
3748 | static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) |
3749 | { |
3750 | enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; |
3751 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3752 | struct rtw89_traffic_stats *stats = &rtwdev->stats; |
3753 | enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; |
3754 | enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; |
3755 | u32 val = 0; |
3756 | |
3757 | if (rtwdev->scanning || |
3758 | (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) |
3759 | goto out; |
3760 | |
3761 | if (chip_gen == RTW89_CHIP_BE) |
3762 | val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; |
3763 | else |
3764 | val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | |
3765 | FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | |
3766 | FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | |
3767 | FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); |
3768 | |
3769 | out: |
	rtw89_write32(rtwdev, info->mit_addr, val);
3771 | } |
3772 | |
3773 | static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) |
3774 | { |
3775 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3776 | struct pci_dev *pdev = rtwpci->pdev; |
3777 | u16 link_ctrl; |
3778 | int ret; |
3779 | |
	/* Although the standard PCIe configuration space carries the link
	 * control register, by Realtek's design the driver must also check
	 * whether the host supports CLKREQ/ASPM before enabling the HW module.
	 *
	 * Two associated HW modules implement these features: one accesses
	 * the PCIe configuration space to follow the host settings, and the
	 * other actually performs the CLKREQ/ASPM mechanisms. The latter is
	 * disabled by default, because if the host does not support the
	 * feature or is wrongly configured (e.g. CLKREQ# not bi-directional),
	 * a misbehaving link could make the device drop off the bus.
	 *
	 * Hence the driver first checks that the PCIe configuration space is
	 * synced and enabled, and only then turns on the module that actually
	 * drives the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
3797 | if (ret) { |
3798 | rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n" , ret); |
3799 | return; |
3800 | } |
3801 | |
3802 | if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) |
		rtw89_pci_clkreq_set(rtwdev, true);
3804 | |
3805 | if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) |
		rtw89_pci_aspm_set(rtwdev, true);
3807 | } |
3808 | |
3809 | static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) |
3810 | { |
3811 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3812 | const struct rtw89_pci_gen_def *gen_def = info->gen_def; |
3813 | |
3814 | if (rtw89_pci_disable_l1ss) |
3815 | return; |
3816 | |
3817 | gen_def->l1ss_set(rtwdev, enable); |
3818 | } |
3819 | |
3820 | static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) |
3821 | { |
3822 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
3823 | int ret; |
3824 | |
3825 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { |
3826 | if (enable) |
3827 | ret = rtw89_pci_config_byte_set(rtwdev, |
3828 | RTW89_PCIE_TIMER_CTRL, |
3829 | RTW89_PCIE_BIT_L1SUB); |
3830 | else |
3831 | ret = rtw89_pci_config_byte_clr(rtwdev, |
3832 | RTW89_PCIE_TIMER_CTRL, |
3833 | RTW89_PCIE_BIT_L1SUB); |
3834 | if (ret) |
3835 | rtw89_err(rtwdev, "failed to %s L1SS, ret=%d" , |
3836 | enable ? "set" : "unset" , ret); |
3837 | } else if (chip_id == RTL8852C) { |
3838 | ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, |
3839 | RTW89_PCIE_BIT_ASPM_L11 | |
3840 | RTW89_PCIE_BIT_PCI_L11); |
3841 | if (ret) |
3842 | rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d" , ret); |
3843 | if (enable) |
3844 | rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, |
3845 | B_AX_L1SUB_DISABLE); |
3846 | else |
3847 | rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, |
3848 | B_AX_L1SUB_DISABLE); |
3849 | } |
3850 | } |
3851 | |
3852 | static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) |
3853 | { |
3854 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3855 | struct pci_dev *pdev = rtwpci->pdev; |
3856 | u32 l1ss_cap_ptr, l1ss_ctrl; |
3857 | |
3858 | if (rtw89_pci_disable_l1ss) |
3859 | return; |
3860 | |
	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3862 | if (!l1ss_cap_ptr) |
3863 | return; |
3864 | |
	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
3866 | |
3867 | if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) |
		rtw89_pci_l1ss_set(rtwdev, true);
3869 | } |
3870 | |
3871 | static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev) |
3872 | { |
3873 | int ret = 0; |
3874 | u32 sts; |
3875 | u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; |
3876 | |
3877 | ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, |
3878 | 10, 1000, false, rtwdev, |
3879 | R_AX_PCIE_DMA_BUSY1); |
3880 | if (ret) { |
3881 | rtw89_err(rtwdev, "pci dmach busy1 0x%X\n" , |
3882 | rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); |
3883 | return -EINVAL; |
3884 | } |
3885 | return ret; |
3886 | } |
3887 | |
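/* Level-1 reset, stop stage: disable all PCIe DMA and wait for the IO
 * engines to go idle; if they stay busy, pause the stuck TX/RX HCI DMA
 * paths, restart them, and poll once more.
 */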
3888 | static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev) |
3889 | { |
3890 | u32 val; |
3891 | int ret; |
3892 | |
3893 | if (rtwdev->chip->chip_id == RTL8852C) |
3894 | return 0; |
3895 | |
	rtw89_pci_ctrl_dma_all(rtwdev, false);
3897 | ret = rtw89_pci_poll_io_idle_ax(rtwdev); |
3898 | if (ret) { |
3899 | val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); |
		rtw89_debug(rtwdev, RTW89_DBG_HCI,
			    "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
3902 | R_AX_DBG_ERR_FLAG, val); |
3903 | if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) |
			rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
3905 | if (val & B_AX_RX_STUCK) |
			rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
		rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
3908 | ret = rtw89_pci_poll_io_idle_ax(rtwdev); |
3909 | val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); |
		rtw89_debug(rtwdev, RTW89_DBG_HCI,
			    "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
3912 | R_AX_DBG_ERR_FLAG, val); |
3913 | } |
3914 | |
3915 | return ret; |
3916 | } |
3917 | |
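/* Level-1 reset, restart stage: bounce HCI DMA, clear every ring index,
 * reset the BD RAM, then re-enable PCIe DMA.
 */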
3918 | static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) |
3919 | { |
	int ret;
3921 | |
3922 | if (rtwdev->chip->chip_id == RTL8852C) |
3923 | return 0; |
3924 | |
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
3927 | rtw89_pci_clr_idx_all(rtwdev); |
3928 | |
3929 | ret = rtw89_pci_rst_bdram_ax(rtwdev); |
3930 | if (ret) |
3931 | return ret; |
3932 | |
	rtw89_pci_ctrl_dma_all(rtwdev, true);
3934 | return ret; |
3935 | } |
3936 | |
3937 | static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, |
3938 | enum rtw89_lv1_rcvy_step step) |
3939 | { |
3940 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3941 | const struct rtw89_pci_gen_def *gen_def = info->gen_def; |
3942 | int ret; |
3943 | |
3944 | switch (step) { |
3945 | case RTW89_LV1_RCVY_STEP_1: |
3946 | ret = gen_def->lv1rst_stop_dma(rtwdev); |
3947 | if (ret) |
3948 | rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n" ); |
3949 | |
3950 | break; |
3951 | |
3952 | case RTW89_LV1_RCVY_STEP_2: |
3953 | ret = gen_def->lv1rst_start_dma(rtwdev); |
3954 | if (ret) |
3955 | rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n" ); |
3956 | break; |
3957 | |
3958 | default: |
3959 | return -EINVAL; |
3960 | } |
3961 | |
3962 | return ret; |
3963 | } |
3964 | |
3965 | static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) |
3966 | { |
3967 | if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) |
3968 | return; |
3969 | |
3970 | if (rtwdev->chip->chip_id == RTL8852C) { |
3971 | rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n" , |
3972 | rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1)); |
3973 | rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n" , |
3974 | rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1)); |
3975 | } else { |
3976 | rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n" , |
3977 | rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); |
3978 | rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n" , |
3979 | rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); |
3980 | rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n" , |
3981 | rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); |
3982 | } |
3983 | } |
3984 | |
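/* NAPI poll: service the release-report queue (RPQ) first to reclaim TX
 * resources, then the RX queue; interrupts are re-enabled only once the
 * poll completes under budget.
 */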
3985 | static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) |
3986 | { |
3987 | struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); |
3988 | struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; |
3989 | const struct rtw89_pci_info *info = rtwdev->pci_info; |
3990 | const struct rtw89_pci_gen_def *gen_def = info->gen_def; |
3991 | unsigned long flags; |
3992 | int work_done; |
3993 | |
3994 | rtwdev->napi_budget_countdown = budget; |
3995 | |
	rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
3998 | if (work_done == budget) |
3999 | return budget; |
4000 | |
	rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
4004 | spin_lock_irqsave(&rtwpci->irq_lock, flags); |
4005 | if (likely(rtwpci->running)) |
4006 | rtw89_chip_enable_intr(rtwdev, rtwpci); |
		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
4008 | } |
4009 | |
4010 | return work_done; |
4011 | } |
4012 | |
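/* PM quirks: on 8852A/8852B/8851B latch the PERST and link training
 * state across suspend; on other chips drop the L1-exit controls
 * instead. (Register intent inferred from the bit names; see the
 * matching undo sequence in rtw89_pci_resume() below.)
 */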
4013 | static int __maybe_unused rtw89_pci_suspend(struct device *dev) |
4014 | { |
4015 | struct ieee80211_hw *hw = dev_get_drvdata(dev); |
4016 | struct rtw89_dev *rtwdev = hw->priv; |
4017 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
4018 | |
4019 | rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); |
4020 | rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); |
4021 | rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); |
4022 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { |
4023 | rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, |
4024 | B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); |
4025 | rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, |
4026 | B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); |
4027 | } else { |
4028 | rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, |
4029 | B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); |
4030 | } |
4031 | |
4032 | return 0; |
4033 | } |
4034 | |
4035 | static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) |
4036 | { |
4037 | if (rtwdev->chip->chip_id == RTL8852C) |
4038 | return; |
4039 | |
	/* Hardware requires this register to be written twice for the
	 * setting to take effect.
	 */
4041 | rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, |
4042 | RTW89_PCIE_BIT_CFG_RST_MSTATE); |
4043 | rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, |
4044 | RTW89_PCIE_BIT_CFG_RST_MSTATE); |
4045 | } |
4046 | |
4047 | static int __maybe_unused rtw89_pci_resume(struct device *dev) |
4048 | { |
4049 | struct ieee80211_hw *hw = dev_get_drvdata(dev); |
4050 | struct rtw89_dev *rtwdev = hw->priv; |
4051 | enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; |
4052 | |
4053 | rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); |
4054 | rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); |
4055 | rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); |
4056 | if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { |
4057 | rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, |
4058 | B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); |
4059 | rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, |
4060 | B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); |
4061 | } else { |
4062 | rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, |
4063 | B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); |
4064 | rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, |
4065 | B_AX_SEL_REQ_ENTR_L1); |
4066 | } |
4067 | rtw89_pci_l2_hci_ldo(rtwdev); |
4068 | rtw89_pci_filter_out(rtwdev); |
4069 | rtw89_pci_link_cfg(rtwdev); |
4070 | rtw89_pci_l1ss_cfg(rtwdev); |
4071 | |
4072 | return 0; |
4073 | } |
4074 | |
4075 | SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); |
4076 | EXPORT_SYMBOL(rtw89_pm_ops); |
4077 | |
4078 | const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { |
4079 | .isr_rdu = B_AX_RDU_INT, |
4080 | .isr_halt_c2h = B_AX_HALT_C2H_INT_EN, |
4081 | .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN, |
4082 | .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT}, |
4083 | .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | |
4084 | B_AX_RDU_INT}, |
4085 | |
4086 | .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, |
4087 | .mac_pre_deinit = NULL, |
4088 | .mac_post_init = rtw89_pci_ops_mac_post_init_ax, |
4089 | |
4090 | .clr_idx_all = rtw89_pci_clr_idx_all_ax, |
4091 | .rst_bdram = rtw89_pci_rst_bdram_ax, |
4092 | |
4093 | .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax, |
4094 | .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax, |
4095 | |
4096 | .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax, |
4097 | .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax, |
4098 | .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax, |
4099 | |
4100 | .aspm_set = rtw89_pci_aspm_set_ax, |
4101 | .clkreq_set = rtw89_pci_clkreq_set_ax, |
4102 | .l1ss_set = rtw89_pci_l1ss_set_ax, |
4103 | }; |
4104 | EXPORT_SYMBOL(rtw89_pci_gen_ax); |
4105 | |
4106 | static const struct rtw89_hci_ops rtw89_pci_ops = { |
4107 | .tx_write = rtw89_pci_ops_tx_write, |
4108 | .tx_kick_off = rtw89_pci_ops_tx_kick_off, |
4109 | .flush_queues = rtw89_pci_ops_flush_queues, |
4110 | .reset = rtw89_pci_ops_reset, |
4111 | .start = rtw89_pci_ops_start, |
4112 | .stop = rtw89_pci_ops_stop, |
4113 | .pause = rtw89_pci_ops_pause, |
4114 | .switch_mode = rtw89_pci_ops_switch_mode, |
4115 | .recalc_int_mit = rtw89_pci_recalc_int_mit, |
4116 | |
4117 | .read8 = rtw89_pci_ops_read8, |
4118 | .read16 = rtw89_pci_ops_read16, |
4119 | .read32 = rtw89_pci_ops_read32, |
4120 | .write8 = rtw89_pci_ops_write8, |
4121 | .write16 = rtw89_pci_ops_write16, |
4122 | .write32 = rtw89_pci_ops_write32, |
4123 | |
4124 | .mac_pre_init = rtw89_pci_ops_mac_pre_init, |
4125 | .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit, |
4126 | .mac_post_init = rtw89_pci_ops_mac_post_init, |
4127 | .deinit = rtw89_pci_ops_deinit, |
4128 | |
4129 | .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, |
4130 | .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, |
4131 | .dump_err_status = rtw89_pci_ops_dump_err_status, |
4132 | .napi_poll = rtw89_pci_napi_poll, |
4133 | |
4134 | .recovery_start = rtw89_pci_ops_recovery_start, |
4135 | .recovery_complete = rtw89_pci_ops_recovery_complete, |
4136 | |
4137 | .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch, |
4138 | .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch, |
4139 | .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, |
4140 | .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle, |
4141 | |
4142 | .clr_idx_all = rtw89_pci_clr_idx_all, |
4143 | .clear = rtw89_pci_clear_resource, |
4144 | .disable_intr = rtw89_pci_disable_intr_lock, |
4145 | .enable_intr = rtw89_pci_enable_intr_lock, |
4146 | .rst_bdram = rtw89_pci_reset_bdram, |
4147 | }; |
4148 | |
4149 | int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
4150 | { |
4151 | struct rtw89_dev *rtwdev; |
4152 | const struct rtw89_driver_info *info; |
4153 | const struct rtw89_pci_info *pci_info; |
4154 | int ret; |
4155 | |
4156 | info = (const struct rtw89_driver_info *)id->driver_data; |
4157 | |
	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
					  sizeof(struct rtw89_pci),
					  info->chip);
4161 | if (!rtwdev) { |
4162 | dev_err(&pdev->dev, "failed to allocate hw\n" ); |
4163 | return -ENOMEM; |
4164 | } |
4165 | |
4166 | pci_info = info->bus.pci; |
4167 | |
4168 | rtwdev->pci_info = info->bus.pci; |
4169 | rtwdev->hci.ops = &rtw89_pci_ops; |
4170 | rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; |
4171 | rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; |
4172 | rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; |
4173 | |
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
4175 | |
4176 | ret = rtw89_core_init(rtwdev); |
4177 | if (ret) { |
4178 | rtw89_err(rtwdev, "failed to initialise core\n" ); |
4179 | goto err_release_hw; |
4180 | } |
4181 | |
4182 | ret = rtw89_pci_claim_device(rtwdev, pdev); |
4183 | if (ret) { |
4184 | rtw89_err(rtwdev, "failed to claim pci device\n" ); |
4185 | goto err_core_deinit; |
4186 | } |
4187 | |
4188 | ret = rtw89_pci_setup_resource(rtwdev, pdev); |
4189 | if (ret) { |
4190 | rtw89_err(rtwdev, "failed to setup pci resource\n" ); |
4191 | goto err_declaim_pci; |
4192 | } |
4193 | |
4194 | ret = rtw89_chip_info_setup(rtwdev); |
4195 | if (ret) { |
4196 | rtw89_err(rtwdev, "failed to setup chip information\n" ); |
4197 | goto err_clear_resource; |
4198 | } |
4199 | |
4200 | rtw89_pci_filter_out(rtwdev); |
4201 | rtw89_pci_link_cfg(rtwdev); |
4202 | rtw89_pci_l1ss_cfg(rtwdev); |
4203 | |
4204 | rtw89_core_napi_init(rtwdev); |
4205 | |
4206 | ret = rtw89_pci_request_irq(rtwdev, pdev); |
4207 | if (ret) { |
4208 | rtw89_err(rtwdev, "failed to request pci irq\n" ); |
4209 | goto err_deinit_napi; |
4210 | } |
4211 | |
4212 | ret = rtw89_core_register(rtwdev); |
4213 | if (ret) { |
4214 | rtw89_err(rtwdev, "failed to register core\n" ); |
4215 | goto err_free_irq; |
4216 | } |
4217 | |
	set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
4219 | |
4220 | return 0; |
4221 | |
4222 | err_free_irq: |
4223 | rtw89_pci_free_irq(rtwdev, pdev); |
4224 | err_deinit_napi: |
4225 | rtw89_core_napi_deinit(rtwdev); |
4226 | err_clear_resource: |
4227 | rtw89_pci_clear_resource(rtwdev, pdev); |
4228 | err_declaim_pci: |
4229 | rtw89_pci_declaim_device(rtwdev, pdev); |
4230 | err_core_deinit: |
4231 | rtw89_core_deinit(rtwdev); |
4232 | err_release_hw: |
4233 | rtw89_free_ieee80211_hw(rtwdev); |
4234 | |
4235 | return ret; |
4236 | } |
4237 | EXPORT_SYMBOL(rtw89_pci_probe); |
4238 | |
4239 | void rtw89_pci_remove(struct pci_dev *pdev) |
4240 | { |
4241 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); |
4242 | struct rtw89_dev *rtwdev; |
4243 | |
4244 | rtwdev = hw->priv; |
4245 | |
4246 | rtw89_pci_free_irq(rtwdev, pdev); |
4247 | rtw89_core_napi_deinit(rtwdev); |
4248 | rtw89_core_unregister(rtwdev); |
4249 | rtw89_pci_clear_resource(rtwdev, pdev); |
4250 | rtw89_pci_declaim_device(rtwdev, pdev); |
4251 | rtw89_core_deinit(rtwdev); |
4252 | rtw89_free_ieee80211_hw(rtwdev); |
4253 | } |
4254 | EXPORT_SYMBOL(rtw89_pci_remove); |
4255 | |
4256 | MODULE_AUTHOR("Realtek Corporation" ); |
4257 | MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver" ); |
4258 | MODULE_LICENSE("Dual BSD/GPL" ); |
4259 | |