1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
2 | /* Copyright(c) 2018-2019 Realtek Corporation |
3 | */ |
4 | |
5 | #include <linux/module.h> |
6 | #include <linux/pci.h> |
7 | #include "main.h" |
8 | #include "pci.h" |
9 | #include "reg.h" |
10 | #include "tx.h" |
11 | #include "rx.h" |
12 | #include "fw.h" |
13 | #include "ps.h" |
14 | #include "debug.h" |
15 | |
16 | static bool rtw_disable_msi; |
17 | static bool rtw_pci_disable_aspm; |
18 | module_param_named(disable_msi, rtw_disable_msi, bool, 0644); |
19 | module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644); |
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
22 | |
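/* Per-queue TX buffer descriptor (TXBD) index registers: the driver kicks
 * the ring write pointer through these, and reads the hardware read
 * pointer back from them.
 */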
23 | static u32 rtw_pci_tx_queue_idx_addr[] = { |
24 | [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, |
25 | [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ, |
26 | [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ, |
27 | [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ, |
28 | [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ, |
29 | [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q, |
30 | [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ, |
31 | }; |
32 | |
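/* Select the QSEL value for the TX descriptor based on the TX queue. Data
 * queues fall through to skb->priority, which mac80211 sets to the frame's
 * 802.11 TID.
 */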
33 | static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, |
34 | enum rtw_tx_queue_type queue) |
35 | { |
36 | switch (queue) { |
37 | case RTW_TX_QUEUE_BCN: |
38 | return TX_DESC_QSEL_BEACON; |
39 | case RTW_TX_QUEUE_H2C: |
40 | return TX_DESC_QSEL_H2C; |
41 | case RTW_TX_QUEUE_MGMT: |
42 | return TX_DESC_QSEL_MGMT; |
43 | case RTW_TX_QUEUE_HI0: |
44 | return TX_DESC_QSEL_HIGH; |
45 | default: |
46 | return skb->priority; |
47 | } |
48 | }; |
49 | |
50 | static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr) |
51 | { |
52 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
53 | |
	return readb(rtwpci->mmap + addr);
55 | } |
56 | |
57 | static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr) |
58 | { |
59 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
60 | |
	return readw(rtwpci->mmap + addr);
62 | } |
63 | |
64 | static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr) |
65 | { |
66 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
67 | |
	return readl(rtwpci->mmap + addr);
69 | } |
70 | |
71 | static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val) |
72 | { |
73 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
74 | |
	writeb(val, rtwpci->mmap + addr);
76 | } |
77 | |
78 | static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val) |
79 | { |
80 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
81 | |
	writew(val, rtwpci->mmap + addr);
83 | } |
84 | |
85 | static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) |
86 | { |
87 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
88 | |
	writel(val, rtwpci->mmap + addr);
90 | } |
91 | |
92 | static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev, |
93 | struct rtw_pci_tx_ring *tx_ring) |
94 | { |
95 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
96 | struct rtw_pci_tx_data *tx_data; |
97 | struct sk_buff *skb, *tmp; |
98 | dma_addr_t dma; |
99 | |
	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
103 | tx_data = rtw_pci_get_tx_data(skb); |
104 | dma = tx_data->dma; |
105 | |
106 | dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE); |
107 | dev_kfree_skb_any(skb); |
108 | } |
109 | } |
110 | |
111 | static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev, |
112 | struct rtw_pci_tx_ring *tx_ring) |
113 | { |
114 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
115 | u8 *head = tx_ring->r.head; |
116 | u32 len = tx_ring->r.len; |
117 | int ring_sz = len * tx_ring->r.desc_size; |
118 | |
119 | rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); |
120 | |
121 | /* free the ring itself */ |
	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
123 | tx_ring->r.head = NULL; |
124 | } |
125 | |
126 | static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev, |
127 | struct rtw_pci_rx_ring *rx_ring) |
128 | { |
129 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
130 | struct sk_buff *skb; |
131 | int buf_sz = RTK_PCI_RX_BUF_SIZE; |
132 | dma_addr_t dma; |
133 | int i; |
134 | |
135 | for (i = 0; i < rx_ring->r.len; i++) { |
136 | skb = rx_ring->buf[i]; |
137 | if (!skb) |
138 | continue; |
139 | |
140 | dma = *((dma_addr_t *)skb->cb); |
141 | dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); |
142 | dev_kfree_skb(skb); |
143 | rx_ring->buf[i] = NULL; |
144 | } |
145 | } |
146 | |
147 | static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev, |
148 | struct rtw_pci_rx_ring *rx_ring) |
149 | { |
150 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
151 | u8 *head = rx_ring->r.head; |
152 | int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; |
153 | |
154 | rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring); |
155 | |
	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
157 | } |
158 | |
159 | static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev) |
160 | { |
161 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
162 | struct rtw_pci_tx_ring *tx_ring; |
163 | struct rtw_pci_rx_ring *rx_ring; |
164 | int i; |
165 | |
166 | for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { |
167 | tx_ring = &rtwpci->tx_rings[i]; |
168 | rtw_pci_free_tx_ring(rtwdev, tx_ring); |
169 | } |
170 | |
171 | for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) { |
172 | rx_ring = &rtwpci->rx_rings[i]; |
173 | rtw_pci_free_rx_ring(rtwdev, rx_ring); |
174 | } |
175 | } |
176 | |
177 | static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev, |
178 | struct rtw_pci_tx_ring *tx_ring, |
179 | u8 desc_size, u32 len) |
180 | { |
181 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
182 | int ring_sz = desc_size * len; |
183 | dma_addr_t dma; |
184 | u8 *head; |
185 | |
186 | if (len > TRX_BD_IDX_MASK) { |
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
188 | return -EINVAL; |
189 | } |
190 | |
	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
198 | tx_ring->r.head = head; |
199 | tx_ring->r.dma = dma; |
200 | tx_ring->r.len = len; |
201 | tx_ring->r.desc_size = desc_size; |
202 | tx_ring->r.wp = 0; |
203 | tx_ring->r.rp = 0; |
204 | |
205 | return 0; |
206 | } |
207 | |
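/* Attach an skb to the RX buffer descriptor at @idx: map the buffer for
 * device DMA, stash the DMA address in skb->cb, and program the descriptor
 * with the buffer size and address.
 */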
208 | static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, |
209 | struct rtw_pci_rx_ring *rx_ring, |
210 | u32 idx, u32 desc_sz) |
211 | { |
212 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
213 | struct rtw_pci_rx_buffer_desc *buf_desc; |
214 | int buf_sz = RTK_PCI_RX_BUF_SIZE; |
215 | dma_addr_t dma; |
216 | |
217 | if (!skb) |
218 | return -EINVAL; |
219 | |
220 | dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); |
	if (dma_mapping_error(&pdev->dev, dma))
222 | return -EBUSY; |
223 | |
224 | *((dma_addr_t *)skb->cb) = dma; |
225 | buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + |
226 | idx * desc_sz); |
227 | memset(buf_desc, 0, sizeof(*buf_desc)); |
228 | buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); |
229 | buf_desc->dma = cpu_to_le32(dma); |
230 | |
231 | return 0; |
232 | } |
233 | |
234 | static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, |
235 | struct rtw_pci_rx_ring *rx_ring, |
236 | u32 idx, u32 desc_sz) |
237 | { |
238 | struct device *dev = rtwdev->dev; |
239 | struct rtw_pci_rx_buffer_desc *buf_desc; |
240 | int buf_sz = RTK_PCI_RX_BUF_SIZE; |
241 | |
	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
243 | |
244 | buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + |
245 | idx * desc_sz); |
246 | memset(buf_desc, 0, sizeof(*buf_desc)); |
247 | buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); |
248 | buf_desc->dma = cpu_to_le32(dma); |
249 | } |
250 | |
251 | static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, |
252 | struct rtw_pci_rx_ring *rx_ring, |
253 | u8 desc_size, u32 len) |
254 | { |
255 | struct pci_dev *pdev = to_pci_dev(rtwdev->dev); |
256 | struct sk_buff *skb = NULL; |
257 | dma_addr_t dma; |
258 | u8 *head; |
259 | int ring_sz = desc_size * len; |
260 | int buf_sz = RTK_PCI_RX_BUF_SIZE; |
261 | int i, allocated; |
262 | int ret = 0; |
263 | |
	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
267 | return -ENOMEM; |
268 | } |
269 | rx_ring->r.head = head; |
270 | |
271 | for (i = 0; i < len; i++) { |
		skb = dev_alloc_skb(buf_sz);
273 | if (!skb) { |
274 | allocated = i; |
275 | ret = -ENOMEM; |
276 | goto err_out; |
277 | } |
278 | |
279 | memset(skb->data, 0, buf_sz); |
280 | rx_ring->buf[i] = skb; |
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
282 | if (ret) { |
283 | allocated = i; |
284 | dev_kfree_skb_any(skb); |
285 | goto err_out; |
286 | } |
287 | } |
288 | |
289 | rx_ring->r.dma = dma; |
290 | rx_ring->r.len = len; |
291 | rx_ring->r.desc_size = desc_size; |
292 | rx_ring->r.wp = 0; |
293 | rx_ring->r.rp = 0; |
294 | |
295 | return 0; |
296 | |
297 | err_out: |
298 | for (i = 0; i < allocated; i++) { |
299 | skb = rx_ring->buf[i]; |
300 | if (!skb) |
301 | continue; |
302 | dma = *((dma_addr_t *)skb->cb); |
303 | dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); |
304 | dev_kfree_skb_any(skb); |
305 | rx_ring->buf[i] = NULL; |
306 | } |
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");
310 | |
311 | return ret; |
312 | } |
313 | |
314 | static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev) |
315 | { |
316 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
317 | struct rtw_pci_tx_ring *tx_ring; |
318 | struct rtw_pci_rx_ring *rx_ring; |
319 | const struct rtw_chip_info *chip = rtwdev->chip; |
320 | int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0; |
321 | int tx_desc_size, rx_desc_size; |
322 | u32 len; |
323 | int ret; |
324 | |
325 | tx_desc_size = chip->tx_buf_desc_sz; |
326 | |
327 | for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { |
328 | tx_ring = &rtwpci->tx_rings[i]; |
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
331 | if (ret) |
332 | goto out; |
333 | } |
334 | |
335 | rx_desc_size = chip->rx_buf_desc_sz; |
336 | |
337 | for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) { |
338 | rx_ring = &rtwpci->rx_rings[j]; |
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
340 | RTK_MAX_RX_DESC_NUM); |
341 | if (ret) |
342 | goto out; |
343 | } |
344 | |
345 | return 0; |
346 | |
347 | out: |
348 | tx_alloced = i; |
349 | for (i = 0; i < tx_alloced; i++) { |
350 | tx_ring = &rtwpci->tx_rings[i]; |
351 | rtw_pci_free_tx_ring(rtwdev, tx_ring); |
352 | } |
353 | |
354 | rx_alloced = j; |
355 | for (j = 0; j < rx_alloced; j++) { |
356 | rx_ring = &rtwpci->rx_rings[j]; |
357 | rtw_pci_free_rx_ring(rtwdev, rx_ring); |
358 | } |
359 | |
360 | return ret; |
361 | } |
362 | |
363 | static void rtw_pci_deinit(struct rtw_dev *rtwdev) |
364 | { |
365 | rtw_pci_free_trx_ring(rtwdev); |
366 | } |
367 | |
368 | static int rtw_pci_init(struct rtw_dev *rtwdev) |
369 | { |
370 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
371 | int ret = 0; |
372 | |
373 | rtwpci->irq_mask[0] = IMR_HIGHDOK | |
374 | IMR_MGNTDOK | |
375 | IMR_BKDOK | |
376 | IMR_BEDOK | |
377 | IMR_VIDOK | |
378 | IMR_VODOK | |
379 | IMR_ROK | |
380 | IMR_BCNDMAINT_E | |
381 | IMR_C2HCMD | |
382 | 0; |
383 | rtwpci->irq_mask[1] = IMR_TXFOVW | |
384 | 0; |
385 | rtwpci->irq_mask[3] = IMR_H2CDOK | |
386 | 0; |
387 | spin_lock_init(&rtwpci->irq_lock); |
388 | spin_lock_init(&rtwpci->hwirq_lock); |
389 | ret = rtw_pci_init_trx_ring(rtwdev); |
390 | |
391 | return ret; |
392 | } |
393 | |
394 | static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev) |
395 | { |
396 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
397 | u32 len; |
398 | u8 tmp; |
399 | dma_addr_t dma; |
400 | |
401 | tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3); |
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
406 | |
407 | if (!rtw_chip_wcpu_11n(rtwdev)) { |
408 | len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; |
409 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; |
410 | rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; |
411 | rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; |
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
414 | } |
415 | |
416 | len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; |
417 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; |
418 | rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0; |
419 | rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
422 | |
423 | len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len; |
424 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma; |
425 | rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0; |
426 | rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
429 | |
430 | len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len; |
431 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma; |
432 | rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0; |
433 | rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
436 | |
437 | len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len; |
438 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma; |
439 | rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0; |
440 | rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
443 | |
444 | len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len; |
445 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma; |
446 | rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0; |
447 | rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
450 | |
451 | len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len; |
452 | dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma; |
453 | rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0; |
454 | rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
457 | |
458 | len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len; |
459 | dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma; |
460 | rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0; |
461 | rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0; |
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
464 | |
465 | /* reset read/write point */ |
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
467 | |
468 | /* reset H2C Queue index in a single write */ |
469 | if (rtw_chip_wcpu_11ac(rtwdev)) |
470 | rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, |
471 | BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX); |
472 | } |
473 | |
474 | static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev) |
475 | { |
476 | rtw_pci_reset_buf_desc(rtwdev); |
477 | } |
478 | |
479 | static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev, |
480 | struct rtw_pci *rtwpci, bool exclude_rx) |
481 | { |
482 | unsigned long flags; |
483 | u32 imr0_unmask = exclude_rx ? IMR_ROK : 0; |
484 | |
485 | spin_lock_irqsave(&rtwpci->hwirq_lock, flags); |
486 | |
	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
495 | } |
496 | |
497 | static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev, |
498 | struct rtw_pci *rtwpci) |
499 | { |
500 | unsigned long flags; |
501 | |
502 | spin_lock_irqsave(&rtwpci->hwirq_lock, flags); |
503 | |
504 | if (!rtwpci->irq_enabled) |
505 | goto out; |
506 | |
	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
516 | } |
517 | |
518 | static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) |
519 | { |
520 | /* reset dma and rx tag */ |
521 | rtw_write32_set(rtwdev, RTK_PCI_CTRL, |
522 | BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN); |
523 | rtwpci->rx_tag = 0; |
524 | } |
525 | |
526 | static int rtw_pci_setup(struct rtw_dev *rtwdev) |
527 | { |
528 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
529 | |
530 | rtw_pci_reset_trx_ring(rtwdev); |
531 | rtw_pci_dma_reset(rtwdev, rtwpci); |
532 | |
533 | return 0; |
534 | } |
535 | |
536 | static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) |
537 | { |
538 | struct rtw_pci_tx_ring *tx_ring; |
539 | enum rtw_tx_queue_type queue; |
540 | |
541 | rtw_pci_reset_trx_ring(rtwdev); |
542 | for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { |
543 | tx_ring = &rtwpci->tx_rings[queue]; |
544 | rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); |
545 | } |
546 | } |
547 | |
548 | static void rtw_pci_napi_start(struct rtw_dev *rtwdev) |
549 | { |
550 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
551 | |
	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_enable(&rtwpci->napi);
556 | } |
557 | |
558 | static void rtw_pci_napi_stop(struct rtw_dev *rtwdev) |
559 | { |
560 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
561 | |
	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_synchronize(&rtwpci->napi);
	napi_disable(&rtwpci->napi);
567 | } |
568 | |
569 | static int rtw_pci_start(struct rtw_dev *rtwdev) |
570 | { |
571 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
572 | |
573 | rtw_pci_napi_start(rtwdev); |
574 | |
	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = true;
	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
	spin_unlock_bh(&rtwpci->irq_lock);
579 | |
580 | return 0; |
581 | } |
582 | |
583 | static void rtw_pci_stop(struct rtw_dev *rtwdev) |
584 | { |
585 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
586 | struct pci_dev *pdev = rtwpci->pdev; |
587 | |
	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = false;
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	synchronize_irq(pdev->irq);
	rtw_pci_napi_stop(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
599 | } |
600 | |
601 | static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev) |
602 | { |
603 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
604 | struct rtw_pci_tx_ring *tx_ring; |
605 | enum rtw_tx_queue_type queue; |
606 | bool tx_empty = true; |
607 | |
	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		goto enter_deep_ps;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA must be idle before entering deep PS */
614 | for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { |
		/* the BCN queue holds reserved pages and has no DMA
		 * completion interrupt; the H2C queue is managed by firmware
		 */
618 | if (queue == RTW_TX_QUEUE_BCN || |
619 | queue == RTW_TX_QUEUE_H2C) |
620 | continue; |
621 | |
622 | tx_ring = &rtwpci->tx_rings[queue]; |
623 | |
624 | /* check if there is any skb DMAing */ |
		if (skb_queue_len(&tx_ring->queue)) {
626 | tx_empty = false; |
627 | break; |
628 | } |
629 | } |
630 | |
631 | if (!tx_empty) { |
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}
enter_deep_ps:
	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
639 | } |
640 | |
641 | static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev) |
642 | { |
643 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
644 | |
645 | lockdep_assert_held(&rtwpci->irq_lock); |
646 | |
	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
649 | } |
650 | |
651 | static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter) |
652 | { |
653 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
654 | |
	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
664 | } |
665 | |
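/* The beacon queue holds the previously written reserved-page skb, if any;
 * unmap and free it before a new reserved page is written.
 */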
666 | static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci, |
667 | struct rtw_pci_tx_ring *ring) |
668 | { |
	struct sk_buff *prev = skb_dequeue(&ring->queue);
670 | struct rtw_pci_tx_data *tx_data; |
671 | dma_addr_t dma; |
672 | |
673 | if (!prev) |
674 | return; |
675 | |
	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(prev);
680 | } |
681 | |
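/* With BIT_RX_TAG_EN set (see rtw_pci_dma_reset), the DMA engine appears to
 * stamp an incrementing tag into each completed RX buffer descriptor, read
 * back here through the total_pkt_size field; a mismatch against the
 * expected tag suggests the descriptor was not fully written back.
 */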
682 | static void rtw_pci_dma_check(struct rtw_dev *rtwdev, |
683 | struct rtw_pci_rx_ring *rx_ring, |
684 | u32 idx) |
685 | { |
686 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
687 | const struct rtw_chip_info *chip = rtwdev->chip; |
688 | struct rtw_pci_rx_buffer_desc *buf_desc; |
689 | u32 desc_sz = chip->rx_buf_desc_sz; |
690 | u16 total_pkt_size; |
691 | |
692 | buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + |
693 | idx * desc_sz); |
694 | total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size); |
695 | |
696 | /* rx tag mismatch, throw a warning */ |
697 | if (total_pkt_size != rtwpci->rx_tag) |
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
699 | |
700 | rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; |
701 | } |
702 | |
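/* The hardware read pointer sits in the upper 16 bits of the TXBD index
 * register, hence the 16-bit read at bd_idx_addr + 2.
 */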
703 | static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q) |
704 | { |
705 | u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q]; |
	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
707 | |
708 | return FIELD_GET(TRX_BD_IDX_MASK, bd_idx); |
709 | } |
710 | |
711 | static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop) |
712 | { |
713 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
714 | struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; |
715 | u32 cur_rp; |
716 | u8 i; |
717 | |
	/* The time taken by the I/O in __pci_get_hw_tx_ring_rp varies a bit,
	 * so it is hard to pick a reasonable fixed total timeout for the
	 * read_poll_timeout* helpers. Instead, bound the number of polls and
	 * use a plain for loop with udelay.
	 */
723 | for (i = 0; i < 30; i++) { |
724 | cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q); |
725 | if (cur_rp == ring->r.wp) |
726 | return; |
727 | |
728 | udelay(1); |
729 | } |
730 | |
731 | if (!drop) |
		rtw_warn(rtwdev, "timed out flushing pci tx ring[%d]\n", pci_q);
733 | } |
734 | |
735 | static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, |
736 | bool drop) |
737 | { |
738 | u8 q; |
739 | |
740 | for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) { |
741 | /* Unnecessary to flush BCN, H2C and HI tx queues. */ |
742 | if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C || |
743 | q == RTW_TX_QUEUE_HI0) |
744 | continue; |
745 | |
746 | if (pci_queues & BIT(q)) |
			__pci_flush_queue(rtwdev, q, drop);
748 | } |
749 | } |
750 | |
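/* Translate the mac80211 queue bitmap (AC indices) into a PCI TX queue
 * bitmap before flushing the corresponding rings.
 */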
751 | static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) |
752 | { |
753 | u32 pci_queues = 0; |
754 | u8 i; |
755 | |
756 | /* If all of the hardware queues are requested to flush, |
757 | * flush all of the pci queues. |
758 | */ |
759 | if (queues == BIT(rtwdev->hw->queues) - 1) { |
760 | pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; |
761 | } else { |
762 | for (i = 0; i < rtwdev->hw->queues; i++) |
763 | if (queues & BIT(i)) |
764 | pci_queues |= BIT(rtw_tx_ac_to_hwq(i)); |
765 | } |
766 | |
767 | __rtw_pci_flush_queues(rtwdev, pci_queues, drop); |
768 | } |
769 | |
770 | static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, |
771 | enum rtw_tx_queue_type queue) |
772 | { |
773 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
774 | struct rtw_pci_tx_ring *ring; |
775 | u32 bd_idx; |
776 | |
777 | ring = &rtwpci->tx_rings[queue]; |
778 | bd_idx = rtw_pci_tx_queue_idx_addr[queue]; |
779 | |
	spin_lock_bh(&rtwpci->irq_lock);
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
785 | } |
786 | |
787 | static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev) |
788 | { |
789 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
790 | enum rtw_tx_queue_type queue; |
791 | |
792 | for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) |
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
794 | rtw_pci_tx_kick_off_queue(rtwdev, queue); |
795 | } |
796 | |
797 | static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev, |
798 | struct rtw_tx_pkt_info *pkt_info, |
799 | struct sk_buff *skb, |
800 | enum rtw_tx_queue_type queue) |
801 | { |
802 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
803 | const struct rtw_chip_info *chip = rtwdev->chip; |
804 | struct rtw_pci_tx_ring *ring; |
805 | struct rtw_pci_tx_data *tx_data; |
806 | dma_addr_t dma; |
807 | u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz; |
808 | u32 tx_buf_desc_sz = chip->tx_buf_desc_sz; |
809 | u32 size; |
810 | u32 psb_len; |
811 | u8 *pkt_desc; |
812 | struct rtw_pci_tx_buffer_desc *buf_desc; |
813 | |
814 | ring = &rtwpci->tx_rings[queue]; |
815 | |
816 | size = skb->len; |
817 | |
818 | if (queue == RTW_TX_QUEUE_BCN) |
819 | rtw_pci_release_rsvd_page(rtwpci, ring); |
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
		return -EBUSY;

	/* the skb is DMA-mapped from here on; there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
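	/* psb_len is the total buffer length in 128-byte units, i.e.
	 * DIV_ROUND_UP(skb->len, 128); for the beacon queue the OWN bit is
	 * folded into the same field, presumably handing the descriptor
	 * over to the hardware.
	 */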
835 | psb_len = (skb->len - 1) / 128 + 1; |
836 | if (queue == RTW_TX_QUEUE_BCN) |
837 | psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET; |
838 | |
839 | buf_desc[0].psb_len = cpu_to_le16(psb_len); |
840 | buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz); |
841 | buf_desc[0].dma = cpu_to_le32(dma); |
842 | buf_desc[1].buf_size = cpu_to_le16(size); |
843 | buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz); |
844 | |
845 | tx_data = rtw_pci_get_tx_data(skb); |
846 | tx_data->dma = dma; |
847 | tx_data->sn = pkt_info->sn; |
848 | |
	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);
863 | |
864 | return 0; |
865 | } |
866 | |
867 | static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, |
868 | u32 size) |
869 | { |
870 | struct sk_buff *skb; |
871 | struct rtw_tx_pkt_info pkt_info = {0}; |
872 | u8 reg_bcn_work; |
873 | int ret; |
874 | |
	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
		rtw_err(rtwdev, "failed to write rsvd page data\n");
882 | return ret; |
883 | } |
884 | |
885 | /* reserved pages go through beacon queue */ |
886 | reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK); |
887 | reg_bcn_work |= BIT_PCI_BCNQ_FLAG; |
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
889 | |
890 | return 0; |
891 | } |
892 | |
893 | static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size) |
894 | { |
895 | struct sk_buff *skb; |
896 | struct rtw_tx_pkt_info pkt_info = {0}; |
897 | int ret; |
898 | |
	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
910 | |
911 | return 0; |
912 | } |
913 | |
914 | static int rtw_pci_tx_write(struct rtw_dev *rtwdev, |
915 | struct rtw_tx_pkt_info *pkt_info, |
916 | struct sk_buff *skb) |
917 | { |
918 | enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb); |
919 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
920 | struct rtw_pci_tx_ring *ring; |
921 | int ret; |
922 | |
923 | ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue); |
924 | if (ret) |
925 | return ret; |
926 | |
927 | ring = &rtwpci->tx_rings[queue]; |
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);
934 | |
935 | return 0; |
936 | } |
937 | |
938 | static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, |
939 | u8 hw_queue) |
940 | { |
941 | struct ieee80211_hw *hw = rtwdev->hw; |
942 | struct ieee80211_tx_info *info; |
943 | struct rtw_pci_tx_ring *ring; |
944 | struct rtw_pci_tx_data *tx_data; |
945 | struct sk_buff *skb; |
946 | u32 count; |
947 | u32 bd_idx_addr; |
948 | u32 bd_idx, cur_rp, rp_idx; |
949 | u16 q_map; |
950 | |
951 | ring = &rtwpci->tx_rings[hw_queue]; |
952 | |
953 | bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue]; |
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
955 | cur_rp = bd_idx >> 16; |
956 | cur_rp &= TRX_BD_IDX_MASK; |
957 | rp_idx = ring->r.rp; |
958 | if (cur_rp >= ring->r.rp) |
959 | count = cur_rp - ring->r.rp; |
960 | else |
961 | count = ring->r.len - (ring->r.rp - cur_rp); |
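	/* e.g. with r.len = 256, r.rp = 250 and cur_rp = 4 the ring has
	 * wrapped, and count = 256 - (250 - 4) = 10 completed descriptors.
	 */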
962 | |
963 | while (count--) { |
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
968 | break; |
969 | } |
970 | tx_data = rtw_pci_get_tx_data(skb); |
971 | dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, |
972 | DMA_TO_DEVICE); |
973 | |
974 | /* just free command packets from host to card */ |
975 | if (hw_queue == RTW_TX_QUEUE_H2C) { |
976 | dev_kfree_skb_irq(skb); |
977 | continue; |
978 | } |
979 | |
980 | if (ring->queue_stopped && |
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
984 | ring->queue_stopped = false; |
985 | } |
986 | |
987 | if (++rp_idx >= ring->r.len) |
988 | rp_idx = 0; |
989 | |
		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
991 | |
992 | info = IEEE80211_SKB_CB(skb); |
993 | |
994 | /* enqueue to wait for tx report */ |
995 | if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { |
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
997 | continue; |
998 | } |
999 | |
1000 | /* always ACK for others, then they won't be marked as drop */ |
1001 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) |
1002 | info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; |
1003 | else |
1004 | info->flags |= IEEE80211_TX_STAT_ACK; |
1005 | |
1006 | ieee80211_tx_info_clear_status(info); |
1007 | ieee80211_tx_status_irqsafe(hw, skb); |
1008 | } |
1009 | |
1010 | ring->r.rp = cur_rp; |
1011 | } |
1012 | |
1013 | static void rtw_pci_rx_isr(struct rtw_dev *rtwdev) |
1014 | { |
1015 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1016 | struct napi_struct *napi = &rtwpci->napi; |
1017 | |
	napi_schedule(napi);
1019 | } |
1020 | |
1021 | static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev, |
1022 | struct rtw_pci *rtwpci) |
1023 | { |
1024 | struct rtw_pci_rx_ring *ring; |
1025 | int count = 0; |
1026 | u32 tmp, cur_wp; |
1027 | |
1028 | ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; |
1029 | tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ); |
	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
1031 | if (cur_wp >= ring->r.wp) |
1032 | count = cur_wp - ring->r.wp; |
1033 | else |
1034 | count = ring->r.len - (ring->r.wp - cur_wp); |
1035 | |
1036 | return count; |
1037 | } |
1038 | |
1039 | static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, |
1040 | u8 hw_queue, u32 limit) |
1041 | { |
1042 | const struct rtw_chip_info *chip = rtwdev->chip; |
1043 | struct napi_struct *napi = &rtwpci->napi; |
1044 | struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; |
1045 | struct rtw_rx_pkt_stat pkt_stat; |
1046 | struct ieee80211_rx_status rx_status; |
1047 | struct sk_buff *skb, *new; |
1048 | u32 cur_rp = ring->r.rp; |
1049 | u32 count, rx_done = 0; |
1050 | u32 pkt_offset; |
1051 | u32 pkt_desc_sz = chip->rx_pkt_desc_sz; |
1052 | u32 buf_desc_sz = chip->rx_buf_desc_sz; |
1053 | u32 new_len; |
1054 | u8 *rx_desc; |
1055 | dma_addr_t dma; |
1056 | |
1057 | count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci); |
1058 | count = min(count, limit); |
1059 | |
1060 | while (count--) { |
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
1066 | rx_desc = skb->data; |
1067 | chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); |
1068 | |
1069 | /* offset from rx_desc to payload */ |
1070 | pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + |
1071 | pkt_stat.shift; |
1072 | |
1073 | /* allocate a new skb for this frame, |
1074 | * discard the frame if none available |
1075 | */ |
1076 | new_len = pkt_stat.pkt_len + pkt_offset; |
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
1079 | goto next_rp; |
1080 | |
1081 | /* put the DMA data including rx_desc from phy to new skb */ |
		skb_put_data(new, skb->data, new_len);
1083 | |
1084 | if (pkt_stat.is_c2h) { |
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1093 | rx_done++; |
1094 | } |
1095 | |
1096 | next_rp: |
1097 | /* new skb delivered to mac80211, re-enable original skb DMA */ |
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);
1100 | |
1101 | /* host read next element in ring */ |
1102 | if (++cur_rp >= ring->r.len) |
1103 | cur_rp = 0; |
1104 | } |
1105 | |
1106 | ring->r.rp = cur_rp; |
	/* 'rp', the last position we have read, is treated as the previous
	 * position of 'wp', which is used to calculate 'count' next time.
	 */
	ring->r.wp = cur_rp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1112 | |
1113 | return rx_done; |
1114 | } |
1115 | |
1116 | static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev, |
1117 | struct rtw_pci *rtwpci, u32 *irq_status) |
1118 | { |
1119 | unsigned long flags; |
1120 | |
1121 | spin_lock_irqsave(&rtwpci->hwirq_lock, flags); |
1122 | |
1123 | irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0); |
1124 | irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1); |
1125 | if (rtw_chip_wcpu_11ac(rtwdev)) |
1126 | irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3); |
1127 | else |
1128 | irq_status[3] = 0; |
1129 | irq_status[0] &= rtwpci->irq_mask[0]; |
1130 | irq_status[1] &= rtwpci->irq_mask[1]; |
1131 | irq_status[3] &= rtwpci->irq_mask[3]; |
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1138 | } |
1139 | |
1140 | static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev) |
1141 | { |
1142 | struct rtw_dev *rtwdev = dev; |
1143 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1144 | |
1145 | /* disable RTW PCI interrupt to avoid more interrupts before the end of |
1146 | * thread function |
1147 | * |
1148 | * disable HIMR here to also avoid new HISR flag being raised before |
1149 | * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs |
1150 | * are cleared, the edge-triggered interrupt will not be generated when |
1151 | * a new HISR flag is set. |
1152 | */ |
1153 | rtw_pci_disable_interrupt(rtwdev, rtwpci); |
1154 | |
1155 | return IRQ_WAKE_THREAD; |
1156 | } |
1157 | |
1158 | static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) |
1159 | { |
1160 | struct rtw_dev *rtwdev = dev; |
1161 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1162 | u32 irq_status[4]; |
1163 | bool rx = false; |
1164 | |
	spin_lock_bh(&rtwpci->irq_lock);
1166 | rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status); |
1167 | |
1168 | if (irq_status[0] & IMR_MGNTDOK) |
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1182 | if (irq_status[0] & IMR_ROK) { |
1183 | rtw_pci_rx_isr(rtwdev); |
1184 | rx = true; |
1185 | } |
1186 | if (unlikely(irq_status[0] & IMR_C2HCMD)) |
1187 | rtw_fw_c2h_cmd_isr(rtwdev); |
1188 | |
1189 | /* all of the jobs for this interrupt have been done */ |
1190 | if (rtwpci->running) |
		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
	spin_unlock_bh(&rtwpci->irq_lock);
1193 | |
1194 | return IRQ_HANDLED; |
1195 | } |
1196 | |
1197 | static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, |
1198 | struct pci_dev *pdev) |
1199 | { |
1200 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1201 | unsigned long len; |
1202 | u8 bar_id = 2; |
1203 | int ret; |
1204 | |
1205 | ret = pci_request_regions(pdev, KBUILD_MODNAME); |
1206 | if (ret) { |
		rtw_err(rtwdev, "failed to request pci regions\n");
1208 | return ret; |
1209 | } |
1210 | |
1211 | len = pci_resource_len(pdev, bar_id); |
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
1216 | return -ENOMEM; |
1217 | } |
1218 | |
1219 | return 0; |
1220 | } |
1221 | |
1222 | static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev, |
1223 | struct pci_dev *pdev) |
1224 | { |
1225 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1226 | |
1227 | if (rtwpci->mmap) { |
		pci_iounmap(pdev, rtwpci->mmap);
1229 | pci_release_regions(pdev); |
1230 | } |
1231 | } |
1232 | |
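/* DBI registers provide indirect access to the device's PCIe configuration
 * registers: the low bits of the address select a 4-byte-aligned window,
 * the remainder selects the byte lane (mirrored in the write-enable bits),
 * and completion is signalled by hardware clearing the WFLAG/RFLAG bit.
 */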
1233 | static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data) |
1234 | { |
1235 | u16 write_addr; |
1236 | u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK); |
1237 | u8 flag; |
1238 | u8 cnt; |
1239 | |
1240 | write_addr = addr & BITS_DBI_ADDR_MASK; |
1241 | write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN); |
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1245 | |
1246 | for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { |
1247 | flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); |
1248 | if (flag == 0) |
1249 | return; |
1250 | |
1251 | udelay(10); |
1252 | } |
1253 | |
	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1255 | } |
1256 | |
1257 | static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value) |
1258 | { |
1259 | u16 read_addr = addr & BITS_DBI_ADDR_MASK; |
1260 | u8 flag; |
1261 | u8 cnt; |
1262 | |
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1265 | |
1266 | for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { |
1267 | flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); |
1268 | if (flag == 0) { |
1269 | read_addr = REG_DBI_RDATA_V1 + (addr & 3); |
			*value = rtw_read8(rtwdev, read_addr);
1271 | return 0; |
1272 | } |
1273 | |
1274 | udelay(10); |
1275 | } |
1276 | |
	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1278 | return -EIO; |
1279 | } |
1280 | |
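/* MDIO writes program the PCIe PHY. The register space is paged; @g1 seems
 * to select between the Gen1 and Gen2 page groups, matching the
 * gen1_para/gen2_para tables applied in rtw_pci_phy_cfg(). The write
 * completes when hardware clears BIT_MDIO_WFLAG_V1.
 */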
1281 | static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1) |
1282 | { |
1283 | u8 page; |
1284 | u8 wflag; |
1285 | u8 cnt; |
1286 | |
	rtw_write16(rtwdev, REG_MDIO_V1, data);
1288 | |
1289 | page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1; |
1290 | page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2; |
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1294 | |
1295 | for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { |
1296 | wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, |
1297 | BIT_MDIO_WFLAG_V1); |
1298 | if (wflag == 0) |
1299 | return; |
1300 | |
1301 | udelay(10); |
1302 | } |
1303 | |
	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1305 | } |
1306 | |
1307 | static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable) |
1308 | { |
1309 | u8 value; |
1310 | int ret; |
1311 | |
1312 | if (rtw_pci_disable_aspm) |
1313 | return; |
1314 | |
	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1318 | return; |
1319 | } |
1320 | |
1321 | if (enable) |
1322 | value |= BIT_CLKREQ_SW_EN; |
1323 | else |
1324 | value &= ~BIT_CLKREQ_SW_EN; |
1325 | |
	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1327 | } |
1328 | |
1329 | static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable) |
1330 | { |
1331 | u8 value; |
1332 | int ret; |
1333 | |
	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1337 | return; |
1338 | } |
1339 | |
1340 | if (enable) |
1341 | value &= ~BIT_CLKREQ_N_PAD; |
1342 | else |
1343 | value |= BIT_CLKREQ_N_PAD; |
1344 | |
	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1346 | } |
1347 | |
1348 | static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable) |
1349 | { |
1350 | u8 value; |
1351 | int ret; |
1352 | |
1353 | if (rtw_pci_disable_aspm) |
1354 | return; |
1355 | |
	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1359 | return; |
1360 | } |
1361 | |
1362 | if (enable) |
1363 | value |= BIT_L1_SW_EN; |
1364 | else |
1365 | value &= ~BIT_L1_SW_EN; |
1366 | |
	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1368 | } |
1369 | |
1370 | static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter) |
1371 | { |
1372 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1373 | |
	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * ASPM should only be enabled when the driver/firmware enters a
	 * power save mode without heavy traffic, because we have seen
	 * interoperability issues where the link enters L1 on the fly even
	 * while the driver is pushing high throughput. This is probably
	 * because ASPM behavior varies slightly between SoCs.
	 */
1384 | if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)) |
1385 | return; |
1386 | |
	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
		rtw_pci_aspm_set(rtwdev, enter);
1390 | } |
1391 | |
1392 | static void rtw_pci_link_cfg(struct rtw_dev *rtwdev) |
1393 | { |
1394 | const struct rtw_chip_info *chip = rtwdev->chip; |
1395 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1396 | struct pci_dev *pdev = rtwpci->pdev; |
1397 | u16 link_ctrl; |
1398 | int ret; |
1399 | |
1400 | /* RTL8822CE has enabled REFCLK auto calibration, it does not need |
1401 | * to add clock delay to cover the REFCLK timing gap. |
1402 | */ |
1403 | if (chip->id == RTW_CHIP_TYPE_8822C) |
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1405 | |
	/* Although the standard PCIe configuration space has a link control
	 * register, by Realtek's design the driver must check whether the
	 * host supports CLKREQ/ASPM before enabling the HW module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * accesses the PCIe configuration space to follow the host settings,
	 * and the other performs the actual CLKREQ/ASPM mechanism and is
	 * disabled by default. Sometimes the host does not support it, and
	 * wrong settings (e.g. CLKREQ# not bi-directional) could cause the
	 * device to be lost if the HW misbehaves on the link.
	 *
	 * Hence the driver is designed to first check that the PCIe
	 * configuration space is synced and enabled, and only then turn on
	 * the module that actually performs the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1425 | return; |
1426 | } |
1427 | |
1428 | if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) |
		rtw_pci_clkreq_set(rtwdev, true);
1430 | |
1431 | rtwpci->link_ctrl = link_ctrl; |
1432 | } |
1433 | |
1434 | static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev) |
1435 | { |
1436 | const struct rtw_chip_info *chip = rtwdev->chip; |
1437 | |
1438 | switch (chip->id) { |
1439 | case RTW_CHIP_TYPE_8822C: |
1440 | if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D) |
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1443 | break; |
1444 | default: |
1445 | break; |
1446 | } |
1447 | } |
1448 | |
1449 | static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) |
1450 | { |
1451 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1452 | const struct rtw_chip_info *chip = rtwdev->chip; |
1453 | struct rtw_efuse *efuse = &rtwdev->efuse; |
1454 | struct pci_dev *pdev = rtwpci->pdev; |
1455 | const struct rtw_intf_phy_para *para; |
1456 | u16 cut; |
1457 | u16 value; |
1458 | u16 offset; |
1459 | int i; |
1460 | int ret; |
1461 | |
1462 | cut = BIT(0) << rtwdev->hal.cut_version; |
1463 | |
1464 | for (i = 0; i < chip->intf_table->n_gen1_para; i++) { |
1465 | para = &chip->intf_table->gen1_para[i]; |
1466 | if (!(para->cut_mask & cut)) |
1467 | continue; |
1468 | if (para->offset == 0xffff) |
1469 | break; |
1470 | offset = para->offset; |
1471 | value = para->value; |
1472 | if (para->ip_sel == RTW_IP_SEL_PHY) |
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
1476 | } |
1477 | |
1478 | for (i = 0; i < chip->intf_table->n_gen2_para; i++) { |
1479 | para = &chip->intf_table->gen2_para[i]; |
1480 | if (!(para->cut_mask & cut)) |
1481 | continue; |
1482 | if (para->offset == 0xffff) |
1483 | break; |
1484 | offset = para->offset; |
1485 | value = para->value; |
1486 | if (para->ip_sel == RTW_IP_SEL_PHY) |
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
1490 | } |
1491 | |
1492 | rtw_pci_link_cfg(rtwdev); |
1493 | |
1494 | /* Disable 8821ce completion timeout by default */ |
1495 | if (chip->id == RTW_CHIP_TYPE_8821C) { |
		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		if (ret)
			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
1500 | ret); |
1501 | } |
1502 | |
1503 | if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5) |
		rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
1505 | } |
1506 | |
1507 | static int __maybe_unused rtw_pci_suspend(struct device *dev) |
1508 | { |
1509 | struct ieee80211_hw *hw = dev_get_drvdata(dev); |
1510 | struct rtw_dev *rtwdev = hw->priv; |
1511 | const struct rtw_chip_info *chip = rtwdev->chip; |
1512 | struct rtw_efuse *efuse = &rtwdev->efuse; |
1513 | |
1514 | if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) |
		rtw_pci_clkreq_pad_low(rtwdev, true);
1516 | return 0; |
1517 | } |
1518 | |
1519 | static int __maybe_unused rtw_pci_resume(struct device *dev) |
1520 | { |
1521 | struct ieee80211_hw *hw = dev_get_drvdata(dev); |
1522 | struct rtw_dev *rtwdev = hw->priv; |
1523 | const struct rtw_chip_info *chip = rtwdev->chip; |
1524 | struct rtw_efuse *efuse = &rtwdev->efuse; |
1525 | |
1526 | if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) |
		rtw_pci_clkreq_pad_low(rtwdev, false);
1528 | return 0; |
1529 | } |
1530 | |
1531 | SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume); |
1532 | EXPORT_SYMBOL(rtw_pm_ops); |
1533 | |
1534 | static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev) |
1535 | { |
1536 | int ret; |
1537 | |
	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
1541 | return ret; |
1542 | } |
1543 | |
	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1547 | |
1548 | return 0; |
1549 | } |
1550 | |
1551 | static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev) |
1552 | { |
	pci_disable_device(pdev);
1554 | } |
1555 | |
1556 | static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev) |
1557 | { |
1558 | struct rtw_pci *rtwpci; |
1559 | int ret; |
1560 | |
1561 | rtwpci = (struct rtw_pci *)rtwdev->priv; |
1562 | rtwpci->pdev = pdev; |
1563 | |
	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
1568 | goto err_out; |
1569 | } |
1570 | |
1571 | ret = rtw_pci_init(rtwdev); |
1572 | if (ret) { |
		rtw_err(rtwdev, "failed to allocate pci resources\n");
1574 | goto err_io_unmap; |
1575 | } |
1576 | |
1577 | return 0; |
1578 | |
1579 | err_io_unmap: |
1580 | rtw_pci_io_unmapping(rtwdev, pdev); |
1581 | |
1582 | err_out: |
1583 | return ret; |
1584 | } |
1585 | |
1586 | static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) |
1587 | { |
1588 | rtw_pci_deinit(rtwdev); |
1589 | rtw_pci_io_unmapping(rtwdev, pdev); |
1590 | } |
1591 | |
1592 | static struct rtw_hci_ops rtw_pci_ops = { |
1593 | .tx_write = rtw_pci_tx_write, |
1594 | .tx_kick_off = rtw_pci_tx_kick_off, |
1595 | .flush_queues = rtw_pci_flush_queues, |
1596 | .setup = rtw_pci_setup, |
1597 | .start = rtw_pci_start, |
1598 | .stop = rtw_pci_stop, |
1599 | .deep_ps = rtw_pci_deep_ps, |
1600 | .link_ps = rtw_pci_link_ps, |
1601 | .interface_cfg = rtw_pci_interface_cfg, |
1602 | |
1603 | .read8 = rtw_pci_read8, |
1604 | .read16 = rtw_pci_read16, |
1605 | .read32 = rtw_pci_read32, |
1606 | .write8 = rtw_pci_write8, |
1607 | .write16 = rtw_pci_write16, |
1608 | .write32 = rtw_pci_write32, |
1609 | .write_data_rsvd_page = rtw_pci_write_data_rsvd_page, |
1610 | .write_data_h2c = rtw_pci_write_data_h2c, |
1611 | }; |
1612 | |
1613 | static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) |
1614 | { |
1615 | unsigned int flags = PCI_IRQ_LEGACY; |
1616 | int ret; |
1617 | |
1618 | if (!rtw_disable_msi) |
1619 | flags |= PCI_IRQ_MSI; |
1620 | |
	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
1634 | } |
1635 | |
1636 | return ret; |
1637 | } |
1638 | |
1639 | static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) |
1640 | { |
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
1643 | } |
1644 | |
1645 | static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) |
1646 | { |
1647 | struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi); |
1648 | struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev, |
1649 | priv); |
1650 | int work_done = 0; |
1651 | |
1652 | if (rtwpci->rx_no_aspm) |
		rtw_pci_link_ps(rtwdev, false);
1654 | |
1655 | while (work_done < budget) { |
1656 | u32 work_done_once; |
1657 | |
		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci,
						 RTW_RX_QUEUE_MPDU,
						 budget - work_done);
1660 | if (work_done_once == 0) |
1661 | break; |
1662 | work_done += work_done_once; |
1663 | } |
1664 | if (work_done < budget) { |
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If an interrupt fires during polling, before napi_complete,
		 * and no further data arrives, data left on the DMA ring will
		 * not be processed immediately. Check whether the DMA ring is
		 * non-empty and reschedule NAPI accordingly.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
1677 | } |
1678 | if (rtwpci->rx_no_aspm) |
		rtw_pci_link_ps(rtwdev, true);
1680 | |
1681 | return work_done; |
1682 | } |
1683 | |
1684 | static void rtw_pci_napi_init(struct rtw_dev *rtwdev) |
1685 | { |
1686 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1687 | |
	init_dummy_netdev(&rtwpci->netdev);
	netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
1690 | } |
1691 | |
1692 | static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) |
1693 | { |
1694 | struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; |
1695 | |
1696 | rtw_pci_napi_stop(rtwdev); |
	netif_napi_del(&rtwpci->napi);
1698 | } |
1699 | |
1700 | int rtw_pci_probe(struct pci_dev *pdev, |
1701 | const struct pci_device_id *id) |
1702 | { |
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
1704 | struct ieee80211_hw *hw; |
1705 | struct rtw_dev *rtwdev; |
1706 | struct rtw_pci *rtwpci; |
1707 | int drv_data_size; |
1708 | int ret; |
1709 | |
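	/* hw->priv is laid out as struct rtw_dev immediately followed by
	 * struct rtw_pci, which rtwdev->priv then points into.
	 */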
1710 | drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci); |
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
1714 | return -ENOMEM; |
1715 | } |
1716 | |
1717 | rtwdev = hw->priv; |
1718 | rtwdev->hw = hw; |
1719 | rtwdev->dev = &pdev->dev; |
1720 | rtwdev->chip = (struct rtw_chip_info *)id->driver_data; |
1721 | rtwdev->hci.ops = &rtw_pci_ops; |
1722 | rtwdev->hci.type = RTW_HCI_TYPE_PCIE; |
1723 | |
1724 | rtwpci = (struct rtw_pci *)rtwdev->priv; |
	atomic_set(&rtwpci->link_usage, 1);
1726 | |
1727 | ret = rtw_core_init(rtwdev); |
1728 | if (ret) |
1729 | goto err_release_hw; |
1730 | |
	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);
1734 | |
1735 | ret = rtw_pci_claim(rtwdev, pdev); |
1736 | if (ret) { |
		rtw_err(rtwdev, "failed to claim pci device\n");
1738 | goto err_deinit_core; |
1739 | } |
1740 | |
1741 | ret = rtw_pci_setup_resource(rtwdev, pdev); |
1742 | if (ret) { |
		rtw_err(rtwdev, "failed to setup pci resources\n");
1744 | goto err_pci_declaim; |
1745 | } |
1746 | |
1747 | rtw_pci_napi_init(rtwdev); |
1748 | |
1749 | ret = rtw_chip_info_setup(rtwdev); |
1750 | if (ret) { |
		rtw_err(rtwdev, "failed to setup chip information\n");
1752 | goto err_destroy_pci; |
1753 | } |
1754 | |
1755 | /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ |
1756 | if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL) |
1757 | rtwpci->rx_no_aspm = true; |
1758 | |
1759 | rtw_pci_phy_cfg(rtwdev); |
1760 | |
1761 | ret = rtw_register_hw(rtwdev, hw); |
1762 | if (ret) { |
		rtw_err(rtwdev, "failed to register hw\n");
1764 | goto err_destroy_pci; |
1765 | } |
1766 | |
1767 | ret = rtw_pci_request_irq(rtwdev, pdev); |
1768 | if (ret) { |
1769 | ieee80211_unregister_hw(hw); |
1770 | goto err_destroy_pci; |
1771 | } |
1772 | |
1773 | return 0; |
1774 | |
1775 | err_destroy_pci: |
1776 | rtw_pci_napi_deinit(rtwdev); |
1777 | rtw_pci_destroy(rtwdev, pdev); |
1778 | |
1779 | err_pci_declaim: |
1780 | rtw_pci_declaim(rtwdev, pdev); |
1781 | |
1782 | err_deinit_core: |
1783 | rtw_core_deinit(rtwdev); |
1784 | |
1785 | err_release_hw: |
1786 | ieee80211_free_hw(hw); |
1787 | |
1788 | return ret; |
1789 | } |
1790 | EXPORT_SYMBOL(rtw_pci_probe); |
1791 | |
1792 | void rtw_pci_remove(struct pci_dev *pdev) |
1793 | { |
1794 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); |
1795 | struct rtw_dev *rtwdev; |
1796 | struct rtw_pci *rtwpci; |
1797 | |
1798 | if (!hw) |
1799 | return; |
1800 | |
1801 | rtwdev = hw->priv; |
1802 | rtwpci = (struct rtw_pci *)rtwdev->priv; |
1803 | |
1804 | rtw_unregister_hw(rtwdev, hw); |
1805 | rtw_pci_disable_interrupt(rtwdev, rtwpci); |
1806 | rtw_pci_napi_deinit(rtwdev); |
1807 | rtw_pci_destroy(rtwdev, pdev); |
1808 | rtw_pci_declaim(rtwdev, pdev); |
1809 | rtw_pci_free_irq(rtwdev, pdev); |
1810 | rtw_core_deinit(rtwdev); |
1811 | ieee80211_free_hw(hw); |
1812 | } |
1813 | EXPORT_SYMBOL(rtw_pci_remove); |
1814 | |
1815 | void rtw_pci_shutdown(struct pci_dev *pdev) |
1816 | { |
1817 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); |
1818 | struct rtw_dev *rtwdev; |
1819 | const struct rtw_chip_info *chip; |
1820 | |
1821 | if (!hw) |
1822 | return; |
1823 | |
1824 | rtwdev = hw->priv; |
1825 | chip = rtwdev->chip; |
1826 | |
1827 | if (chip->ops->shutdown) |
1828 | chip->ops->shutdown(rtwdev); |
1829 | |
	pci_set_power_state(pdev, PCI_D3hot);
1831 | } |
1832 | EXPORT_SYMBOL(rtw_pci_shutdown); |
1833 | |
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
1837 | |