// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* Maximum number of bytes that can be handled atomically by
 * diag read and write.
 */
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000

#define QCA99X0_PCIE_BAR0_START_REG 0x81030
#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG 0x4d010

static const struct pci_device_id ath10k_pci_id_table[] = {
	/* PCI-E QCA988X V2 (Ubiquiti branded) */
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static const struct ce_attr pci_host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9: target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7: used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8: target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9: target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It is not necessary to send the target a wlan configuration for
	 * CE10 & CE11 as these CEs are not actively used by the target.
	 */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

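/* True when the target's RTC state machine reports RTC_STATE_V_ON, i.e. the
 * SoC is awake and register access is expected to work.
 */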
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

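/* Busy-poll until the device reports awake, backing the delay off from 5 us
 * up to 50 us per iteration, for at most PCIE_WAKE_TIMEOUT in total.
 */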
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

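/* Wake the device and take a wake reference. Each successful call must be
 * balanced by ath10k_pci_sleep() so the grace period timer can eventually
 * put the device back to sleep.
 */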
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

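/* Grace period timer: lets the device sleep once the last wake reference has
 * been dropped and ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC has passed.
 */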
static void ath10k_pci_ps_timer(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
	struct ath10k *ar = ar_pci->ar;
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

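/* Low level MMIO accessors: bounds-check the offset against the mapped BAR
 * length and keep the device awake for the duration of the access.
 */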
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
		return "msi";

	return "legacy";
}

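/* Allocate, DMA map and post a single rx buffer to a copy engine pipe.
 * Takes ce_lock internally, so it must be called without it held.
 */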
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);

	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
	struct ath10k *ar = ar_pci->ar;

	ath10k_pci_rx_post(ar);
}

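/* The helpers below translate a target CPU address into an address usable by
 * the diagnostic CE: the low 20 bits select an offset within a 1 MB window
 * and the upper bits are taken from a chip-specific window register.
 */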
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
	       & 0x7ff) << 21;
	val |= 0x100000 | region;
	return val;
}

/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr to support access to
 * target space below 1M for qca6174 and qca9377. If the target address is
 * below 1M, bit[20] of the converted CE address is 0; otherwise bit[20] is 1.
 */
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
	       & 0x7ff) << 21;
	val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
	return val;
}

static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
		return -EOPNOTSUPP;

	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf;
	int i;

	mutex_lock(&ar_pci->ce_diag_mutex);
	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
				      GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from Target CPU virtual address space
	 * to CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
						     &completed_nbytes) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:

	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	mutex_unlock(&ar_pci->ce_diag_mutex);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf;
	dma_addr_t ce_data_base = 0;
	int i;

	mutex_lock(&ar_pci->ce_diag_mutex);
	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
				      GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = nbytes;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Copy caller's data to allocated DMA buf */
		memcpy(data_buf, data, nbytes);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
						     &completed_nbytes) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	mutex_unlock(&ar_pci->ce_diag_mutex);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

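/* Drain completed rx buffers from the copy engine, hand them to the given
 * completion handler and then replenish the pipe with fresh buffers.
 */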
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/* let device gain the buffer again */
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

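/* Queue a scatter-gather list on a CE pipe: every item except the last is
 * queued with CE_SEND_FLAG_GATHER and the final plain send completes the
 * batch; on failure all queued fragments are reverted.
 */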
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after the loop */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

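/* Dump the firmware's hi_failure_state area to the kernel log and, when
 * crash_data is available, into the coredump buffer as well.
 */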
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->dump_mutex);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static int ath10k_pci_dump_memory_section(struct ath10k *ar,
					  const struct ath10k_mem_region *mem_region,
					  u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	/* fill the gap between the first register section and register
	 * start address
	 */
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;

	for (i = 0; cur_section != NULL; i++) {
		section_size = cur_section->end - cur_section->start;

		if (section_size <= 0) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		if ((i + 1) == mem_region->section_table.size) {
			/* last section */
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		/* read section to dest memory */
		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
					       buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		/* fill in the gap between this section and the next */
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;

		if (!next_section)
			/* this was the last section */
			break;

		cur_section = next_section;
	}

	return count;
}

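/* Select which target memory bank is visible through the diagnostic window
 * (used below to switch from DRAM to IRAM), verifying the register write by
 * reading it back.
 */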
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
	u32 val;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   FW_RAM_CONFIG_ADDRESS, config);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				FW_RAM_CONFIG_ADDRESS);
	if (val != config) {
		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
			    val, config);
		return -EIO;
	}

	return 0;
}

/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
				       const struct ath10k_mem_region *region,
				       u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 base_addr, i;

	base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
	base_addr += region->start;

	for (i = 0; i < region->len; i += 4) {
		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
	}

	return region->len;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;
	int ret;

	mutex_lock(&ar->conf_mutex);
	if (ar->state != ATH10K_STATE_ON) {
		ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
		ret = -EIO;
		goto done;
	}

	for (i = 0; i < region->len; i += 4)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);

	ret = region->len;
done:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
					  const struct ath10k_mem_region *current_region,
					  u8 *buf)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_pci_dump_memory_section(ar,
						      current_region,
						      buf,
						      current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	ret = ath10k_pci_diag_read_mem(ar,
				       current_region->start,
				       buf,
				       current_region->len);
	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

1656 | static void ath10k_pci_dump_memory(struct ath10k *ar, |
1657 | struct ath10k_fw_crash_data *crash_data) |
1658 | { |
1659 | const struct ath10k_hw_mem_layout *mem_layout; |
1660 | const struct ath10k_mem_region *current_region; |
1661 | struct ath10k_dump_ram_data_hdr *hdr; |
1662 | u32 count, shift; |
1663 | size_t buf_len; |
1664 | int ret, i; |
1665 | u8 *buf; |
1666 | |
1667 | lockdep_assert_held(&ar->dump_mutex); |
1668 | |
1669 | if (!crash_data) |
1670 | return; |
1671 | |
1672 | mem_layout = ath10k_coredump_get_mem_layout(ar); |
1673 | if (!mem_layout) |
1674 | return; |
1675 | |
1676 | current_region = &mem_layout->region_table.regions[0]; |
1677 | |
1678 | buf = crash_data->ramdump_buf; |
1679 | buf_len = crash_data->ramdump_buf_len; |
1680 | |
1681 | memset(buf, 0, buf_len); |
1682 | |
1683 | for (i = 0; i < mem_layout->region_table.size; i++) { |
1684 | count = 0; |
1685 | |
1686 | if (current_region->len > buf_len) { |
1687 | ath10k_warn(ar, fmt: "memory region %s size %d is larger that remaining ramdump buffer size %zu\n" , |
1688 | current_region->name, |
1689 | current_region->len, |
1690 | buf_len); |
1691 | break; |
1692 | } |
1693 | |
1694 | /* To get IRAM dump, the host driver needs to switch target |
1695 | * ram config from DRAM to IRAM. |
1696 | */ |
1697 | if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 || |
1698 | current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) { |
1699 | shift = current_region->start >> 20; |
1700 | |
			ret = ath10k_pci_set_ram_config(ar, shift);
			if (ret) {
				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
					    current_region->name, ret);
1705 | break; |
1706 | } |
1707 | } |
1708 | |
1709 | /* Reserve space for the header. */ |
1710 | hdr = (void *)buf; |
1711 | buf += sizeof(*hdr); |
1712 | buf_len -= sizeof(*hdr); |
1713 | |
1714 | switch (current_region->type) { |
1715 | case ATH10K_MEM_REGION_TYPE_IOSRAM: |
			count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1717 | break; |
1718 | case ATH10K_MEM_REGION_TYPE_IOREG: |
			ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1720 | if (ret < 0) |
1721 | break; |
1722 | |
1723 | count = ret; |
1724 | break; |
1725 | default: |
1726 | ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); |
1727 | if (ret < 0) |
1728 | break; |
1729 | |
1730 | count = ret; |
1731 | break; |
1732 | } |
1733 | |
1734 | hdr->region_type = cpu_to_le32(current_region->type); |
1735 | hdr->start = cpu_to_le32(current_region->start); |
1736 | hdr->length = cpu_to_le32(count); |
1737 | |
1738 | if (count == 0) |
1739 | /* Note: the header remains, just with zero length. */ |
1740 | break; |
1741 | |
1742 | buf += count; |
1743 | buf_len -= count; |
1744 | |
1745 | current_region++; |
1746 | } |
1747 | } |
1748 | |
1749 | static void ath10k_pci_fw_dump_work(struct work_struct *work) |
1750 | { |
1751 | struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci, |
1752 | dump_work); |
1753 | struct ath10k_fw_crash_data *crash_data; |
1754 | struct ath10k *ar = ar_pci->ar; |
1755 | char guid[UUID_STRING_LEN + 1]; |
1756 | |
1757 | mutex_lock(&ar->dump_mutex); |
1758 | |
	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_crash_counter++;
	spin_unlock_bh(&ar->data_lock);
1762 | |
1763 | crash_data = ath10k_coredump_new(ar); |
1764 | |
1765 | if (crash_data) |
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1771 | ath10k_print_driver_info(ar); |
1772 | ath10k_pci_dump_registers(ar, crash_data); |
1773 | ath10k_ce_dump_registers(ar, crash_data); |
1774 | ath10k_pci_dump_memory(ar, crash_data); |
1775 | |
	mutex_unlock(&ar->dump_mutex);
1777 | |
1778 | ath10k_core_start_recovery(ar); |
1779 | } |
1780 | |
1781 | static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) |
1782 | { |
1783 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1784 | |
	queue_work(ar->workqueue, &ar_pci->dump_work);
1786 | } |
1787 | |
1788 | void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, |
1789 | int force) |
1790 | { |
1791 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1792 | |
1793 | ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n" ); |
1794 | |
1795 | if (!force) { |
1796 | int resources; |
1797 | /* |
1798 | * Decide whether to actually poll for completions, or just |
1799 | * wait for a later chance. |
1800 | * If there seem to be plenty of resources left, then just wait |
1801 | * since checking involves reading a CE register, which is a |
1802 | * relatively expensive operation. |
1803 | */ |
1804 | resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); |
1805 | |
1806 | /* |
1807 | * If at least 50% of the total resources are still available, |
1808 | * don't bother checking again yet. |
1809 | */ |
1810 | if (resources > (ar_pci->attr[pipe].src_nentries >> 1)) |
1811 | return; |
1812 | } |
	ath10k_ce_per_engine_service(ar, pipe);
1814 | } |
1815 | |
1816 | static void ath10k_pci_rx_retry_sync(struct ath10k *ar) |
1817 | { |
1818 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1819 | |
	del_timer_sync(&ar_pci->rx_post_retry);
1821 | } |
1822 | |
1823 | int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, |
1824 | u8 *ul_pipe, u8 *dl_pipe) |
1825 | { |
1826 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1827 | const struct ce_service_to_pipe *entry; |
1828 | bool ul_set = false, dl_set = false; |
1829 | int i; |
1830 | |
1831 | ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n" ); |
1832 | |
1833 | for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) { |
1834 | entry = &ar_pci->serv_to_pipe[i]; |
1835 | |
1836 | if (__le32_to_cpu(entry->service_id) != service_id) |
1837 | continue; |
1838 | |
1839 | switch (__le32_to_cpu(entry->pipedir)) { |
1840 | case PIPEDIR_NONE: |
1841 | break; |
1842 | case PIPEDIR_IN: |
1843 | WARN_ON(dl_set); |
1844 | *dl_pipe = __le32_to_cpu(entry->pipenum); |
1845 | dl_set = true; |
1846 | break; |
1847 | case PIPEDIR_OUT: |
1848 | WARN_ON(ul_set); |
1849 | *ul_pipe = __le32_to_cpu(entry->pipenum); |
1850 | ul_set = true; |
1851 | break; |
1852 | case PIPEDIR_INOUT: |
1853 | WARN_ON(dl_set); |
1854 | WARN_ON(ul_set); |
1855 | *dl_pipe = __le32_to_cpu(entry->pipenum); |
1856 | *ul_pipe = __le32_to_cpu(entry->pipenum); |
1857 | dl_set = true; |
1858 | ul_set = true; |
1859 | break; |
1860 | } |
1861 | } |
1862 | |
1863 | if (!ul_set || !dl_set) |
1864 | return -ENOENT; |
1865 | |
1866 | return 0; |
1867 | } |
1868 | |
1869 | void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, |
1870 | u8 *ul_pipe, u8 *dl_pipe) |
1871 | { |
1872 | ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n" ); |
1873 | |
1874 | (void)ath10k_pci_hif_map_service_to_pipe(ar, |
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1876 | ul_pipe, dl_pipe); |
1877 | } |
1878 | |
1879 | void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) |
1880 | { |
1881 | u32 val; |
1882 | |
1883 | switch (ar->hw_rev) { |
1884 | case ATH10K_HW_QCA988X: |
1885 | case ATH10K_HW_QCA9887: |
1886 | case ATH10K_HW_QCA6174: |
1887 | case ATH10K_HW_QCA9377: |
1888 | val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + |
1889 | CORE_CTRL_ADDRESS); |
1890 | val &= ~CORE_CTRL_PCIE_REG_31_MASK; |
1891 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + |
				   CORE_CTRL_ADDRESS, val);
1893 | break; |
1894 | case ATH10K_HW_QCA99X0: |
1895 | case ATH10K_HW_QCA9984: |
1896 | case ATH10K_HW_QCA9888: |
1897 | case ATH10K_HW_QCA4019: |
1898 | /* TODO: Find appropriate register configuration for QCA99X0 |
1899 | * to mask irq/MSI. |
1900 | */ |
1901 | break; |
1902 | case ATH10K_HW_WCN3990: |
1903 | break; |
1904 | } |
1905 | } |
1906 | |
1907 | static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) |
1908 | { |
1909 | u32 val; |
1910 | |
1911 | switch (ar->hw_rev) { |
1912 | case ATH10K_HW_QCA988X: |
1913 | case ATH10K_HW_QCA9887: |
1914 | case ATH10K_HW_QCA6174: |
1915 | case ATH10K_HW_QCA9377: |
1916 | val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + |
1917 | CORE_CTRL_ADDRESS); |
1918 | val |= CORE_CTRL_PCIE_REG_31_MASK; |
1919 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + |
				   CORE_CTRL_ADDRESS, val);
1921 | break; |
1922 | case ATH10K_HW_QCA99X0: |
1923 | case ATH10K_HW_QCA9984: |
1924 | case ATH10K_HW_QCA9888: |
1925 | case ATH10K_HW_QCA4019: |
1926 | /* TODO: Find appropriate register configuration for QCA99X0 |
1927 | * to unmask irq/MSI. |
1928 | */ |
1929 | break; |
1930 | case ATH10K_HW_WCN3990: |
1931 | break; |
1932 | } |
1933 | } |
1934 | |
1935 | static void ath10k_pci_irq_disable(struct ath10k *ar) |
1936 | { |
1937 | ath10k_ce_disable_interrupts(ar); |
1938 | ath10k_pci_disable_and_clear_legacy_irq(ar); |
1939 | ath10k_pci_irq_msi_fw_mask(ar); |
1940 | } |
1941 | |
1942 | static void ath10k_pci_irq_sync(struct ath10k *ar) |
1943 | { |
1944 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1945 | |
	synchronize_irq(ar_pci->pdev->irq);
1947 | } |
1948 | |
1949 | static void ath10k_pci_irq_enable(struct ath10k *ar) |
1950 | { |
1951 | ath10k_ce_enable_interrupts(ar); |
1952 | ath10k_pci_enable_legacy_irq(ar); |
1953 | ath10k_pci_irq_msi_fw_unmask(ar); |
1954 | } |
1955 | |
1956 | static int ath10k_pci_hif_start(struct ath10k *ar) |
1957 | { |
1958 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1959 | |
1960 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n" ); |
1961 | |
1962 | ath10k_core_napi_enable(ar); |
1963 | |
1964 | ath10k_pci_irq_enable(ar); |
1965 | ath10k_pci_rx_post(ar); |
1966 | |
	pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
1970 | |
1971 | return 0; |
1972 | } |
1973 | |
1974 | static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) |
1975 | { |
1976 | struct ath10k *ar; |
1977 | struct ath10k_ce_pipe *ce_pipe; |
1978 | struct ath10k_ce_ring *ce_ring; |
1979 | struct sk_buff *skb; |
1980 | int i; |
1981 | |
1982 | ar = pci_pipe->hif_ce_state; |
1983 | ce_pipe = pci_pipe->ce_hdl; |
1984 | ce_ring = ce_pipe->dest_ring; |
1985 | |
1986 | if (!ce_ring) |
1987 | return; |
1988 | |
1989 | if (!pci_pipe->buf_sz) |
1990 | return; |
1991 | |
1992 | for (i = 0; i < ce_ring->nentries; i++) { |
1993 | skb = ce_ring->per_transfer_context[i]; |
1994 | if (!skb) |
1995 | continue; |
1996 | |
1997 | ce_ring->per_transfer_context[i] = NULL; |
1998 | |
1999 | dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, |
2000 | skb->len + skb_tailroom(skb), |
2001 | DMA_FROM_DEVICE); |
2002 | dev_kfree_skb_any(skb); |
2003 | } |
2004 | } |
2005 | |
2006 | static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) |
2007 | { |
2008 | struct ath10k *ar; |
2009 | struct ath10k_ce_pipe *ce_pipe; |
2010 | struct ath10k_ce_ring *ce_ring; |
2011 | struct sk_buff *skb; |
2012 | int i; |
2013 | |
2014 | ar = pci_pipe->hif_ce_state; |
2015 | ce_pipe = pci_pipe->ce_hdl; |
2016 | ce_ring = ce_pipe->src_ring; |
2017 | |
2018 | if (!ce_ring) |
2019 | return; |
2020 | |
2021 | if (!pci_pipe->buf_sz) |
2022 | return; |
2023 | |
2024 | for (i = 0; i < ce_ring->nentries; i++) { |
2025 | skb = ce_ring->per_transfer_context[i]; |
2026 | if (!skb) |
2027 | continue; |
2028 | |
2029 | ce_ring->per_transfer_context[i] = NULL; |
2030 | |
2031 | ath10k_htc_tx_completion_handler(ar, skb); |
2032 | } |
2033 | } |
2034 | |
2035 | /* |
2036 | * Cleanup residual buffers for device shutdown: |
2037 | * buffers that were enqueued for receive |
2038 | * buffers that were to be sent |
2039 | * Note: Buffers that had completed but which were |
2040 | * not yet processed are on a completion queue. They |
2041 | * are handled when the completion thread shuts down. |
2042 | */ |
2043 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar) |
2044 | { |
2045 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2046 | int pipe_num; |
2047 | |
2048 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
2049 | struct ath10k_pci_pipe *pipe_info; |
2050 | |
2051 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
2054 | } |
2055 | } |
2056 | |
2057 | void ath10k_pci_ce_deinit(struct ath10k *ar) |
2058 | { |
2059 | int i; |
2060 | |
2061 | for (i = 0; i < CE_COUNT; i++) |
		ath10k_ce_deinit_pipe(ar, i);
2063 | } |
2064 | |
2065 | void ath10k_pci_flush(struct ath10k *ar) |
2066 | { |
2067 | ath10k_pci_rx_retry_sync(ar); |
2068 | ath10k_pci_buffer_cleanup(ar); |
2069 | } |
2070 | |
2071 | static void ath10k_pci_hif_stop(struct ath10k *ar) |
2072 | { |
2073 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2074 | unsigned long flags; |
2075 | |
2076 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n" ); |
2077 | |
2078 | ath10k_pci_irq_disable(ar); |
2079 | ath10k_pci_irq_sync(ar); |
2080 | |
2081 | ath10k_core_napi_sync_disable(ar); |
2082 | |
2083 | cancel_work_sync(work: &ar_pci->dump_work); |
2084 | |
2085 | /* Most likely the device has HTT Rx ring configured. The only way to |
	 * prevent the device from accessing (and possibly corrupting) host
2087 | * memory is to reset the chip now. |
2088 | * |
2089 | * There's also no known way of masking MSI interrupts on the device. |
	 * For ranged MSI the CE-related interrupts can be masked. However,
	 * regardless of how many MSI interrupts are assigned, the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt, reset
	 * it before proceeding with cleanup.
2095 | */ |
2096 | ath10k_pci_safe_chip_reset(ar); |
2097 | |
2098 | ath10k_pci_flush(ar); |
2099 | |
2100 | spin_lock_irqsave(&ar_pci->ps_lock, flags); |
2101 | WARN_ON(ar_pci->ps_wake_refcount > 0); |
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2103 | } |
2104 | |
2105 | int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, |
2106 | void *req, u32 req_len, |
2107 | void *resp, u32 *resp_len) |
2108 | { |
2109 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2110 | struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; |
2111 | struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; |
2112 | struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; |
2113 | struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; |
2114 | dma_addr_t req_paddr = 0; |
2115 | dma_addr_t resp_paddr = 0; |
2116 | struct bmi_xfer xfer = {}; |
2117 | void *treq, *tresp = NULL; |
2118 | int ret = 0; |
2119 | |
2120 | might_sleep(); |
2121 | |
2122 | if (resp && !resp_len) |
2123 | return -EINVAL; |
2124 | |
2125 | if (resp && resp_len && *resp_len == 0) |
2126 | return -EINVAL; |
2127 | |
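	/* Bounce the caller's request through a DMA-safe copy; the optional
	 * response buffer is posted on the RX pipe before the request is
	 * sent so the target's reply always has somewhere to land.
	 */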
	treq = kmemdup(req, req_len, GFP_KERNEL);
2129 | if (!treq) |
2130 | return -ENOMEM; |
2131 | |
2132 | req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); |
	ret = dma_mapping_error(ar->dev, req_paddr);
2134 | if (ret) { |
2135 | ret = -EIO; |
2136 | goto err_dma; |
2137 | } |
2138 | |
2139 | if (resp && resp_len) { |
		tresp = kzalloc(*resp_len, GFP_KERNEL);
2141 | if (!tresp) { |
2142 | ret = -ENOMEM; |
2143 | goto err_req; |
2144 | } |
2145 | |
2146 | resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, |
2147 | DMA_FROM_DEVICE); |
		ret = dma_mapping_error(ar->dev, resp_paddr);
2149 | if (ret) { |
2150 | ret = -EIO; |
2151 | goto err_req; |
2152 | } |
2153 | |
2154 | xfer.wait_for_resp = true; |
2155 | xfer.resp_len = 0; |
2156 | |
		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2158 | } |
2159 | |
	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2161 | if (ret) |
2162 | goto err_resp; |
2163 | |
	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2165 | if (ret) { |
2166 | dma_addr_t unused_buffer; |
2167 | unsigned int unused_nbytes; |
2168 | unsigned int unused_id; |
2169 | |
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* a zero return from the wait means we did not time out */
		ret = 0;
	}
2176 | |
2177 | err_resp: |
2178 | if (resp) { |
2179 | dma_addr_t unused_buffer; |
2180 | |
		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2182 | dma_unmap_single(ar->dev, resp_paddr, |
2183 | *resp_len, DMA_FROM_DEVICE); |
2184 | } |
2185 | err_req: |
2186 | dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); |
2187 | |
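	/* On success, clamp the response length to what the target actually
	 * returned before copying it back to the caller.
	 */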
2188 | if (ret == 0 && resp_len) { |
2189 | *resp_len = min(*resp_len, xfer.resp_len); |
2190 | memcpy(resp, tresp, *resp_len); |
2191 | } |
2192 | err_dma: |
	kfree(treq);
	kfree(tresp);
2195 | |
2196 | return ret; |
2197 | } |
2198 | |
2199 | static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) |
2200 | { |
2201 | struct bmi_xfer *xfer; |
2202 | |
	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2204 | return; |
2205 | |
2206 | xfer->tx_done = true; |
2207 | } |
2208 | |
2209 | static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) |
2210 | { |
2211 | struct ath10k *ar = ce_state->ar; |
2212 | struct bmi_xfer *xfer; |
2213 | unsigned int nbytes; |
2214 | |
	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
2217 | return; |
2218 | |
2219 | if (WARN_ON_ONCE(!xfer)) |
2220 | return; |
2221 | |
2222 | if (!xfer->wait_for_resp) { |
2223 | ath10k_warn(ar, fmt: "unexpected: BMI data received; ignoring\n" ); |
2224 | return; |
2225 | } |
2226 | |
2227 | xfer->resp_len = nbytes; |
2228 | xfer->rx_done = true; |
2229 | } |
2230 | |
2231 | static int ath10k_pci_bmi_wait(struct ath10k *ar, |
2232 | struct ath10k_ce_pipe *tx_pipe, |
2233 | struct ath10k_ce_pipe *rx_pipe, |
2234 | struct bmi_xfer *xfer) |
2235 | { |
2236 | unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; |
2237 | unsigned long started = jiffies; |
2238 | unsigned long dur; |
2239 | int ret; |
2240 | |
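	/* BMI exchanges are polled rather than interrupt driven: spin on
	 * both pipes until the send (and, if requested, the receive)
	 * completes or the timeout expires, yielding the CPU between
	 * iterations.
	 */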
2241 | while (time_before_eq(jiffies, timeout)) { |
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);
2244 | |
2245 | if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) { |
2246 | ret = 0; |
2247 | goto out; |
2248 | } |
2249 | |
2250 | schedule(); |
2251 | } |
2252 | |
2253 | ret = -ETIMEDOUT; |
2254 | |
2255 | out: |
2256 | dur = jiffies - started; |
2257 | if (dur > HZ) |
2258 | ath10k_dbg(ar, ATH10K_DBG_BMI, |
2259 | "bmi cmd took %lu jiffies hz %d ret %d\n" , |
2260 | dur, HZ, ret); |
2261 | return ret; |
2262 | } |
2263 | |
2264 | /* |
2265 | * Send an interrupt to the device to wake up the Target CPU |
2266 | * so it has an opportunity to notice any changed state. |
2267 | */ |
2268 | static int ath10k_pci_wake_target_cpu(struct ath10k *ar) |
2269 | { |
2270 | u32 addr, val; |
2271 | |
2272 | addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; |
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);
2276 | |
2277 | return 0; |
2278 | } |
2279 | |
2280 | static int ath10k_pci_get_num_banks(struct ath10k *ar) |
2281 | { |
2282 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2283 | |
2284 | switch (ar_pci->pdev->device) { |
2285 | case QCA988X_2_0_DEVICE_ID_UBNT: |
2286 | case QCA988X_2_0_DEVICE_ID: |
2287 | case QCA99X0_2_0_DEVICE_ID: |
2288 | case QCA9888_2_0_DEVICE_ID: |
2289 | case QCA9984_1_0_DEVICE_ID: |
2290 | case QCA9887_1_0_DEVICE_ID: |
2291 | return 1; |
2292 | case QCA6164_2_1_DEVICE_ID: |
2293 | case QCA6174_2_1_DEVICE_ID: |
2294 | switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { |
2295 | case QCA6174_HW_1_0_CHIP_ID_REV: |
2296 | case QCA6174_HW_1_1_CHIP_ID_REV: |
2297 | case QCA6174_HW_2_1_CHIP_ID_REV: |
2298 | case QCA6174_HW_2_2_CHIP_ID_REV: |
2299 | return 3; |
2300 | case QCA6174_HW_1_3_CHIP_ID_REV: |
2301 | return 2; |
2302 | case QCA6174_HW_3_0_CHIP_ID_REV: |
2303 | case QCA6174_HW_3_1_CHIP_ID_REV: |
2304 | case QCA6174_HW_3_2_CHIP_ID_REV: |
2305 | return 9; |
2306 | } |
2307 | break; |
2308 | case QCA9377_1_0_DEVICE_ID: |
2309 | return 9; |
2310 | } |
2311 | |
2312 | ath10k_warn(ar, fmt: "unknown number of banks, assuming 1\n" ); |
2313 | return 1; |
2314 | } |
2315 | |
2316 | static int ath10k_bus_get_num_banks(struct ath10k *ar) |
2317 | { |
2318 | struct ath10k_ce *ce = ath10k_ce_priv(ar); |
2319 | |
2320 | return ce->bus_ops->get_num_banks(ar); |
2321 | } |
2322 | |
2323 | int ath10k_pci_init_config(struct ath10k *ar) |
2324 | { |
2325 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2326 | u32 interconnect_targ_addr; |
2327 | u32 pcie_state_targ_addr = 0; |
2328 | u32 pipe_cfg_targ_addr = 0; |
2329 | u32 svc_to_pipe_map = 0; |
2330 | u32 pcie_config_flags = 0; |
2331 | u32 ealloc_value; |
2332 | u32 ealloc_targ_addr; |
2333 | u32 flag2_value; |
2334 | u32 flag2_targ_addr; |
2335 | int ret = 0; |
2336 | |
2337 | /* Download to Target the CE Config and the service-to-CE map */ |
2338 | interconnect_targ_addr = |
2339 | host_interest_item_address(HI_ITEM(hi_interconnect_state)); |
2340 | |
2341 | /* Supply Target-side CE configuration */ |
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2346 | return ret; |
2347 | } |
2348 | |
2349 | if (pcie_state_targ_addr == 0) { |
2350 | ret = -EIO; |
2351 | ath10k_err(ar, fmt: "Invalid pcie state addr\n" ); |
2352 | return ret; |
2353 | } |
2354 | |
	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2361 | return ret; |
2362 | } |
2363 | |
2364 | if (pipe_cfg_targ_addr == 0) { |
2365 | ret = -EIO; |
2366 | ath10k_err(ar, fmt: "Invalid pipe cfg addr\n" ); |
2367 | return ret; |
2368 | } |
2369 | |
	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					ar_pci->pipe_config,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2377 | return ret; |
2378 | } |
2379 | |
	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2386 | return ret; |
2387 | } |
2388 | |
2389 | if (svc_to_pipe_map == 0) { |
2390 | ret = -EIO; |
2391 | ath10k_err(ar, fmt: "Invalid svc_to_pipe map\n" ); |
2392 | return ret; |
2393 | } |
2394 | |
	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					ar_pci->serv_to_pipe,
					sizeof(pci_target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2400 | return ret; |
2401 | } |
2402 | |
	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2409 | return ret; |
2410 | } |
2411 | |
2412 | pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; |
2413 | |
	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2420 | return ret; |
2421 | } |
2422 | |
2423 | /* configure early allocation */ |
2424 | ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); |
2425 | |
	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2429 | return ret; |
2430 | } |
2431 | |
2432 | /* first bank is switched to IRAM */ |
2433 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & |
2434 | HI_EARLY_ALLOC_MAGIC_MASK); |
2435 | ealloc_value |= ((ath10k_bus_get_num_banks(ar) << |
2436 | HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & |
2437 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); |
2438 | |
	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2442 | return ret; |
2443 | } |
2444 | |
2445 | /* Tell Target to proceed with initialization */ |
2446 | flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); |
2447 | |
	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
2451 | return ret; |
2452 | } |
2453 | |
2454 | flag2_value |= HI_OPTION_EARLY_CFG_DONE; |
2455 | |
	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
2459 | return ret; |
2460 | } |
2461 | |
2462 | return 0; |
2463 | } |
2464 | |
2465 | static void ath10k_pci_override_ce_config(struct ath10k *ar) |
2466 | { |
2467 | struct ce_attr *attr; |
2468 | struct ce_pipe_config *config; |
2469 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2470 | |
2471 | /* For QCA6174 we're overriding the Copy Engine 5 configuration, |
	 * since it is currently used for another feature.
2473 | */ |
2474 | |
2475 | /* Override Host's Copy Engine 5 configuration */ |
2476 | attr = &ar_pci->attr[5]; |
2477 | attr->src_sz_max = 0; |
2478 | attr->dest_nentries = 0; |
2479 | |
2480 | /* Override Target firmware's Copy Engine configuration */ |
2481 | config = &ar_pci->pipe_config[5]; |
2482 | config->pipedir = __cpu_to_le32(PIPEDIR_OUT); |
2483 | config->nbytes_max = __cpu_to_le32(2048); |
2484 | |
2485 | /* Map from service/endpoint to Copy Engine */ |
2486 | ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1); |
2487 | } |
2488 | |
2489 | int ath10k_pci_alloc_pipes(struct ath10k *ar) |
2490 | { |
2491 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2492 | struct ath10k_pci_pipe *pipe; |
2493 | struct ath10k_ce *ce = ath10k_ce_priv(ar); |
2494 | int i, ret; |
2495 | |
2496 | for (i = 0; i < CE_COUNT; i++) { |
2497 | pipe = &ar_pci->pipe_info[i]; |
2498 | pipe->ce_hdl = &ce->ce_states[i]; |
2499 | pipe->pipe_num = i; |
2500 | pipe->hif_ce_state = ar; |
2501 | |
		ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2505 | i, ret); |
2506 | return ret; |
2507 | } |
2508 | |
2509 | /* Last CE is Diagnostic Window */ |
2510 | if (i == CE_DIAG_PIPE) { |
2511 | ar_pci->ce_diag = pipe->ce_hdl; |
2512 | continue; |
2513 | } |
2514 | |
2515 | pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max); |
2516 | } |
2517 | |
2518 | return 0; |
2519 | } |
2520 | |
2521 | void ath10k_pci_free_pipes(struct ath10k *ar) |
2522 | { |
2523 | int i; |
2524 | |
2525 | for (i = 0; i < CE_COUNT; i++) |
		ath10k_ce_free_pipe(ar, i);
2527 | } |
2528 | |
2529 | int ath10k_pci_init_pipes(struct ath10k *ar) |
2530 | { |
2531 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2532 | int i, ret; |
2533 | |
2534 | for (i = 0; i < CE_COUNT; i++) { |
		ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2538 | i, ret); |
2539 | return ret; |
2540 | } |
2541 | } |
2542 | |
2543 | return 0; |
2544 | } |
2545 | |
2546 | static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) |
2547 | { |
2548 | return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & |
2549 | FW_IND_EVENT_PENDING; |
2550 | } |
2551 | |
2552 | static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) |
2553 | { |
2554 | u32 val; |
2555 | |
2556 | val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); |
2557 | val &= ~FW_IND_EVENT_PENDING; |
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2559 | } |
2560 | |
2561 | static bool ath10k_pci_has_device_gone(struct ath10k *ar) |
2562 | { |
2563 | u32 val; |
2564 | |
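	/* A read of all-ones indicates the device has dropped off the bus
	 * (e.g. removed or powered down), since aborted PCI transactions
	 * return 0xffffffff.
	 */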
2565 | val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); |
2566 | return (val == 0xffffffff); |
2567 | } |
2568 | |
2569 | /* this function effectively clears target memory controller assert line */ |
2570 | static void ath10k_pci_warm_reset_si0(struct ath10k *ar) |
2571 | { |
2572 | u32 val; |
2573 | |
2574 | val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); |
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
2587 | } |
2588 | |
2589 | static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) |
2590 | { |
2591 | u32 val; |
2592 | |
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2598 | } |
2599 | |
2600 | static void ath10k_pci_warm_reset_ce(struct ath10k *ar) |
2601 | { |
2602 | u32 val; |
2603 | |
2604 | val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); |
2605 | |
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2611 | } |
2612 | |
2613 | static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) |
2614 | { |
2615 | u32 val; |
2616 | |
2617 | val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS); |
	ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
			       val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2620 | } |
2621 | |
2622 | static int ath10k_pci_warm_reset(struct ath10k *ar) |
2623 | { |
2624 | int ret; |
2625 | |
2626 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n" ); |
2627 | |
	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);
2631 | |
2632 | ath10k_pci_irq_disable(ar); |
2633 | |
2634 | /* Make sure the target CPU is not doing anything dangerous, e.g. if it |
2635 | * were to access copy engine while host performs copy engine reset |
2636 | * then it is possible for the device to confuse pci-e controller to |
2637 | * the point of bringing host system to a complete stop (i.e. hang). |
2638 | */ |
2639 | ath10k_pci_warm_reset_si0(ar); |
2640 | ath10k_pci_warm_reset_cpu(ar); |
2641 | ath10k_pci_init_pipes(ar); |
2642 | ath10k_pci_wait_for_target_init(ar); |
2643 | |
2644 | ath10k_pci_warm_reset_clear_lf(ar); |
2645 | ath10k_pci_warm_reset_ce(ar); |
2646 | ath10k_pci_warm_reset_cpu(ar); |
2647 | ath10k_pci_init_pipes(ar); |
2648 | |
2649 | ret = ath10k_pci_wait_for_target_init(ar); |
2650 | if (ret) { |
2651 | ath10k_warn(ar, fmt: "failed to wait for target init: %d\n" , ret); |
2652 | return ret; |
2653 | } |
2654 | |
2655 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n" ); |
2656 | |
2657 | return 0; |
2658 | } |
2659 | |
2660 | static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) |
2661 | { |
2662 | ath10k_pci_irq_disable(ar); |
2663 | return ath10k_pci_qca99x0_chip_reset(ar); |
2664 | } |
2665 | |
2666 | static int ath10k_pci_safe_chip_reset(struct ath10k *ar) |
2667 | { |
2668 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2669 | |
2670 | if (!ar_pci->pci_soft_reset) |
2671 | return -EOPNOTSUPP; |
2672 | |
2673 | return ar_pci->pci_soft_reset(ar); |
2674 | } |
2675 | |
2676 | static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) |
2677 | { |
2678 | int i, ret; |
2679 | u32 val; |
2680 | |
2681 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n" ); |
2682 | |
	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset, which is safer but may not
	 * be able to recover the device from all possible failure scenarios.
2686 | * |
2687 | * Warm reset doesn't always work on first try so attempt it a few |
2688 | * times before giving up. |
2689 | */ |
2690 | for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { |
2691 | ret = ath10k_pci_warm_reset(ar); |
2692 | if (ret) { |
2693 | ath10k_warn(ar, fmt: "failed to warm reset attempt %d of %d: %d\n" , |
2694 | i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, |
2695 | ret); |
2696 | continue; |
2697 | } |
2698 | |
2699 | /* FIXME: Sometimes copy engine doesn't recover after warm |
2700 | * reset. In most cases this needs cold reset. In some of these |
2701 | * cases the device is in such a state that a cold reset may |
2702 | * lock up the host. |
2703 | * |
2704 | * Reading any host interest register via copy engine is |
2705 | * sufficient to verify if device is capable of booting |
2706 | * firmware blob. |
2707 | */ |
2708 | ret = ath10k_pci_init_pipes(ar); |
2709 | if (ret) { |
2710 | ath10k_warn(ar, fmt: "failed to init copy engine: %d\n" , |
2711 | ret); |
2712 | continue; |
2713 | } |
2714 | |
2715 | ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, |
2716 | value: &val); |
2717 | if (ret) { |
2718 | ath10k_warn(ar, fmt: "failed to poke copy engine: %d\n" , |
2719 | ret); |
2720 | continue; |
2721 | } |
2722 | |
2723 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n" ); |
2724 | return 0; |
2725 | } |
2726 | |
2727 | if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { |
2728 | ath10k_warn(ar, fmt: "refusing cold reset as requested\n" ); |
2729 | return -EPERM; |
2730 | } |
2731 | |
2732 | ret = ath10k_pci_cold_reset(ar); |
2733 | if (ret) { |
2734 | ath10k_warn(ar, fmt: "failed to cold reset: %d\n" , ret); |
2735 | return ret; |
2736 | } |
2737 | |
2738 | ret = ath10k_pci_wait_for_target_init(ar); |
2739 | if (ret) { |
2740 | ath10k_warn(ar, fmt: "failed to wait for target after cold reset: %d\n" , |
2741 | ret); |
2742 | return ret; |
2743 | } |
2744 | |
2745 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n" ); |
2746 | |
2747 | return 0; |
2748 | } |
2749 | |
2750 | static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) |
2751 | { |
2752 | int ret; |
2753 | |
2754 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n" ); |
2755 | |
2756 | /* FIXME: QCA6174 requires cold + warm reset to work. */ |
2757 | |
2758 | ret = ath10k_pci_cold_reset(ar); |
2759 | if (ret) { |
2760 | ath10k_warn(ar, fmt: "failed to cold reset: %d\n" , ret); |
2761 | return ret; |
2762 | } |
2763 | |
2764 | ret = ath10k_pci_wait_for_target_init(ar); |
2765 | if (ret) { |
2766 | ath10k_warn(ar, fmt: "failed to wait for target after cold reset: %d\n" , |
2767 | ret); |
2768 | return ret; |
2769 | } |
2770 | |
2771 | ret = ath10k_pci_warm_reset(ar); |
2772 | if (ret) { |
2773 | ath10k_warn(ar, fmt: "failed to warm reset: %d\n" , ret); |
2774 | return ret; |
2775 | } |
2776 | |
2777 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n" ); |
2778 | |
2779 | return 0; |
2780 | } |
2781 | |
2782 | static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) |
2783 | { |
2784 | int ret; |
2785 | |
2786 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n" ); |
2787 | |
2788 | ret = ath10k_pci_cold_reset(ar); |
2789 | if (ret) { |
2790 | ath10k_warn(ar, fmt: "failed to cold reset: %d\n" , ret); |
2791 | return ret; |
2792 | } |
2793 | |
2794 | ret = ath10k_pci_wait_for_target_init(ar); |
2795 | if (ret) { |
2796 | ath10k_warn(ar, fmt: "failed to wait for target after cold reset: %d\n" , |
2797 | ret); |
2798 | return ret; |
2799 | } |
2800 | |
2801 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n" ); |
2802 | |
2803 | return 0; |
2804 | } |
2805 | |
2806 | static int ath10k_pci_chip_reset(struct ath10k *ar) |
2807 | { |
2808 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2809 | |
2810 | if (WARN_ON(!ar_pci->pci_hard_reset)) |
2811 | return -EOPNOTSUPP; |
2812 | |
2813 | return ar_pci->pci_hard_reset(ar); |
2814 | } |
2815 | |
2816 | static int ath10k_pci_hif_power_up(struct ath10k *ar, |
2817 | enum ath10k_firmware_mode fw_mode) |
2818 | { |
2819 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2820 | int ret; |
2821 | |
2822 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n" ); |
2823 | |
	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);
2828 | |
2829 | /* |
2830 | * Bring the target up cleanly. |
2831 | * |
2832 | * The target may be in an undefined state with an AUX-powered Target |
2833 | * and a Host in WoW mode. If the Host crashes, loses power, or is |
2834 | * restarted (without unloading the driver) then the Target is left |
2835 | * (aux) powered and running. On a subsequent driver load, the Target |
2836 | * is in an unexpected state. We try to catch that here in order to |
2837 | * reset the Target and retry the probe. |
2838 | */ |
2839 | ret = ath10k_pci_chip_reset(ar); |
2840 | if (ret) { |
2841 | if (ath10k_pci_has_fw_crashed(ar)) { |
2842 | ath10k_warn(ar, fmt: "firmware crashed during chip reset\n" ); |
2843 | ath10k_pci_fw_crashed_clear(ar); |
2844 | ath10k_pci_fw_crashed_dump(ar); |
2845 | } |
2846 | |
2847 | ath10k_err(ar, fmt: "failed to reset chip: %d\n" , ret); |
2848 | goto err_sleep; |
2849 | } |
2850 | |
2851 | ret = ath10k_pci_init_pipes(ar); |
2852 | if (ret) { |
2853 | ath10k_err(ar, fmt: "failed to initialize CE: %d\n" , ret); |
2854 | goto err_sleep; |
2855 | } |
2856 | |
2857 | ret = ath10k_pci_init_config(ar); |
2858 | if (ret) { |
2859 | ath10k_err(ar, fmt: "failed to setup init config: %d\n" , ret); |
2860 | goto err_ce; |
2861 | } |
2862 | |
2863 | ret = ath10k_pci_wake_target_cpu(ar); |
2864 | if (ret) { |
2865 | ath10k_err(ar, fmt: "could not wake up target CPU: %d\n" , ret); |
2866 | goto err_ce; |
2867 | } |
2868 | |
2869 | return 0; |
2870 | |
2871 | err_ce: |
2872 | ath10k_pci_ce_deinit(ar); |
2873 | |
2874 | err_sleep: |
2875 | return ret; |
2876 | } |
2877 | |
2878 | void ath10k_pci_hif_power_down(struct ath10k *ar) |
2879 | { |
2880 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n" ); |
2881 | |
2882 | /* Currently hif_power_up performs effectively a reset and hif_stop |
2883 | * resets the chip as well so there's no point in resetting here. |
2884 | */ |
2885 | } |
2886 | |
2887 | static int ath10k_pci_hif_suspend(struct ath10k *ar) |
2888 | { |
2889 | /* Nothing to do; the important stuff is in the driver suspend. */ |
2890 | return 0; |
2891 | } |
2892 | |
2893 | static int ath10k_pci_suspend(struct ath10k *ar) |
2894 | { |
2895 | /* The grace timer can still be counting down and ar->ps_awake be true. |
2896 | * It is known that the device may be asleep after resuming regardless |
2897 | * of the SoC powersave state before suspending. Hence make sure the |
2898 | * device is asleep before proceeding. |
2899 | */ |
2900 | ath10k_pci_sleep_sync(ar); |
2901 | |
2902 | return 0; |
2903 | } |
2904 | |
2905 | static int ath10k_pci_hif_resume(struct ath10k *ar) |
2906 | { |
2907 | /* Nothing to do; the important stuff is in the driver resume. */ |
2908 | return 0; |
2909 | } |
2910 | |
2911 | static int ath10k_pci_resume(struct ath10k *ar) |
2912 | { |
2913 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2914 | struct pci_dev *pdev = ar_pci->pdev; |
2915 | u32 val; |
2916 | int ret = 0; |
2917 | |
2918 | ret = ath10k_pci_force_wake(ar); |
2919 | if (ret) { |
2920 | ath10k_err(ar, fmt: "failed to wake up target: %d\n" , ret); |
2921 | return ret; |
2922 | } |
2923 | |
2924 | /* Suspend/Resume resets the PCI configuration space, so we have to |
2925 | * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries |
2926 | * from interfering with C3 CPU state. pci_restore_state won't help |
2927 | * here since it only restores the first 64 bytes pci config header. |
2928 | */ |
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2932 | |
2933 | return ret; |
2934 | } |
2935 | |
2936 | static bool ath10k_pci_validate_cal(void *data, size_t size) |
2937 | { |
2938 | __le16 *cal_words = data; |
2939 | u16 checksum = 0; |
2940 | size_t i; |
2941 | |
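	/* The calibration image is considered valid when the XOR of all its
	 * 16-bit words, including the stored checksum word, equals 0xffff.
	 */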
2942 | if (size % 2 != 0) |
2943 | return false; |
2944 | |
2945 | for (i = 0; i < size / 2; i++) |
2946 | checksum ^= le16_to_cpu(cal_words[i]); |
2947 | |
2948 | return checksum == 0xffff; |
2949 | } |
2950 | |
2951 | static void ath10k_pci_enable_eeprom(struct ath10k *ar) |
2952 | { |
2953 | /* Enable SI clock */ |
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2955 | |
2956 | /* Configure GPIOs for I2C operation */ |
2957 | ath10k_pci_write32(ar, |
2958 | GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + |
2959 | 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, |
2960 | SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, |
2961 | GPIO_PIN0_CONFIG) | |
2962 | SM(1, GPIO_PIN0_PAD_PULL)); |
2963 | |
2964 | ath10k_pci_write32(ar, |
2965 | GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + |
2966 | 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, |
2967 | SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | |
2968 | SM(1, GPIO_PIN0_PAD_PULL)); |
2969 | |
2970 | ath10k_pci_write32(ar, |
2971 | GPIO_BASE_ADDRESS + |
2972 | QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, |
2973 | value: 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); |
2974 | |
2975 | /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ |
2976 | ath10k_pci_write32(ar, |
2977 | SI_BASE_ADDRESS + SI_CONFIG_OFFSET, |
2978 | SM(1, SI_CONFIG_ERR_INT) | |
2979 | SM(1, SI_CONFIG_BIDIR_OD_DATA) | |
2980 | SM(1, SI_CONFIG_I2C) | |
2981 | SM(1, SI_CONFIG_POS_SAMPLE) | |
2982 | SM(1, SI_CONFIG_INACTIVE_DATA) | |
2983 | SM(1, SI_CONFIG_INACTIVE_CLK) | |
2984 | SM(8, SI_CONFIG_DIVIDER)); |
2985 | } |
2986 | |
2987 | static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) |
2988 | { |
2989 | u32 reg; |
2990 | int wait_limit; |
2991 | |
	/* set the device select byte and select the read operation */
2993 | reg = QCA9887_EEPROM_SELECT_READ | |
2994 | SM(addr, QCA9887_EEPROM_ADDR_LO) | |
2995 | SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); |
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2997 | |
	/* write the transfer length (4 TX bytes, 1 RX byte) and the START bit */
2999 | ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, |
3000 | SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | |
3001 | SM(4, SI_CS_TX_CNT)); |
3002 | |
	/* wait at most 1 sec (100000 iterations * 10 us) */
	wait_limit = 100000;
3005 | |
3006 | /* wait for SI_CS_DONE_INT */ |
3007 | do { |
3008 | reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); |
3009 | if (MS(reg, SI_CS_DONE_INT)) |
3010 | break; |
3011 | |
3012 | wait_limit--; |
3013 | udelay(10); |
3014 | } while (wait_limit > 0); |
3015 | |
3016 | if (!MS(reg, SI_CS_DONE_INT)) { |
3017 | ath10k_err(ar, fmt: "timeout while reading device EEPROM at %04x\n" , |
3018 | addr); |
3019 | return -ETIMEDOUT; |
3020 | } |
3021 | |
3022 | /* clear SI_CS_DONE_INT */ |
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3024 | |
3025 | if (MS(reg, SI_CS_DONE_ERR)) { |
3026 | ath10k_err(ar, fmt: "failed to read device EEPROM at %04x\n" , addr); |
3027 | return -EIO; |
3028 | } |
3029 | |
3030 | /* extract receive data */ |
3031 | reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); |
3032 | *out = reg; |
3033 | |
3034 | return 0; |
3035 | } |
3036 | |
3037 | static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, |
3038 | size_t *data_len) |
3039 | { |
3040 | u8 *caldata = NULL; |
3041 | size_t calsize, i; |
3042 | int ret; |
3043 | |
3044 | if (!QCA_REV_9887(ar)) |
3045 | return -EOPNOTSUPP; |
3046 | |
3047 | calsize = ar->hw_params.cal_data_len; |
	caldata = kmalloc(calsize, GFP_KERNEL);
3049 | if (!caldata) |
3050 | return -ENOMEM; |
3051 | |
3052 | ath10k_pci_enable_eeprom(ar); |
3053 | |
3054 | for (i = 0; i < calsize; i++) { |
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3056 | if (ret) |
3057 | goto err_free; |
3058 | } |
3059 | |
	if (!ath10k_pci_validate_cal(caldata, calsize))
3061 | goto err_free; |
3062 | |
3063 | *data = caldata; |
3064 | *data_len = calsize; |
3065 | |
3066 | return 0; |
3067 | |
3068 | err_free: |
	kfree(caldata);
3070 | |
3071 | return -EINVAL; |
3072 | } |
3073 | |
3074 | static const struct ath10k_hif_ops ath10k_pci_hif_ops = { |
3075 | .tx_sg = ath10k_pci_hif_tx_sg, |
3076 | .diag_read = ath10k_pci_hif_diag_read, |
3077 | .diag_write = ath10k_pci_diag_write_mem, |
3078 | .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, |
3079 | .start = ath10k_pci_hif_start, |
3080 | .stop = ath10k_pci_hif_stop, |
3081 | .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, |
3082 | .get_default_pipe = ath10k_pci_hif_get_default_pipe, |
3083 | .send_complete_check = ath10k_pci_hif_send_complete_check, |
3084 | .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, |
3085 | .power_up = ath10k_pci_hif_power_up, |
3086 | .power_down = ath10k_pci_hif_power_down, |
3087 | .read32 = ath10k_pci_read32, |
3088 | .write32 = ath10k_pci_write32, |
3089 | .suspend = ath10k_pci_hif_suspend, |
3090 | .resume = ath10k_pci_hif_resume, |
3091 | .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, |
3092 | }; |
3093 | |
3094 | /* |
3095 | * Top-level interrupt handler for all PCI interrupts from a Target. |
3096 | * When a block of MSI interrupts is allocated, this top-level handler |
3097 | * is not used; instead, we directly call the correct sub-handler. |
3098 | */ |
3099 | static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) |
3100 | { |
3101 | struct ath10k *ar = arg; |
3102 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3103 | int ret; |
3104 | |
3105 | if (ath10k_pci_has_device_gone(ar)) |
3106 | return IRQ_NONE; |
3107 | |
3108 | ret = ath10k_pci_force_wake(ar); |
3109 | if (ret) { |
3110 | ath10k_warn(ar, fmt: "failed to wake device up on irq: %d\n" , ret); |
3111 | return IRQ_NONE; |
3112 | } |
3113 | |
3114 | if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) && |
3115 | !ath10k_pci_irq_pending(ar)) |
3116 | return IRQ_NONE; |
3117 | |
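	/* Mask interrupts and hand processing off to NAPI; they are
	 * re-enabled from ath10k_pci_napi_poll() once the poll has drained
	 * all pending work.
	 */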
3118 | ath10k_pci_disable_and_clear_legacy_irq(ar); |
3119 | ath10k_pci_irq_msi_fw_mask(ar); |
	napi_schedule(&ar->napi);
3121 | |
3122 | return IRQ_HANDLED; |
3123 | } |
3124 | |
3125 | static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) |
3126 | { |
3127 | struct ath10k *ar = container_of(ctx, struct ath10k, napi); |
3128 | int done = 0; |
3129 | |
3130 | if (ath10k_pci_has_fw_crashed(ar)) { |
3131 | ath10k_pci_fw_crashed_clear(ar); |
3132 | ath10k_pci_fw_crashed_dump(ar); |
		napi_complete(ctx);
3134 | return done; |
3135 | } |
3136 | |
3137 | ath10k_ce_per_engine_service_any(ar); |
3138 | |
3139 | done = ath10k_htt_txrx_compl_task(ar, budget); |
3140 | |
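	/* Per the NAPI contract, done < budget means all pending work was
	 * drained and the poll may be completed; otherwise the core re-polls
	 * without interrupts being re-enabled.
	 */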
3141 | if (done < budget) { |
		napi_complete_done(ctx, done);
		/* In case of MSI, interrupts may be received while the NAPI
		 * poll is in progress. Pending interrupts that arrive after
		 * all copy engine pipes have been processed by the poll
		 * would otherwise never be handled again, which has been
		 * seen to prevent the boot sequence from completing on x86
		 * platforms. So before re-enabling interrupts it is safer to
		 * check for pending interrupts and service them immediately.
		 */
		if (ath10k_ce_interrupt_summary(ar)) {
			napi_schedule(ctx);
3153 | goto out; |
3154 | } |
3155 | ath10k_pci_enable_legacy_irq(ar); |
3156 | ath10k_pci_irq_msi_fw_unmask(ar); |
3157 | } |
3158 | |
3159 | out: |
3160 | return done; |
3161 | } |
3162 | |
3163 | static int ath10k_pci_request_irq_msi(struct ath10k *ar) |
3164 | { |
3165 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3166 | int ret; |
3167 | |
	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3173 | ar_pci->pdev->irq, ret); |
3174 | return ret; |
3175 | } |
3176 | |
3177 | return 0; |
3178 | } |
3179 | |
3180 | static int ath10k_pci_request_irq_legacy(struct ath10k *ar) |
3181 | { |
3182 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3183 | int ret; |
3184 | |
	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3190 | ar_pci->pdev->irq, ret); |
3191 | return ret; |
3192 | } |
3193 | |
3194 | return 0; |
3195 | } |
3196 | |
3197 | static int ath10k_pci_request_irq(struct ath10k *ar) |
3198 | { |
3199 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3200 | |
3201 | switch (ar_pci->oper_irq_mode) { |
3202 | case ATH10K_PCI_IRQ_LEGACY: |
3203 | return ath10k_pci_request_irq_legacy(ar); |
3204 | case ATH10K_PCI_IRQ_MSI: |
3205 | return ath10k_pci_request_irq_msi(ar); |
3206 | default: |
3207 | return -EINVAL; |
3208 | } |
3209 | } |
3210 | |
3211 | static void ath10k_pci_free_irq(struct ath10k *ar) |
3212 | { |
3213 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3214 | |
3215 | free_irq(ar_pci->pdev->irq, ar); |
3216 | } |
3217 | |
3218 | void ath10k_pci_init_napi(struct ath10k *ar) |
3219 | { |
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
3221 | } |
3222 | |
3223 | static int ath10k_pci_init_irq(struct ath10k *ar) |
3224 | { |
3225 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3226 | int ret; |
3227 | |
3228 | ath10k_pci_init_napi(ar); |
3229 | |
3230 | if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) |
		ath10k_info(ar, "limiting irq mode to: %d\n",
3232 | ath10k_pci_irq_mode); |
3233 | |
3234 | /* Try MSI */ |
3235 | if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { |
3236 | ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; |
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* MSI failed, try legacy irq next */
3242 | } |
3243 | |
3244 | /* Try legacy irq |
3245 | * |
3246 | * A potential race occurs here: The CORE_BASE write |
3247 | * depends on target correctly decoding AXI address but |
3248 | * host won't know when target writes BAR to CORE_CTRL. |
3249 | * This write might get lost if target has NOT written BAR. |
	 * For now, fix the race by repeating the write in the
	 * synchronization check below.
3252 | */ |
3253 | ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; |
3254 | |
3255 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, |
3256 | PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); |
3257 | |
3258 | return 0; |
3259 | } |
3260 | |
3261 | static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) |
3262 | { |
3263 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, |
			   0);
3265 | } |
3266 | |
3267 | static int ath10k_pci_deinit_irq(struct ath10k *ar) |
3268 | { |
3269 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3270 | |
3271 | switch (ar_pci->oper_irq_mode) { |
3272 | case ATH10K_PCI_IRQ_LEGACY: |
3273 | ath10k_pci_deinit_irq_legacy(ar); |
3274 | break; |
3275 | default: |
		pci_disable_msi(ar_pci->pdev);
3277 | break; |
3278 | } |
3279 | |
3280 | return 0; |
3281 | } |
3282 | |
3283 | int ath10k_pci_wait_for_target_init(struct ath10k *ar) |
3284 | { |
3285 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3286 | unsigned long timeout; |
3287 | u32 val; |
3288 | |
3289 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n" ); |
3290 | |
3291 | timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); |
3292 | |
3293 | do { |
3294 | val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); |
3295 | |
3296 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n" , |
3297 | val); |
3298 | |
3299 | /* target should never return this */ |
3300 | if (val == 0xffffffff) |
3301 | continue; |
3302 | |
3303 | /* the device has crashed so don't bother trying anymore */ |
3304 | if (val & FW_IND_EVENT_PENDING) |
3305 | break; |
3306 | |
3307 | if (val & FW_IND_INITIALIZED) |
3308 | break; |
3309 | |
3310 | if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) |
3311 | /* Fix potential race by repeating CORE_BASE writes */ |
3312 | ath10k_pci_enable_legacy_irq(ar); |
3313 | |
3314 | mdelay(10); |
3315 | } while (time_before(jiffies, timeout)); |
3316 | |
3317 | ath10k_pci_disable_and_clear_legacy_irq(ar); |
3318 | ath10k_pci_irq_msi_fw_mask(ar); |
3319 | |
3320 | if (val == 0xffffffff) { |
3321 | ath10k_err(ar, fmt: "failed to read device register, device is gone\n" ); |
3322 | return -EIO; |
3323 | } |
3324 | |
3325 | if (val & FW_IND_EVENT_PENDING) { |
3326 | ath10k_warn(ar, fmt: "device has crashed during init\n" ); |
3327 | return -ECOMM; |
3328 | } |
3329 | |
3330 | if (!(val & FW_IND_INITIALIZED)) { |
3331 | ath10k_err(ar, fmt: "failed to receive initialized event from target: %08x\n" , |
3332 | val); |
3333 | return -ETIMEDOUT; |
3334 | } |
3335 | |
3336 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n" ); |
3337 | return 0; |
3338 | } |
3339 | |
3340 | static int ath10k_pci_cold_reset(struct ath10k *ar) |
3341 | { |
3342 | u32 val; |
3343 | |
3344 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n" ); |
3345 | |
	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);
3351 | |
3352 | /* Put Target, including PCIe, into RESET. */ |
3353 | val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); |
3354 | val |= 1; |
3355 | ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); |
3356 | |
	/* After writing into SOC_GLOBAL_RESET to put the device into reset
	 * and then pulling it out of reset, PCIe may not be stable for
	 * immediate register accesses and can cause bus errors. Add a delay
	 * before any PCIe access request to avoid this issue.
	 */
	msleep(20);
3363 | |
3364 | /* Pull Target, including PCIe, out of RESET. */ |
3365 | val &= ~1; |
3366 | ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); |
3367 | |
	msleep(20);
3369 | |
3370 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n" ); |
3371 | |
3372 | return 0; |
3373 | } |
3374 | |
3375 | static int ath10k_pci_claim(struct ath10k *ar) |
3376 | { |
3377 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3378 | struct pci_dev *pdev = ar_pci->pdev; |
3379 | int ret; |
3380 | |
	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3386 | return ret; |
3387 | } |
3388 | |
	ret = pci_request_region(pdev, BAR_NUM, "ath");
3390 | if (ret) { |
3391 | ath10k_err(ar, fmt: "failed to request region BAR%d: %d\n" , BAR_NUM, |
3392 | ret); |
3393 | goto err_device; |
3394 | } |
3395 | |
3396 | /* Target expects 32 bit DMA. Enforce it. */ |
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3400 | goto err_region; |
3401 | } |
3402 | |
	pci_set_master(pdev);
3404 | |
3405 | /* Arrange for access to Target SoC registers. */ |
3406 | ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); |
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3410 | ret = -EIO; |
3411 | goto err_region; |
3412 | } |
3413 | |
3414 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n" , ar_pci->mem); |
3415 | return 0; |
3416 | |
3417 | err_region: |
3418 | pci_release_region(pdev, BAR_NUM); |
3419 | |
3420 | err_device: |
	pci_disable_device(pdev);
3422 | |
3423 | return ret; |
3424 | } |
3425 | |
3426 | static void ath10k_pci_release(struct ath10k *ar) |
3427 | { |
3428 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
3429 | struct pci_dev *pdev = ar_pci->pdev; |
3430 | |
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_disable_device(pdev);
3434 | } |
3435 | |
3436 | static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) |
3437 | { |
3438 | const struct ath10k_pci_supp_chip *supp_chip; |
3439 | int i; |
3440 | u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); |
3441 | |
3442 | for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { |
3443 | supp_chip = &ath10k_pci_supp_chips[i]; |
3444 | |
3445 | if (supp_chip->dev_id == dev_id && |
3446 | supp_chip->rev_id == rev_id) |
3447 | return true; |
3448 | } |
3449 | |
3450 | return false; |
3451 | } |
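
/* MS() above is ath10k's mask-and-shift bitfield extractor: MS(chip_id,
 * SOC_CHIP_ID_REV) isolates the silicon revision bits of the chip id
 * register so they can be matched against the supported-chip table.
 */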

int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_init(&ce->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);
	mutex_init(&ar_pci->ce_diag_mutex);

	INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);

	timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);

	ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
			       sizeof(pci_host_ce_config_wlan),
			       GFP_KERNEL);
	if (!ar_pci->attr)
		return -ENOMEM;

	ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
				      sizeof(pci_target_ce_config_wlan),
				      GFP_KERNEL);
	if (!ar_pci->pipe_config) {
		ret = -ENOMEM;
		goto err_free_attr;
	}

	ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
				       sizeof(pci_target_service_to_ce_map_wlan),
				       GFP_KERNEL);
	if (!ar_pci->serv_to_pipe) {
		ret = -ENOMEM;
		goto err_free_pipe_config;
	}

	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_free_serv_to_pipe;
	}

	return 0;

err_free_serv_to_pipe:
	kfree(ar_pci->serv_to_pipe);
err_free_pipe_config:
	kfree(ar_pci->pipe_config);
err_free_attr:
	kfree(ar_pci->attr);
	return ret;
}
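
/* The three tables kmemdup'd above are per-device copies of the default CE
 * configuration; presumably this lets ath10k_pci_override_ce_config() patch
 * them for QCA6174/QCA9377 without modifying the shared templates. All three
 * copies are freed again in ath10k_pci_release_resource().
 */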

void ath10k_pci_release_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	kfree(ar_pci->attr);
	kfree(ar_pci->pipe_config);
	kfree(ar_pci->serv_to_pipe);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};
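
/* The ops table above gives the bus-agnostic copy engine code register
 * access without it knowing whether the device sits on PCIe or AHB; the
 * AHB glue supplies its own ath10k_bus_ops of the same shape.
 */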

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	struct ath10k_bus_params bus_params = {};
	bool pci_ps, is_qca988x = false;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);
	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		is_qca988x = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA9887_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9888_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9888;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;
	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
	ar->ce_priv = &ar_pci->ce;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
	bus_params.link_can_suspend = true;
	/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
	 * fall off the bus during chip_reset. These chips share a PCI device
	 * id with the QCA9880 BR4A/2R4E parts, so the device id alone cannot
	 * distinguish them; hence this explicit chip id check.
	 */
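	/* An all-ones readback (0xffffffff) typically means the device did
	 * not respond to the read at all, so in that case validation is
	 * deferred to the post-reset chip id check below.
	 */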
	if (is_qca988x) {
		bus_params.chip_id =
			ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
		if (bus_params.chip_id != 0xffffffff) {
			if (!ath10k_pci_chip_is_supported(pdev->device,
							  bus_params.chip_id)) {
				ret = -ENODEV;
				goto err_unsupported;
			}
		}
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (bus_params.chip_id == 0xffffffff) {
		ret = -ENODEV;
		goto err_unsupported;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
		ret = -ENODEV;
		goto err_unsupported;
	}

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_unsupported:
	ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
		   pdev->device, bus_params.chip_id);

err_free_irq:
	ath10k_pci_free_irq(ar);

err_deinit_irq:
	ath10k_pci_release_resource(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

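/* Removal tears things down in roughly the reverse of probe: unregister
 * from the core first, then release IRQs, per-bus resources, power state,
 * and finally the PCI claim itself.
 */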
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_suspend(ar);
	if (ret)
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);

	return ret;
}

static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_resume(ar);
	if (ret)
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);

	return ret;
}

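/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that wires the two
 * callbacks above to the system sleep hooks; __maybe_unused on the
 * callbacks keeps the build warning-free when CONFIG_PM is disabled and
 * the ops table is not referenced.
 */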
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
			 ath10k_pci_pm_suspend,
			 ath10k_pci_pm_resume);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
#ifdef CONFIG_PM
	.driver.pm = &ath10k_pci_pm_ops,
#endif
};

static int __init ath10k_pci_init(void)
{
	int ret1, ret2;

	ret1 = pci_register_driver(&ath10k_pci_driver);
	if (ret1)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret1);

	ret2 = ath10k_ahb_init();
	if (ret2)
		printk(KERN_ERR "ahb init failed: %d\n", ret2);

	if (ret1 && ret2)
		return ret1;

	/* registered to at least one bus */
	return 0;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);