1// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7#include "core.h"
8#include "pcic.h"
9#include "debug.h"
10
/* Human-readable names for each interrupt line, indexed by the irq index
 * (ATH11K_PCI_IRQ_CE0_OFFSET etc.). These strings are passed to request_irq()
 * and show up in /proc/interrupts.
 * Note: "reo2ost-exception" matches the original upstream spelling; it is a
 * runtime-visible string, so it is kept as-is.
 */
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
64
/* Per-chip MSI vector layout: how the device's MSI vectors are partitioned
 * among the "MHI", "CE", "WAKE" and "DP" users. The matching entry is
 * selected by hw_rev in ath11k_pcic_init_msi_config().
 */
static const struct ath11k_msi_config ath11k_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6390_HW20,
	},
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH11K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW21,
	},
	{
		.total_vectors = 28,
		.total_users = 2,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
		},
		.hw_rev = ATH11K_HW_WCN6750_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA2066_HW21,
	},
};
130
131int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
132{
133 const struct ath11k_msi_config *msi_config;
134 int i;
135
136 for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
137 msi_config = &ath11k_msi_config[i];
138
139 if (msi_config->hw_rev == ab->hw_rev)
140 break;
141 }
142
143 if (i == ARRAY_SIZE(ath11k_msi_config)) {
144 ath11k_err(ab, fmt: "failed to fetch msi config, unsupported hw version: 0x%x\n",
145 ab->hw_rev);
146 return -EINVAL;
147 }
148
149 ab->pci.msi.config = msi_config;
150 return 0;
151}
152EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
153
154static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
155{
156 if (offset < ATH11K_PCI_WINDOW_START)
157 iowrite32(value, ab->mem + offset);
158 else
159 ab->pci.ops->window_write32(ab, offset, value);
160}
161
162void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
163{
164 int ret = 0;
165 bool wakeup_required;
166
167 /* for offset beyond BAR + 4K - 32, may
168 * need to wakeup the device to access.
169 */
170 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
171 offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
172 if (wakeup_required && ab->pci.ops->wakeup)
173 ret = ab->pci.ops->wakeup(ab);
174
175 __ath11k_pcic_write32(ab, offset, value);
176
177 if (wakeup_required && !ret && ab->pci.ops->release)
178 ab->pci.ops->release(ab);
179}
180EXPORT_SYMBOL(ath11k_pcic_write32);
181
182static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
183{
184 u32 val;
185
186 if (offset < ATH11K_PCI_WINDOW_START)
187 val = ioread32(ab->mem + offset);
188 else
189 val = ab->pci.ops->window_read32(ab, offset);
190
191 return val;
192}
193
194u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
195{
196 int ret = 0;
197 u32 val;
198 bool wakeup_required;
199
200 /* for offset beyond BAR + 4K - 32, may
201 * need to wakeup the device to access.
202 */
203 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
204 offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
205 if (wakeup_required && ab->pci.ops->wakeup)
206 ret = ab->pci.ops->wakeup(ab);
207
208 val = __ath11k_pcic_read32(ab, offset);
209
210 if (wakeup_required && !ret && ab->pci.ops->release)
211 ab->pci.ops->release(ab);
212
213 return val;
214}
215EXPORT_SYMBOL(ath11k_pcic_read32);
216
217int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
218{
219 int ret = 0;
220 bool wakeup_required;
221 u32 *data = buf;
222 u32 i;
223
224 /* for offset beyond BAR + 4K - 32, may
225 * need to wakeup the device to access.
226 */
227 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
228 end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
229 if (wakeup_required && ab->pci.ops->wakeup) {
230 ret = ab->pci.ops->wakeup(ab);
231 if (ret) {
232 ath11k_warn(ab,
233 fmt: "wakeup failed, data may be invalid: %d",
234 ret);
235 /* Even though wakeup() failed, continue processing rather
236 * than returning because some parts of the data may still
237 * be valid and useful in some cases, e.g. could give us
238 * some clues on firmware crash.
239 * Mislead due to invalid data could be avoided because we
240 * are aware of the wakeup failure.
241 */
242 }
243 }
244
245 for (i = start; i < end + 1; i += 4)
246 *data++ = __ath11k_pcic_read32(ab, offset: i);
247
248 if (wakeup_required && ab->pci.ops->release)
249 ab->pci.ops->release(ab);
250
251 return 0;
252}
253EXPORT_SYMBOL(ath11k_pcic_read);
254
/* Return the cached MSI target address (low/high 32 bits) via out params. */
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				 u32 *msi_addr_hi)
{
	*msi_addr_lo = ab->pci.msi.addr_lo;
	*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
262
263int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
264 int *num_vectors, u32 *user_base_data,
265 u32 *base_vector)
266{
267 const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
268 int idx;
269
270 for (idx = 0; idx < msi_config->total_users; idx++) {
271 if (strcmp(user_name, msi_config->users[idx].name) == 0) {
272 *num_vectors = msi_config->users[idx].num_vectors;
273 *base_vector = msi_config->users[idx].base_vector;
274 *user_base_data = *base_vector + ab->pci.msi.ep_base_data;
275
276 ath11k_dbg(ab, ATH11K_DBG_PCI,
277 "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
278 user_name, *num_vectors, *user_base_data,
279 *base_vector);
280
281 return 0;
282 }
283 }
284
285 ath11k_err(ab, fmt: "Failed to find MSI assignment for %s!\n", user_name);
286
287 return -EINVAL;
288}
289EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
290
291void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
292{
293 u32 i, msi_data_idx;
294
295 for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
296 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR)
297 continue;
298
299 if (ce_id == i)
300 break;
301
302 msi_data_idx++;
303 }
304 *msi_idx = msi_data_idx;
305}
306EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
307
308static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
309{
310 int i, j;
311
312 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
313 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
314
315 for (j = 0; j < irq_grp->num_irq; j++)
316 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
317
318 netif_napi_del(napi: &irq_grp->napi);
319 }
320}
321
322void ath11k_pcic_free_irq(struct ath11k_base *ab)
323{
324 int i, irq_idx;
325
326 for (i = 0; i < ab->hw_params.ce_count; i++) {
327 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR)
328 continue;
329 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
330 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
331 }
332
333 ath11k_pcic_free_ext_irq(ab);
334}
335EXPORT_SYMBOL(ath11k_pcic_free_irq);
336
337static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
338{
339 u32 irq_idx;
340
341 /* In case of one MSI vector, we handle irq enable/disable in a
342 * uniform way since we only have one irq
343 */
344 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
345 return;
346
347 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
348 enable_irq(irq: ab->irq_num[irq_idx]);
349}
350
351static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
352{
353 u32 irq_idx;
354
355 /* In case of one MSI vector, we handle irq enable/disable in a
356 * uniform way since we only have one irq
357 */
358 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
359 return;
360
361 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
362 disable_irq_nosync(irq: ab->irq_num[irq_idx]);
363}
364
365static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
366{
367 int i;
368
369 clear_bit(nr: ATH11K_FLAG_CE_IRQ_ENABLED, addr: &ab->dev_flags);
370
371 for (i = 0; i < ab->hw_params.ce_count; i++) {
372 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR)
373 continue;
374 ath11k_pcic_ce_irq_disable(ab, ce_id: i);
375 }
376}
377
378static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
379{
380 int i;
381 int irq_idx;
382
383 for (i = 0; i < ab->hw_params.ce_count; i++) {
384 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR)
385 continue;
386
387 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
388 synchronize_irq(irq: ab->irq_num[irq_idx]);
389 }
390}
391
392static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
393{
394 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
395 int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
396
397 ath11k_ce_per_engine_service(ab: ce_pipe->ab, ce_id: ce_pipe->pipe_num);
398
399 enable_irq(irq: ce_pipe->ab->irq_num[irq_idx]);
400}
401
402static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
403{
404 struct ath11k_ce_pipe *ce_pipe = arg;
405 struct ath11k_base *ab = ce_pipe->ab;
406 int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
407
408 if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
409 return IRQ_HANDLED;
410
411 /* last interrupt received for this CE */
412 ce_pipe->timestamp = jiffies;
413
414 disable_irq_nosync(irq: ab->irq_num[irq_idx]);
415
416 tasklet_schedule(t: &ce_pipe->intr_tq);
417
418 return IRQ_HANDLED;
419}
420
421static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
422{
423 struct ath11k_base *ab = irq_grp->ab;
424 int i;
425
426 /* In case of one MSI vector, we handle irq enable/disable
427 * in a uniform way since we only have one irq
428 */
429 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
430 return;
431
432 for (i = 0; i < irq_grp->num_irq; i++)
433 disable_irq_nosync(irq: irq_grp->ab->irq_num[irq_grp->irqs[i]]);
434}
435
436static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
437{
438 int i;
439
440 clear_bit(nr: ATH11K_FLAG_EXT_IRQ_ENABLED, addr: &ab->dev_flags);
441
442 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
443 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
444
445 ath11k_pcic_ext_grp_disable(irq_grp);
446
447 if (irq_grp->napi_enabled) {
448 napi_synchronize(n: &irq_grp->napi);
449 napi_disable(n: &irq_grp->napi);
450 irq_grp->napi_enabled = false;
451 }
452 }
453}
454
455static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
456{
457 struct ath11k_base *ab = irq_grp->ab;
458 int i;
459
460 /* In case of one MSI vector, we handle irq enable/disable in a
461 * uniform way since we only have one irq
462 */
463 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
464 return;
465
466 for (i = 0; i < irq_grp->num_irq; i++)
467 enable_irq(irq: irq_grp->ab->irq_num[irq_grp->irqs[i]]);
468}
469
470void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
471{
472 int i;
473
474 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
475 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
476
477 if (!irq_grp->napi_enabled) {
478 napi_enable(n: &irq_grp->napi);
479 irq_grp->napi_enabled = true;
480 }
481 ath11k_pcic_ext_grp_enable(irq_grp);
482 }
483
484 set_bit(nr: ATH11K_FLAG_EXT_IRQ_ENABLED, addr: &ab->dev_flags);
485}
486EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
487
488static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
489{
490 int i, j, irq_idx;
491
492 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
493 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
494
495 for (j = 0; j < irq_grp->num_irq; j++) {
496 irq_idx = irq_grp->irqs[j];
497 synchronize_irq(irq: ab->irq_num[irq_idx]);
498 }
499 }
500}
501
/* Disable all DP group irqs and wait until no handler is still running.
 * Order matters: mask first, then synchronize.
 */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
508
509static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
510{
511 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
512 struct ath11k_ext_irq_grp,
513 napi);
514 struct ath11k_base *ab = irq_grp->ab;
515 int work_done;
516 int i;
517
518 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
519 if (work_done < budget) {
520 napi_complete_done(n: napi, work_done);
521 for (i = 0; i < irq_grp->num_irq; i++)
522 enable_irq(irq: irq_grp->ab->irq_num[irq_grp->irqs[i]]);
523 }
524
525 if (work_done > budget)
526 work_done = budget;
527
528 return work_done;
529}
530
531static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
532{
533 struct ath11k_ext_irq_grp *irq_grp = arg;
534 struct ath11k_base *ab = irq_grp->ab;
535 int i;
536
537 if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
538 return IRQ_HANDLED;
539
540 ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);
541
542 /* last interrupt received for this group */
543 irq_grp->timestamp = jiffies;
544
545 for (i = 0; i < irq_grp->num_irq; i++)
546 disable_irq_nosync(irq: irq_grp->ab->irq_num[irq_grp->irqs[i]]);
547
548 napi_schedule(n: &irq_grp->napi);
549
550 return IRQ_HANDLED;
551}
552
/* Map an MSI vector index to a Linux irq number via the bus-specific op.
 * Returns a negative errno on failure (propagated from the op).
 */
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.ops->get_msi_irq(ab, vector);
}
558
/* Set up DP (ext) irq groups: allocate one NAPI context per group, map
 * each group to an MSI vector from the "DP" range, and request its irq.
 * Each group is left masked until ath11k_pcic_ext_irq_enable().
 * Returns 0 on success or a negative errno.
 */
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
						  &user_base_data,
						  &base_vector);
	if (ret < 0)
		return ret;

	irq_flags = IRQF_SHARED;
	/* In single-MSI-vector mode the irq must stay on one CPU. */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pcic_ext_grp_napi_poll);

		/* A group gets an irq only if at least one ring mask routes
		 * work to it.
		 */
		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* Groups share the DP vectors round-robin when there
			 * are fewer vectors than groups.
			 */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pcic_get_msi_irq(ab, vector);

			if (irq < 0)
				return irq;

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq %d group %d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
					  irq_flags, "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				return ret;
			}
		}
		/* Leave the group masked until explicitly enabled. */
		ath11k_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
}
626
/* Request irqs for all interrupt-capable CE pipes (mapped round-robin
 * onto the "CE" MSI vector range), set up their tasklets, then configure
 * the DP (ext) irq groups. Each CE irq is left masked until
 * ath11k_pcic_ce_irqs_enable(). Returns 0 on success or a negative errno.
 */
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
						  &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	irq_flags = IRQF_SHARED;
	/* In single-MSI-vector mode the irq must stay on one CPU. */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* msi_data_idx counts only interrupt-capable CEs, matching
		 * ath11k_pcic_get_ce_msi_idx().
		 */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath11k_pcic_get_msi_irq(ab, msi_data);
		if (irq < 0)
			return irq;

		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);

		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
				  irq_flags, irq_name[irq_idx], ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		/* Leave the CE irq masked until explicitly enabled. */
		ath11k_pcic_ce_irq_disable(ab, i);
	}

	ret = ath11k_pcic_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);
683
684void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
685{
686 int i;
687
688 set_bit(nr: ATH11K_FLAG_CE_IRQ_ENABLED, addr: &ab->dev_flags);
689
690 for (i = 0; i < ab->hw_params.ce_count; i++) {
691 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR)
692 continue;
693 ath11k_pcic_ce_irq_enable(ab, ce_id: i);
694 }
695}
696EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
697
698static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
699{
700 int i;
701
702 for (i = 0; i < ab->hw_params.ce_count; i++) {
703 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
704
705 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR)
706 continue;
707
708 tasklet_kill(t: &ce_pipe->intr_tq);
709 }
710}
711
/* Fully quiesce CE interrupt processing. Order matters: mask the irqs,
 * wait for in-flight handlers, then kill the tasklets they may have
 * scheduled.
 */
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
719
/* Stop the bus layer: quiesce CE interrupt processing, then tear down
 * the CE pipes.
 */
void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
726
/* Start the bus layer: mark device init done (enables the wakeup logic
 * in the register accessors), enable CE irqs and post rx buffers.
 * Always returns 0.
 */
int ath11k_pcic_start(struct ath11k_base *ab)
{
	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	ath11k_pcic_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
737
738int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
739 u8 *ul_pipe, u8 *dl_pipe)
740{
741 const struct service_to_pipe *entry;
742 bool ul_set = false, dl_set = false;
743 int i;
744
745 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
746 entry = &ab->hw_params.svc_to_ce_map[i];
747
748 if (__le32_to_cpu(entry->service_id) != service_id)
749 continue;
750
751 switch (__le32_to_cpu(entry->pipedir)) {
752 case PIPEDIR_NONE:
753 break;
754 case PIPEDIR_IN:
755 WARN_ON(dl_set);
756 *dl_pipe = __le32_to_cpu(entry->pipenum);
757 dl_set = true;
758 break;
759 case PIPEDIR_OUT:
760 WARN_ON(ul_set);
761 *ul_pipe = __le32_to_cpu(entry->pipenum);
762 ul_set = true;
763 break;
764 case PIPEDIR_INOUT:
765 WARN_ON(dl_set);
766 WARN_ON(ul_set);
767 *dl_pipe = __le32_to_cpu(entry->pipenum);
768 *ul_pipe = __le32_to_cpu(entry->pipenum);
769 dl_set = true;
770 ul_set = true;
771 break;
772 }
773 }
774
775 if (WARN_ON(!ul_set || !dl_set))
776 return -ENOENT;
777
778 return 0;
779}
780EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
781
782int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
783 const struct ath11k_pci_ops *pci_ops)
784{
785 if (!pci_ops)
786 return 0;
787
788 /* Return error if mandatory pci_ops callbacks are missing */
789 if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
790 !pci_ops->window_read32)
791 return -EINVAL;
792
793 ab->pci.ops = pci_ops;
794 return 0;
795}
796EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
797
798void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
799{
800 int i;
801
802 for (i = 0; i < ab->hw_params.ce_count; i++) {
803 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR ||
804 i == ATH11K_PCI_CE_WAKE_IRQ)
805 continue;
806 ath11k_pcic_ce_irq_enable(ab, ce_id: i);
807 }
808}
809EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
810
811void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
812{
813 int i;
814 int irq_idx;
815 struct ath11k_ce_pipe *ce_pipe;
816
817 for (i = 0; i < ab->hw_params.ce_count; i++) {
818 ce_pipe = &ab->ce.ce_pipe[i];
819 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
820
821 if (ath11k_ce_get_attr_flags(ab, ce_id: i) & CE_ATTR_DIS_INTR ||
822 i == ATH11K_PCI_CE_WAKE_IRQ)
823 continue;
824
825 disable_irq_nosync(irq: ab->irq_num[irq_idx]);
826 synchronize_irq(irq: ab->irq_num[irq_idx]);
827 tasklet_kill(t: &ce_pipe->intr_tq);
828 }
829}
830EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
831

source code of linux/drivers/net/wireless/ath/ath11k/pcic.c