1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. |
5 | * |
6 | * Copyright (C) 2004-2016 Cavium, Inc. |
7 | */ |
8 | |
9 | #include <linux/of_address.h> |
10 | #include <linux/interrupt.h> |
11 | #include <linux/irqdomain.h> |
12 | #include <linux/bitops.h> |
13 | #include <linux/of_irq.h> |
14 | #include <linux/percpu.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/irq.h> |
17 | #include <linux/smp.h> |
18 | #include <linux/of.h> |
19 | |
20 | #include <asm/octeon/octeon.h> |
21 | #include <asm/octeon/cvmx-ciu2-defs.h> |
22 | #include <asm/octeon/cvmx-ciu3-defs.h> |
23 | |
24 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); |
25 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); |
26 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); |
27 | static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2); |
28 | |
29 | static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3); |
30 | static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info); |
31 | #define CIU3_MBOX_PER_CORE 10 |
32 | |
33 | /* |
34 | * The 8 most significant bits of the intsn identify the interrupt major block. |
35 | * Each major block might use its own interrupt domain. Thus 256 domains are |
36 | * needed. |
37 | */ |
38 | #define MAX_CIU3_DOMAINS 256 |
39 | |
40 | typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int); |
41 | |
42 | /* Information for each ciu3 in the system */ |
43 | struct octeon_ciu3_info { |
44 | u64 ciu3_addr; |
45 | int node; |
46 | struct irq_domain *domain[MAX_CIU3_DOMAINS]; |
47 | octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS]; |
48 | }; |
49 | |
50 | /* Each ciu3 in the system uses its own data (one ciu3 per node) */ |
51 | static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4]; |
52 | |
53 | struct octeon_irq_ciu_domain_data { |
54 | int num_sum; /* number of sum registers (2 or 3). */ |
55 | }; |
56 | |
57 | /* Register offsets from ciu3_addr */ |
58 | #define CIU3_CONST 0x220 |
59 | #define CIU3_IDT_CTL(_idt) ((_idt) * 8 + 0x110000) |
60 | #define CIU3_IDT_PP(_idt, _idx) ((_idt) * 32 + (_idx) * 8 + 0x120000) |
61 | #define CIU3_IDT_IO(_idt) ((_idt) * 8 + 0x130000) |
62 | #define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000) |
63 | #define CIU3_DEST_IO_INT(_io) ((_io) * 8 + 0x210000) |
64 | #define CIU3_ISC_CTL(_intsn) ((_intsn) * 8 + 0x80000000) |
65 | #define CIU3_ISC_W1C(_intsn) ((_intsn) * 8 + 0x90000000) |
66 | #define CIU3_ISC_W1S(_intsn) ((_intsn) * 8 + 0xa0000000) |
67 | |
68 | static __read_mostly int octeon_irq_ciu_to_irq[8][64]; |
69 | |
70 | struct octeon_ciu_chip_data { |
71 | union { |
72 | struct { /* only used for ciu3 */ |
73 | u64 ciu3_addr; |
74 | unsigned int intsn; |
75 | }; |
76 | struct { /* only used for ciu/ciu2 */ |
77 | u8 line; |
78 | u8 bit; |
79 | }; |
80 | }; |
81 | int gpio_line; |
82 | int current_cpu; /* Next CPU expected to take this irq */ |
83 | int ciu_node; /* NUMA node number of the CIU */ |
84 | }; |
85 | |
86 | struct octeon_core_chip_data { |
87 | struct mutex core_irq_mutex; |
88 | bool current_en; |
89 | bool desired_en; |
90 | u8 bit; |
91 | }; |
92 | |
93 | #define MIPS_CORE_IRQ_LINES 8 |
94 | |
95 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; |
96 | |
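/*
 * Allocate per-irq chip data, bind the chip and flow handler, and record
 * the CIU line/bit so the dispatch code can map it back to this irq.
 */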
97 | static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, |
98 | struct irq_chip *chip, |
99 | irq_flow_handler_t handler) |
100 | { |
101 | struct octeon_ciu_chip_data *cd; |
102 | |
103 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); |
104 | if (!cd) |
105 | return -ENOMEM; |
106 | |
107 | irq_set_chip_and_handler(irq, chip, handler); |
108 | |
109 | cd->line = line; |
110 | cd->bit = bit; |
111 | cd->gpio_line = gpio_line; |
112 | |
113 | irq_set_chip_data(irq, cd); |
114 | octeon_irq_ciu_to_irq[line][bit] = irq; |
115 | return 0; |
116 | } |
117 | |
118 | static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) |
119 | { |
120 | struct irq_data *data = irq_get_irq_data(irq); |
121 | struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); |
122 | |
123 | irq_set_chip_data(irq, NULL); |
124 | kfree(cd); |
125 | } |
126 | |
127 | static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, |
128 | int irq, int line, int bit) |
129 | { |
130 | struct device_node *of_node; |
131 | int ret; |
132 | |
133 | of_node = irq_domain_get_of_node(domain); |
134 | if (!of_node) |
135 | return -EINVAL; |
136 | ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node)); |
137 | if (ret < 0) |
138 | return ret; |
139 | |
140 | return irq_domain_associate(domain, irq, line << 6 | bit); |
141 | } |
142 | |
143 | static int octeon_coreid_for_cpu(int cpu) |
144 | { |
145 | #ifdef CONFIG_SMP |
146 | return cpu_logical_map(cpu); |
147 | #else |
148 | return cvmx_get_core_num(); |
149 | #endif |
150 | } |
151 | |
152 | static int octeon_cpu_for_coreid(int coreid) |
153 | { |
154 | #ifdef CONFIG_SMP |
155 | return cpu_number_map(coreid); |
156 | #else |
157 | return smp_processor_id(); |
158 | #endif |
159 | } |
160 | |
161 | static void octeon_irq_core_ack(struct irq_data *data) |
162 | { |
163 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
164 | unsigned int bit = cd->bit; |
165 | |
166 | /* |
167 | * We don't need to disable IRQs to make these atomic since |
168 | * they are already disabled earlier in the low level |
169 | * interrupt code. |
170 | */ |
171 | clear_c0_status(0x100 << bit); |
172 | /* The two user interrupts must be cleared manually. */ |
173 | if (bit < 2) |
174 | clear_c0_cause(0x100 << bit); |
175 | } |
176 | |
177 | static void octeon_irq_core_eoi(struct irq_data *data) |
178 | { |
179 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
180 | |
181 | /* |
182 | * We don't need to disable IRQs to make these atomic since |
183 | * they are already disabled earlier in the low level |
184 | * interrupt code. |
185 | */ |
186 | set_c0_status(0x100 << cd->bit); |
187 | } |
188 | |
189 | static void octeon_irq_core_set_enable_local(void *arg) |
190 | { |
191 | struct irq_data *data = arg; |
192 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
193 | unsigned int mask = 0x100 << cd->bit; |
194 | |
195 | /* |
196 | * Interrupts are already disabled, so these are atomic. |
197 | */ |
198 | if (cd->desired_en) |
199 | set_c0_status(mask); |
200 | else |
201 | clear_c0_status(mask); |
202 | |
203 | } |
204 | |
205 | static void octeon_irq_core_disable(struct irq_data *data) |
206 | { |
207 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
208 | cd->desired_en = false; |
209 | } |
210 | |
211 | static void octeon_irq_core_enable(struct irq_data *data) |
212 | { |
213 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
214 | cd->desired_en = true; |
215 | } |
216 | |
217 | static void octeon_irq_core_bus_lock(struct irq_data *data) |
218 | { |
219 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
220 | |
221 | mutex_lock(&cd->core_irq_mutex); |
222 | } |
223 | |
224 | static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) |
225 | { |
226 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
227 | |
228 | if (cd->desired_en != cd->current_en) { |
229 | on_each_cpu(octeon_irq_core_set_enable_local, data, 1); |
230 | |
231 | cd->current_en = cd->desired_en; |
232 | } |
233 | |
234 | mutex_unlock(&cd->core_irq_mutex); |
235 | } |
236 | |
237 | static struct irq_chip octeon_irq_chip_core = { |
238 | .name = "Core", |
239 | .irq_enable = octeon_irq_core_enable, |
240 | .irq_disable = octeon_irq_core_disable, |
241 | .irq_ack = octeon_irq_core_ack, |
242 | .irq_eoi = octeon_irq_core_eoi, |
243 | .irq_bus_lock = octeon_irq_core_bus_lock, |
244 | .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, |
245 | |
246 | .irq_cpu_online = octeon_irq_core_eoi, |
247 | .irq_cpu_offline = octeon_irq_core_ack, |
248 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
249 | }; |
250 | |
251 | static void __init octeon_irq_init_core(void) |
252 | { |
253 | int i; |
254 | int irq; |
255 | struct octeon_core_chip_data *cd; |
256 | |
257 | for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { |
258 | cd = &octeon_irq_core_chip_data[i]; |
259 | cd->current_en = false; |
260 | cd->desired_en = false; |
261 | cd->bit = i; |
262 | mutex_init(&cd->core_irq_mutex); |
263 | |
264 | irq = OCTEON_IRQ_SW0 + i; |
265 | irq_set_chip_data(irq, cd); |
266 | irq_set_chip_and_handler(irq, &octeon_irq_chip_core, |
267 | handle_percpu_irq); |
268 | } |
269 | } |
270 | |
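/*
 * Pick the next online CPU from the irq's affinity mask (round-robin from
 * the last CPU used); a single-CPU mask selects that CPU, an empty mask
 * falls back to the current CPU.
 */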
271 | static int next_cpu_for_irq(struct irq_data *data) |
272 | { |
273 | |
274 | #ifdef CONFIG_SMP |
275 | int cpu; |
276 | const struct cpumask *mask = irq_data_get_affinity_mask(data); |
277 | int weight = cpumask_weight(mask); |
278 | struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); |
279 | |
280 | if (weight > 1) { |
281 | cpu = cd->current_cpu; |
282 | for (;;) { |
283 | cpu = cpumask_next(cpu, mask); |
284 | if (cpu >= nr_cpu_ids) { |
285 | cpu = -1; |
286 | continue; |
287 | } else if (cpumask_test_cpu(cpu, cpu_online_mask)) { |
288 | break; |
289 | } |
290 | } |
291 | } else if (weight == 1) { |
292 | cpu = cpumask_first(mask); |
293 | } else { |
294 | cpu = smp_processor_id(); |
295 | } |
296 | cd->current_cpu = cpu; |
297 | return cpu; |
298 | #else |
299 | return smp_processor_id(); |
300 | #endif |
301 | } |
302 | |
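/*
 * Set the bit in the target CPU's enable mirror and write the mirror to
 * that core's CIU enable register, under the per-CPU CIU spinlock.
 */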
303 | static void octeon_irq_ciu_enable(struct irq_data *data) |
304 | { |
305 | int cpu = next_cpu_for_irq(data); |
306 | int coreid = octeon_coreid_for_cpu(cpu); |
307 | unsigned long *pen; |
308 | unsigned long flags; |
309 | struct octeon_ciu_chip_data *cd; |
310 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
311 | |
312 | cd = irq_data_get_irq_chip_data(data); |
313 | |
314 | raw_spin_lock_irqsave(lock, flags); |
315 | if (cd->line == 0) { |
316 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
317 | __set_bit(cd->bit, pen); |
318 | /* |
319 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
320 | * enabling the irq. |
321 | */ |
322 | wmb(); |
323 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
324 | } else { |
325 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
326 | __set_bit(cd->bit, pen); |
327 | /* |
328 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
329 | * enabling the irq. |
330 | */ |
331 | wmb(); |
332 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
333 | } |
334 | raw_spin_unlock_irqrestore(lock, flags); |
335 | } |
336 | |
337 | static void octeon_irq_ciu_enable_local(struct irq_data *data) |
338 | { |
339 | unsigned long *pen; |
340 | unsigned long flags; |
341 | struct octeon_ciu_chip_data *cd; |
342 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
343 | |
344 | cd = irq_data_get_irq_chip_data(data); |
345 | |
346 | raw_spin_lock_irqsave(lock, flags); |
347 | if (cd->line == 0) { |
348 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
349 | __set_bit(cd->bit, pen); |
350 | /* |
351 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
352 | * enabling the irq. |
353 | */ |
354 | wmb(); |
355 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
356 | } else { |
357 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
358 | __set_bit(cd->bit, pen); |
359 | /* |
360 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
361 | * enabling the irq. |
362 | */ |
363 | wmb(); |
364 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); |
365 | } |
366 | raw_spin_unlock_irqrestore(lock, flags); |
367 | } |
368 | |
369 | static void octeon_irq_ciu_disable_local(struct irq_data *data) |
370 | { |
371 | unsigned long *pen; |
372 | unsigned long flags; |
373 | struct octeon_ciu_chip_data *cd; |
374 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
375 | |
376 | cd = irq_data_get_irq_chip_data(data); |
377 | |
378 | raw_spin_lock_irqsave(lock, flags); |
379 | if (cd->line == 0) { |
380 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
381 | __clear_bit(cd->bit, pen); |
382 | /* |
383 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
384 | * enabling the irq. |
385 | */ |
386 | wmb(); |
387 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
388 | } else { |
389 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
390 | __clear_bit(cd->bit, pen); |
391 | /* |
392 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
393 | * enabling the irq. |
394 | */ |
395 | wmb(); |
396 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); |
397 | } |
398 | raw_spin_unlock_irqrestore(lock, flags); |
399 | } |
400 | |
401 | static void octeon_irq_ciu_disable_all(struct irq_data *data) |
402 | { |
403 | unsigned long flags; |
404 | unsigned long *pen; |
405 | int cpu; |
406 | struct octeon_ciu_chip_data *cd; |
407 | raw_spinlock_t *lock; |
408 | |
409 | cd = irq_data_get_irq_chip_data(data); |
410 | |
411 | for_each_online_cpu(cpu) { |
412 | int coreid = octeon_coreid_for_cpu(cpu); |
413 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
414 | if (cd->line == 0) |
415 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
416 | else |
417 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
418 | |
419 | raw_spin_lock_irqsave(lock, flags); |
420 | __clear_bit(cd->bit, pen); |
421 | /* |
422 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
423 | * enabling the irq. |
424 | */ |
425 | wmb(); |
426 | if (cd->line == 0) |
427 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
428 | else |
429 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
430 | raw_spin_unlock_irqrestore(lock, flags); |
431 | } |
432 | } |
433 | |
434 | static void octeon_irq_ciu_enable_all(struct irq_data *data) |
435 | { |
436 | unsigned long flags; |
437 | unsigned long *pen; |
438 | int cpu; |
439 | struct octeon_ciu_chip_data *cd; |
440 | raw_spinlock_t *lock; |
441 | |
442 | cd = irq_data_get_irq_chip_data(data); |
443 | |
444 | for_each_online_cpu(cpu) { |
445 | int coreid = octeon_coreid_for_cpu(cpu); |
446 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
447 | if (cd->line == 0) |
448 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
449 | else |
450 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
451 | |
452 | raw_spin_lock_irqsave(lock, flags); |
453 | __set_bit(cd->bit, pen); |
454 | /* |
455 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
456 | * enabling the irq. |
457 | */ |
458 | wmb(); |
459 | if (cd->line == 0) |
460 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
461 | else |
462 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
463 | raw_spin_unlock_irqrestore(lock, flags); |
464 | } |
465 | } |
466 | |
467 | /* |
468 | * Enable the irq on the next core in the affinity set for chips that |
469 | * have the EN*_W1{S,C} registers. |
470 | */ |
471 | static void octeon_irq_ciu_enable_v2(struct irq_data *data) |
472 | { |
473 | u64 mask; |
474 | int cpu = next_cpu_for_irq(data); |
475 | struct octeon_ciu_chip_data *cd; |
476 | |
477 | cd = irq_data_get_irq_chip_data(data); |
478 | mask = 1ull << (cd->bit); |
479 | |
480 | /* |
481 | * Called under the desc lock, so these should never get out |
482 | * of sync. |
483 | */ |
484 | if (cd->line == 0) { |
485 | int index = octeon_coreid_for_cpu(cpu) * 2; |
486 | set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
487 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
488 | } else { |
489 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
490 | set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
491 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
492 | } |
493 | } |
494 | |
495 | /* |
496 | * Enable the irq in the sum2 registers. |
497 | */ |
498 | static void octeon_irq_ciu_enable_sum2(struct irq_data *data) |
499 | { |
500 | u64 mask; |
501 | int cpu = next_cpu_for_irq(data); |
502 | int index = octeon_coreid_for_cpu(cpu); |
503 | struct octeon_ciu_chip_data *cd; |
504 | |
505 | cd = irq_data_get_irq_chip_data(data); |
506 | mask = 1ull << (cd->bit); |
507 | |
508 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); |
509 | } |
510 | |
511 | /* |
512 | * Disable the irq in the sum2 registers. |
513 | */ |
514 | static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) |
515 | { |
516 | u64 mask; |
517 | int cpu = next_cpu_for_irq(data); |
518 | int index = octeon_coreid_for_cpu(cpu); |
519 | struct octeon_ciu_chip_data *cd; |
520 | |
521 | cd = irq_data_get_irq_chip_data(data); |
522 | mask = 1ull << (cd->bit); |
523 | |
524 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); |
525 | } |
526 | |
527 | static void octeon_irq_ciu_ack_sum2(struct irq_data *data) |
528 | { |
529 | u64 mask; |
530 | int cpu = next_cpu_for_irq(data); |
531 | int index = octeon_coreid_for_cpu(cpu); |
532 | struct octeon_ciu_chip_data *cd; |
533 | |
534 | cd = irq_data_get_irq_chip_data(data); |
535 | mask = 1ull << (cd->bit); |
536 | |
537 | cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); |
538 | } |
539 | |
540 | static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) |
541 | { |
542 | int cpu; |
543 | struct octeon_ciu_chip_data *cd; |
544 | u64 mask; |
545 | |
546 | cd = irq_data_get_irq_chip_data(data); |
547 | mask = 1ull << (cd->bit); |
548 | |
549 | for_each_online_cpu(cpu) { |
550 | int coreid = octeon_coreid_for_cpu(cpu); |
551 | |
552 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); |
553 | } |
554 | } |
555 | |
556 | /* |
557 | * Enable the irq on the current CPU for chips that |
558 | * have the EN*_W1{S,C} registers. |
559 | */ |
560 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) |
561 | { |
562 | u64 mask; |
563 | struct octeon_ciu_chip_data *cd; |
564 | |
565 | cd = irq_data_get_irq_chip_data(data); |
566 | mask = 1ull << (cd->bit); |
567 | |
568 | if (cd->line == 0) { |
569 | int index = cvmx_get_core_num() * 2; |
570 | set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
571 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
572 | } else { |
573 | int index = cvmx_get_core_num() * 2 + 1; |
574 | set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
575 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
576 | } |
577 | } |
578 | |
579 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) |
580 | { |
581 | u64 mask; |
582 | struct octeon_ciu_chip_data *cd; |
583 | |
584 | cd = irq_data_get_irq_chip_data(data); |
585 | mask = 1ull << (cd->bit); |
586 | |
587 | if (cd->line == 0) { |
588 | int index = cvmx_get_core_num() * 2; |
589 | clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
590 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
591 | } else { |
592 | int index = cvmx_get_core_num() * 2 + 1; |
593 | clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
594 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
595 | } |
596 | } |
597 | |
598 | /* |
599 | * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. |
600 | */ |
601 | static void octeon_irq_ciu_ack(struct irq_data *data) |
602 | { |
603 | u64 mask; |
604 | struct octeon_ciu_chip_data *cd; |
605 | |
606 | cd = irq_data_get_irq_chip_data(data); |
607 | mask = 1ull << (cd->bit); |
608 | |
609 | if (cd->line == 0) { |
610 | int index = cvmx_get_core_num() * 2; |
611 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); |
612 | } else { |
613 | cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); |
614 | } |
615 | } |
616 | |
617 | /* |
618 | * Disable the irq on all cores for chips that have the EN*_W1{S,C} |
619 | * registers. |
620 | */ |
621 | static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) |
622 | { |
623 | int cpu; |
624 | u64 mask; |
625 | struct octeon_ciu_chip_data *cd; |
626 | |
627 | cd = irq_data_get_irq_chip_data(data); |
628 | mask = 1ull << (cd->bit); |
629 | |
630 | if (cd->line == 0) { |
631 | for_each_online_cpu(cpu) { |
632 | int index = octeon_coreid_for_cpu(cpu) * 2; |
633 | clear_bit(cd->bit, |
634 | &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
635 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
636 | } |
637 | } else { |
638 | for_each_online_cpu(cpu) { |
639 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
640 | clear_bit(cd->bit, |
641 | &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
642 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
643 | } |
644 | } |
645 | } |
646 | |
647 | /* |
648 | * Enable the irq on all cores for chips that have the EN*_W1{S,C} |
649 | * registers. |
650 | */ |
651 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) |
652 | { |
653 | int cpu; |
654 | u64 mask; |
655 | struct octeon_ciu_chip_data *cd; |
656 | |
657 | cd = irq_data_get_irq_chip_data(data); |
658 | mask = 1ull << (cd->bit); |
659 | |
660 | if (cd->line == 0) { |
661 | for_each_online_cpu(cpu) { |
662 | int index = octeon_coreid_for_cpu(cpu) * 2; |
663 | set_bit(cd->bit, |
664 | &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
665 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
666 | } |
667 | } else { |
668 | for_each_online_cpu(cpu) { |
669 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
670 | set_bit(cd->bit, |
671 | &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
672 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
673 | } |
674 | } |
675 | } |
676 | |
677 | static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t) |
678 | { |
679 | irqd_set_trigger_type(data, t); |
680 | |
681 | if (t & IRQ_TYPE_EDGE_BOTH) |
682 | irq_set_handler_locked(data, handle_edge_irq); |
683 | else |
684 | irq_set_handler_locked(data, handle_level_irq); |
685 | |
686 | return IRQ_SET_MASK_OK; |
687 | } |
688 | |
689 | static void octeon_irq_gpio_setup(struct irq_data *data) |
690 | { |
691 | union cvmx_gpio_bit_cfgx cfg; |
692 | struct octeon_ciu_chip_data *cd; |
693 | u32 t = irqd_get_trigger_type(data); |
694 | |
695 | cd = irq_data_get_irq_chip_data(data); |
696 | |
697 | cfg.u64 = 0; |
698 | cfg.s.int_en = 1; |
699 | cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0; |
700 | cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0; |
701 | |
702 | /* 140 nS glitch filter*/ |
703 | cfg.s.fil_cnt = 7; |
704 | cfg.s.fil_sel = 3; |
705 | |
706 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); |
707 | } |
708 | |
709 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) |
710 | { |
711 | octeon_irq_gpio_setup(data); |
712 | octeon_irq_ciu_enable_v2(data); |
713 | } |
714 | |
715 | static void octeon_irq_ciu_enable_gpio(struct irq_data *data) |
716 | { |
717 | octeon_irq_gpio_setup(data); |
718 | octeon_irq_ciu_enable(data); |
719 | } |
720 | |
721 | static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t) |
722 | { |
723 | irqd_set_trigger_type(data, t); |
724 | octeon_irq_gpio_setup(data); |
725 | |
726 | if (t & IRQ_TYPE_EDGE_BOTH) |
727 | irq_set_handler_locked(data, handle_edge_irq); |
728 | else |
729 | irq_set_handler_locked(data, handle_level_irq); |
730 | |
731 | return IRQ_SET_MASK_OK; |
732 | } |
733 | |
734 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) |
735 | { |
736 | struct octeon_ciu_chip_data *cd; |
737 | |
738 | cd = irq_data_get_irq_chip_data(data); |
739 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
740 | |
741 | octeon_irq_ciu_disable_all_v2(data); |
742 | } |
743 | |
744 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) |
745 | { |
746 | struct octeon_ciu_chip_data *cd; |
747 | |
748 | cd = irq_data_get_irq_chip_data(data); |
749 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
750 | |
751 | octeon_irq_ciu_disable_all(data); |
752 | } |
753 | |
754 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) |
755 | { |
756 | struct octeon_ciu_chip_data *cd; |
757 | u64 mask; |
758 | |
759 | cd = irq_data_get_irq_chip_data(data); |
760 | mask = 1ull << (cd->gpio_line); |
761 | |
762 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); |
763 | } |
764 | |
765 | #ifdef CONFIG_SMP |
766 | |
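/* Move this irq's affinity off a CPU that is going offline. */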
767 | static void octeon_irq_cpu_offline_ciu(struct irq_data *data) |
768 | { |
769 | int cpu = smp_processor_id(); |
770 | cpumask_t new_affinity; |
771 | const struct cpumask *mask = irq_data_get_affinity_mask(data); |
772 | |
773 | if (!cpumask_test_cpu(cpu, mask)) |
774 | return; |
775 | |
776 | if (cpumask_weight(mask) > 1) { |
777 | /* |
778 | * It has multi CPU affinity, just remove this CPU |
779 | * from the affinity set. |
780 | */ |
781 | cpumask_copy(&new_affinity, mask); |
782 | cpumask_clear_cpu(cpu, &new_affinity); |
783 | } else { |
784 | /* Otherwise, put it on lowest numbered online CPU. */ |
785 | cpumask_clear(&new_affinity); |
786 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); |
787 | } |
788 | irq_set_affinity_locked(data, &new_affinity, false); |
789 | } |
790 | |
791 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, |
792 | const struct cpumask *dest, bool force) |
793 | { |
794 | int cpu; |
795 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
796 | unsigned long flags; |
797 | struct octeon_ciu_chip_data *cd; |
798 | unsigned long *pen; |
799 | raw_spinlock_t *lock; |
800 | |
801 | cd = irq_data_get_irq_chip_data(data); |
802 | |
803 | /* |
804 | * For non-v2 CIU, we will allow only single CPU affinity. |
805 | * This removes the need to do locking in the .ack/.eoi |
806 | * functions. |
807 | */ |
808 | if (cpumask_weight(dest) != 1) |
809 | return -EINVAL; |
810 | |
811 | if (!enable_one) |
812 | return 0; |
813 | |
814 | |
815 | for_each_online_cpu(cpu) { |
816 | int coreid = octeon_coreid_for_cpu(cpu); |
817 | |
818 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
819 | raw_spin_lock_irqsave(lock, flags); |
820 | |
821 | if (cd->line == 0) |
822 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
823 | else |
824 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
825 | |
826 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
827 | enable_one = false; |
828 | __set_bit(cd->bit, pen); |
829 | } else { |
830 | __clear_bit(cd->bit, pen); |
831 | } |
832 | /* |
833 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
834 | * enabling the irq. |
835 | */ |
836 | wmb(); |
837 | |
838 | if (cd->line == 0) |
839 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
840 | else |
841 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
842 | |
843 | raw_spin_unlock_irqrestore(lock, flags); |
844 | } |
845 | return 0; |
846 | } |
847 | |
848 | /* |
849 | * Set affinity for the irq for chips that have the EN*_W1{S,C} |
850 | * registers. |
851 | */ |
852 | static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, |
853 | const struct cpumask *dest, |
854 | bool force) |
855 | { |
856 | int cpu; |
857 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
858 | u64 mask; |
859 | struct octeon_ciu_chip_data *cd; |
860 | |
861 | if (!enable_one) |
862 | return 0; |
863 | |
864 | cd = irq_data_get_irq_chip_data(data); |
865 | mask = 1ull << cd->bit; |
866 | |
867 | if (cd->line == 0) { |
868 | for_each_online_cpu(cpu) { |
869 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
870 | int index = octeon_coreid_for_cpu(cpu) * 2; |
871 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
872 | enable_one = false; |
873 | set_bit(cd->bit, pen); |
874 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
875 | } else { |
876 | clear_bit(cd->bit, pen); |
877 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
878 | } |
879 | } |
880 | } else { |
881 | for_each_online_cpu(cpu) { |
882 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
883 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
884 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
885 | enable_one = false; |
886 | set_bit(cd->bit, pen); |
887 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
888 | } else { |
889 | clear_bit(cd->bit, pen); |
890 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
891 | } |
892 | } |
893 | } |
894 | return 0; |
895 | } |
896 | |
897 | static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, |
898 | const struct cpumask *dest, |
899 | bool force) |
900 | { |
901 | int cpu; |
902 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
903 | u64 mask; |
904 | struct octeon_ciu_chip_data *cd; |
905 | |
906 | if (!enable_one) |
907 | return 0; |
908 | |
909 | cd = irq_data_get_irq_chip_data(data); |
910 | mask = 1ull << cd->bit; |
911 | |
912 | for_each_online_cpu(cpu) { |
913 | int index = octeon_coreid_for_cpu(cpu); |
914 | |
915 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
916 | enable_one = false; |
917 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); |
918 | } else { |
919 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); |
920 | } |
921 | } |
922 | return 0; |
923 | } |
924 | #endif |
925 | |
926 | static unsigned int edge_startup(struct irq_data *data) |
927 | { |
928 | /* ack any pending edge-irq at startup, so there is |
929 | * an _edge_ to fire on when the event reappears. |
930 | */ |
931 | data->chip->irq_ack(data); |
932 | data->chip->irq_enable(data); |
933 | return 0; |
934 | } |
935 | |
936 | /* |
937 | * Newer octeon chips have support for lockless CIU operation. |
938 | */ |
939 | static struct irq_chip octeon_irq_chip_ciu_v2 = { |
940 | .name = "CIU", |
941 | .irq_enable = octeon_irq_ciu_enable_v2, |
942 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
943 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
944 | .irq_unmask = octeon_irq_ciu_enable_v2, |
945 | #ifdef CONFIG_SMP |
946 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
947 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
948 | #endif |
949 | }; |
950 | |
951 | static struct irq_chip octeon_irq_chip_ciu_v2_edge = { |
952 | .name = "CIU", |
953 | .irq_enable = octeon_irq_ciu_enable_v2, |
954 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
955 | .irq_ack = octeon_irq_ciu_ack, |
956 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
957 | .irq_unmask = octeon_irq_ciu_enable_v2, |
958 | #ifdef CONFIG_SMP |
959 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
960 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
961 | #endif |
962 | }; |
963 | |
964 | /* |
965 | * Newer octeon chips have support for lockless CIU operation. |
966 | */ |
967 | static struct irq_chip octeon_irq_chip_ciu_sum2 = { |
968 | .name = "CIU", |
969 | .irq_enable = octeon_irq_ciu_enable_sum2, |
970 | .irq_disable = octeon_irq_ciu_disable_all_sum2, |
971 | .irq_mask = octeon_irq_ciu_disable_local_sum2, |
972 | .irq_unmask = octeon_irq_ciu_enable_sum2, |
973 | #ifdef CONFIG_SMP |
974 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, |
975 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
976 | #endif |
977 | }; |
978 | |
979 | static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { |
980 | .name = "CIU", |
981 | .irq_enable = octeon_irq_ciu_enable_sum2, |
982 | .irq_disable = octeon_irq_ciu_disable_all_sum2, |
983 | .irq_ack = octeon_irq_ciu_ack_sum2, |
984 | .irq_mask = octeon_irq_ciu_disable_local_sum2, |
985 | .irq_unmask = octeon_irq_ciu_enable_sum2, |
986 | #ifdef CONFIG_SMP |
987 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, |
988 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
989 | #endif |
990 | }; |
991 | |
992 | static struct irq_chip octeon_irq_chip_ciu = { |
993 | .name = "CIU", |
994 | .irq_enable = octeon_irq_ciu_enable, |
995 | .irq_disable = octeon_irq_ciu_disable_all, |
996 | .irq_mask = octeon_irq_ciu_disable_local, |
997 | .irq_unmask = octeon_irq_ciu_enable, |
998 | #ifdef CONFIG_SMP |
999 | .irq_set_affinity = octeon_irq_ciu_set_affinity, |
1000 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1001 | #endif |
1002 | }; |
1003 | |
1004 | static struct irq_chip octeon_irq_chip_ciu_edge = { |
1005 | .name = "CIU", |
1006 | .irq_enable = octeon_irq_ciu_enable, |
1007 | .irq_disable = octeon_irq_ciu_disable_all, |
1008 | .irq_ack = octeon_irq_ciu_ack, |
1009 | .irq_mask = octeon_irq_ciu_disable_local, |
1010 | .irq_unmask = octeon_irq_ciu_enable, |
1011 | #ifdef CONFIG_SMP |
1012 | .irq_set_affinity = octeon_irq_ciu_set_affinity, |
1013 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1014 | #endif |
1015 | }; |
1016 | |
1017 | /* The mbox versions don't do any affinity or round-robin. */ |
1018 | static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { |
1019 | .name = "CIU-M", |
1020 | .irq_enable = octeon_irq_ciu_enable_all_v2, |
1021 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
1022 | .irq_ack = octeon_irq_ciu_disable_local_v2, |
1023 | .irq_eoi = octeon_irq_ciu_enable_local_v2, |
1024 | |
1025 | .irq_cpu_online = octeon_irq_ciu_enable_local_v2, |
1026 | .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, |
1027 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
1028 | }; |
1029 | |
1030 | static struct irq_chip octeon_irq_chip_ciu_mbox = { |
1031 | .name = "CIU-M", |
1032 | .irq_enable = octeon_irq_ciu_enable_all, |
1033 | .irq_disable = octeon_irq_ciu_disable_all, |
1034 | .irq_ack = octeon_irq_ciu_disable_local, |
1035 | .irq_eoi = octeon_irq_ciu_enable_local, |
1036 | |
1037 | .irq_cpu_online = octeon_irq_ciu_enable_local, |
1038 | .irq_cpu_offline = octeon_irq_ciu_disable_local, |
1039 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
1040 | }; |
1041 | |
1042 | static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = { |
1043 | .name = "CIU-GPIO", |
1044 | .irq_enable = octeon_irq_ciu_enable_gpio_v2, |
1045 | .irq_disable = octeon_irq_ciu_disable_gpio_v2, |
1046 | .irq_ack = octeon_irq_ciu_gpio_ack, |
1047 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
1048 | .irq_unmask = octeon_irq_ciu_enable_v2, |
1049 | .irq_set_type = octeon_irq_ciu_gpio_set_type, |
1050 | #ifdef CONFIG_SMP |
1051 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
1052 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1053 | #endif |
1054 | .flags = IRQCHIP_SET_TYPE_MASKED, |
1055 | }; |
1056 | |
1057 | static struct irq_chip octeon_irq_chip_ciu_gpio = { |
1058 | .name = "CIU-GPIO", |
1059 | .irq_enable = octeon_irq_ciu_enable_gpio, |
1060 | .irq_disable = octeon_irq_ciu_disable_gpio, |
1061 | .irq_mask = octeon_irq_ciu_disable_local, |
1062 | .irq_unmask = octeon_irq_ciu_enable, |
1063 | .irq_ack = octeon_irq_ciu_gpio_ack, |
1064 | .irq_set_type = octeon_irq_ciu_gpio_set_type, |
1065 | #ifdef CONFIG_SMP |
1066 | .irq_set_affinity = octeon_irq_ciu_set_affinity, |
1067 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1068 | #endif |
1069 | .flags = IRQCHIP_SET_TYPE_MASKED, |
1070 | }; |
1071 | |
1072 | /* |
1073 | * Watchdog interrupts are special. They are associated with a single |
1074 | * core, so we hardwire the affinity to that core. |
1075 | */ |
1076 | static void octeon_irq_ciu_wd_enable(struct irq_data *data) |
1077 | { |
1078 | unsigned long flags; |
1079 | unsigned long *pen; |
1080 | int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
1081 | int cpu = octeon_cpu_for_coreid(coreid); |
1082 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
1083 | |
1084 | raw_spin_lock_irqsave(lock, flags); |
1085 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
1086 | __set_bit(coreid, pen); |
1087 | /* |
1088 | * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling |
1089 | * the irq. |
1090 | */ |
1091 | wmb(); |
1092 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
1093 | raw_spin_unlock_irqrestore(lock, flags); |
1094 | } |
1095 | |
1096 | /* |
1097 | * Watchdog interrupts are special. They are associated with a single |
1098 | * core, so we hardwire the affinity to that core. |
1099 | */ |
1100 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) |
1101 | { |
1102 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
1103 | int cpu = octeon_cpu_for_coreid(coreid); |
1104 | |
1105 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
1106 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); |
1107 | } |
1108 | |
1109 | |
1110 | static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { |
1111 | .name = "CIU-W", |
1112 | .irq_enable = octeon_irq_ciu1_wd_enable_v2, |
1113 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
1114 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
1115 | .irq_unmask = octeon_irq_ciu_enable_local_v2, |
1116 | }; |
1117 | |
1118 | static struct irq_chip octeon_irq_chip_ciu_wd = { |
1119 | .name = "CIU-W", |
1120 | .irq_enable = octeon_irq_ciu_wd_enable, |
1121 | .irq_disable = octeon_irq_ciu_disable_all, |
1122 | .irq_mask = octeon_irq_ciu_disable_local, |
1123 | .irq_unmask = octeon_irq_ciu_enable_local, |
1124 | }; |
1125 | |
1126 | static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit) |
1127 | { |
1128 | bool edge = false; |
1129 | |
1130 | if (line == 0) |
1131 | switch (bit) { |
1132 | case 48 ... 49: /* GMX DRP */ |
1133 | case 50: /* IPD_DRP */ |
1134 | case 52 ... 55: /* Timers */ |
1135 | case 58: /* MPI */ |
1136 | edge = true; |
1137 | break; |
1138 | default: |
1139 | break; |
1140 | } |
1141 | else /* line == 1 */ |
1142 | switch (bit) { |
1143 | case 47: /* PTP */ |
1144 | edge = true; |
1145 | break; |
1146 | default: |
1147 | break; |
1148 | } |
1149 | return edge; |
1150 | } |
1151 | |
1152 | struct octeon_irq_gpio_domain_data { |
1153 | unsigned int base_hwirq; |
1154 | }; |
1155 | |
1156 | static int octeon_irq_gpio_xlat(struct irq_domain *d, |
1157 | struct device_node *node, |
1158 | const u32 *intspec, |
1159 | unsigned int intsize, |
1160 | unsigned long *out_hwirq, |
1161 | unsigned int *out_type) |
1162 | { |
1163 | unsigned int type; |
1164 | unsigned int pin; |
1165 | unsigned int trigger; |
1166 | |
1167 | if (irq_domain_get_of_node(d) != node) |
1168 | return -EINVAL; |
1169 | |
1170 | if (intsize < 2) |
1171 | return -EINVAL; |
1172 | |
1173 | pin = intspec[0]; |
1174 | if (pin >= 16) |
1175 | return -EINVAL; |
1176 | |
1177 | trigger = intspec[1]; |
1178 | |
1179 | switch (trigger) { |
1180 | case 1: |
1181 | type = IRQ_TYPE_EDGE_RISING; |
1182 | break; |
1183 | case 2: |
1184 | type = IRQ_TYPE_EDGE_FALLING; |
1185 | break; |
1186 | case 4: |
1187 | type = IRQ_TYPE_LEVEL_HIGH; |
1188 | break; |
1189 | case 8: |
1190 | type = IRQ_TYPE_LEVEL_LOW; |
1191 | break; |
1192 | default: |
1193 | pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n", |
1194 | node, |
1195 | trigger); |
1196 | type = IRQ_TYPE_LEVEL_LOW; |
1197 | break; |
1198 | } |
1199 | *out_type = type; |
1200 | *out_hwirq = pin; |
1201 | |
1202 | return 0; |
1203 | } |
1204 | |
1205 | static int octeon_irq_ciu_xlat(struct irq_domain *d, |
1206 | struct device_node *node, |
1207 | const u32 *intspec, |
1208 | unsigned int intsize, |
1209 | unsigned long *out_hwirq, |
1210 | unsigned int *out_type) |
1211 | { |
1212 | unsigned int ciu, bit; |
1213 | struct octeon_irq_ciu_domain_data *dd = d->host_data; |
1214 | |
1215 | ciu = intspec[0]; |
1216 | bit = intspec[1]; |
1217 | |
1218 | if (ciu >= dd->num_sum || bit > 63) |
1219 | return -EINVAL; |
1220 | |
1221 | *out_hwirq = (ciu << 6) | bit; |
1222 | *out_type = 0; |
1223 | |
1224 | return 0; |
1225 | } |
1226 | |
1227 | static struct irq_chip *octeon_irq_ciu_chip; |
1228 | static struct irq_chip *octeon_irq_ciu_chip_edge; |
1229 | static struct irq_chip *octeon_irq_gpio_chip; |
1230 | |
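/*
 * irq_domain .map callback: choose the sum2 or standard CIU chip based on
 * the line, and an edge or level flow handler based on the source type.
 */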
1231 | static int octeon_irq_ciu_map(struct irq_domain *d, |
1232 | unsigned int virq, irq_hw_number_t hw) |
1233 | { |
1234 | int rv; |
1235 | unsigned int line = hw >> 6; |
1236 | unsigned int bit = hw & 63; |
1237 | struct octeon_irq_ciu_domain_data *dd = d->host_data; |
1238 | |
1239 | if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) |
1240 | return -EINVAL; |
1241 | |
1242 | if (line == 2) { |
1243 | if (octeon_irq_ciu_is_edge(line, bit)) |
1244 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1245 | &octeon_irq_chip_ciu_sum2_edge, |
1246 | handle_edge_irq); |
1247 | else |
1248 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1249 | &octeon_irq_chip_ciu_sum2, |
1250 | handle_level_irq); |
1251 | } else { |
1252 | if (octeon_irq_ciu_is_edge(line, bit)) |
1253 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1254 | octeon_irq_ciu_chip_edge, |
1255 | handle_edge_irq); |
1256 | else |
1257 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1258 | octeon_irq_ciu_chip, |
1259 | handle_level_irq); |
1260 | } |
1261 | return rv; |
1262 | } |
1263 | |
1264 | static int octeon_irq_gpio_map(struct irq_domain *d, |
1265 | unsigned int virq, irq_hw_number_t hw) |
1266 | { |
1267 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; |
1268 | unsigned int line, bit; |
1269 | int r; |
1270 | |
1271 | line = (hw + gpiod->base_hwirq) >> 6; |
1272 | bit = (hw + gpiod->base_hwirq) & 63; |
1273 | if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) || |
1274 | octeon_irq_ciu_to_irq[line][bit] != 0) |
1275 | return -EINVAL; |
1276 | |
1277 | /* |
1278 | * Default to handle_level_irq. If the DT contains a different |
1279 | * trigger type, it will call the irq_set_type callback and |
1280 | * the handler gets updated. |
1281 | */ |
1282 | r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, |
1283 | octeon_irq_gpio_chip, handle_level_irq); |
1284 | return r; |
1285 | } |
1286 | |
1287 | static const struct irq_domain_ops octeon_irq_domain_ciu_ops = { |
1288 | .map = octeon_irq_ciu_map, |
1289 | .unmap = octeon_irq_free_cd, |
1290 | .xlate = octeon_irq_ciu_xlat, |
1291 | }; |
1292 | |
1293 | static const struct irq_domain_ops octeon_irq_domain_gpio_ops = { |
1294 | .map = octeon_irq_gpio_map, |
1295 | .unmap = octeon_irq_free_cd, |
1296 | .xlate = octeon_irq_gpio_xlat, |
1297 | }; |
1298 | |
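/* Dispatch the highest pending, enabled CIU sum0 source for this core (IP2). */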
1299 | static void octeon_irq_ip2_ciu(void) |
1300 | { |
1301 | const unsigned long core_id = cvmx_get_core_num(); |
1302 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
1303 | |
1304 | ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror); |
1305 | if (likely(ciu_sum)) { |
1306 | int bit = fls64(ciu_sum) - 1; |
1307 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
1308 | if (likely(irq)) |
1309 | do_IRQ(irq); |
1310 | else |
1311 | spurious_interrupt(); |
1312 | } else { |
1313 | spurious_interrupt(); |
1314 | } |
1315 | } |
1316 | |
1317 | static void octeon_irq_ip3_ciu(void) |
1318 | { |
1319 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
1320 | |
1321 | ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror); |
1322 | if (likely(ciu_sum)) { |
1323 | int bit = fls64(ciu_sum) - 1; |
1324 | int irq = octeon_irq_ciu_to_irq[1][bit]; |
1325 | if (likely(irq)) |
1326 | do_IRQ(irq); |
1327 | else |
1328 | spurious_interrupt(); |
1329 | } else { |
1330 | spurious_interrupt(); |
1331 | } |
1332 | } |
1333 | |
1334 | static void octeon_irq_ip4_ciu(void) |
1335 | { |
1336 | int coreid = cvmx_get_core_num(); |
1337 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); |
1338 | u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); |
1339 | |
1340 | ciu_sum &= ciu_en; |
1341 | if (likely(ciu_sum)) { |
1342 | int bit = fls64(ciu_sum) - 1; |
1343 | int irq = octeon_irq_ciu_to_irq[2][bit]; |
1344 | |
1345 | if (likely(irq)) |
1346 | do_IRQ(irq); |
1347 | else |
1348 | spurious_interrupt(); |
1349 | } else { |
1350 | spurious_interrupt(); |
1351 | } |
1352 | } |
1353 | |
1354 | static bool octeon_irq_use_ip4; |
1355 | |
1356 | static void octeon_irq_local_enable_ip4(void *arg) |
1357 | { |
1358 | set_c0_status(STATUSF_IP4); |
1359 | } |
1360 | |
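/* Fallback IP4 handler: mask IP4 on this core and report a spurious irq. */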
1361 | static void octeon_irq_ip4_mask(void) |
1362 | { |
1363 | clear_c0_status(STATUSF_IP4); |
1364 | spurious_interrupt(); |
1365 | } |
1366 | |
1367 | static void (*octeon_irq_ip2)(void); |
1368 | static void (*octeon_irq_ip3)(void); |
1369 | static void (*octeon_irq_ip4)(void); |
1370 | |
1371 | void (*octeon_irq_setup_secondary)(void); |
1372 | |
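/* Install an IP4 handler and unmask the IP4 interrupt line on every CPU. */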
1373 | void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) |
1374 | { |
1375 | octeon_irq_ip4 = h; |
1376 | octeon_irq_use_ip4 = true; |
1377 | on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); |
1378 | } |
1379 | |
1380 | static void octeon_irq_percpu_enable(void) |
1381 | { |
1382 | irq_cpu_online(); |
1383 | } |
1384 | |
1385 | static void octeon_irq_init_ciu_percpu(void) |
1386 | { |
1387 | int coreid = cvmx_get_core_num(); |
1388 | |
1389 | |
1390 | __this_cpu_write(octeon_irq_ciu0_en_mirror, 0); |
1391 | __this_cpu_write(octeon_irq_ciu1_en_mirror, 0); |
1392 | wmb(); |
1393 | raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock)); |
1394 | /* |
1395 | * Disable All CIU Interrupts. The ones we need will be |
1396 | * enabled later. Read the SUM register so we know the write |
1397 | * completed. |
1398 | */ |
1399 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); |
1400 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); |
1401 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); |
1402 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); |
1403 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); |
1404 | } |
1405 | |
1406 | static void octeon_irq_init_ciu2_percpu(void) |
1407 | { |
1408 | u64 regx, ipx; |
1409 | int coreid = cvmx_get_core_num(); |
1410 | u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid); |
1411 | |
1412 | /* |
1413 | * Disable All CIU2 Interrupts. The ones we need will be |
1414 | * enabled later. Read the SUM register so we know the write |
1415 | * completed. |
1416 | * |
1417 | * There are 9 registers and 3 IPX levels with strides 0x1000 |
1418 | * and 0x200 respectively. Use loops to clear them. |
1419 | */ |
1420 | for (regx = 0; regx <= 0x8000; regx += 0x1000) { |
1421 | for (ipx = 0; ipx <= 0x400; ipx += 0x200) |
1422 | cvmx_write_csr(base + regx + ipx, 0); |
1423 | } |
1424 | |
1425 | cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); |
1426 | } |
1427 | |
1428 | static void octeon_irq_setup_secondary_ciu(void) |
1429 | { |
1430 | octeon_irq_init_ciu_percpu(); |
1431 | octeon_irq_percpu_enable(); |
1432 | |
1433 | /* Enable the CIU lines */ |
1434 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1435 | if (octeon_irq_use_ip4) |
1436 | set_c0_status(STATUSF_IP4); |
1437 | else |
1438 | clear_c0_status(STATUSF_IP4); |
1439 | } |
1440 | |
1441 | static void octeon_irq_setup_secondary_ciu2(void) |
1442 | { |
1443 | octeon_irq_init_ciu2_percpu(); |
1444 | octeon_irq_percpu_enable(); |
1445 | |
1446 | /* Enable the CIU lines */ |
1447 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1448 | if (octeon_irq_use_ip4) |
1449 | set_c0_status(STATUSF_IP4); |
1450 | else |
1451 | clear_c0_status(STATUSF_IP4); |
1452 | } |
1453 | |
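/*
 * Boot-time CIU setup: select chip variants by model, register the irq
 * domain, and create the fixed legacy mappings (workq, mbox, PCI, timers,
 * watchdog).
 */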
1454 | static int __init octeon_irq_init_ciu( |
1455 | struct device_node *ciu_node, struct device_node *parent) |
1456 | { |
1457 | int i, r; |
1458 | struct irq_chip *chip; |
1459 | struct irq_chip *chip_edge; |
1460 | struct irq_chip *chip_mbox; |
1461 | struct irq_chip *chip_wd; |
1462 | struct irq_domain *ciu_domain = NULL; |
1463 | struct octeon_irq_ciu_domain_data *dd; |
1464 | |
1465 | dd = kzalloc(sizeof(*dd), GFP_KERNEL); |
1466 | if (!dd) |
1467 | return -ENOMEM; |
1468 | |
1469 | octeon_irq_init_ciu_percpu(); |
1470 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; |
1471 | |
1472 | octeon_irq_ip2 = octeon_irq_ip2_ciu; |
1473 | octeon_irq_ip3 = octeon_irq_ip3_ciu; |
1474 | if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) |
1475 | && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
1476 | octeon_irq_ip4 = octeon_irq_ip4_ciu; |
1477 | dd->num_sum = 3; |
1478 | octeon_irq_use_ip4 = true; |
1479 | } else { |
1480 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
1481 | dd->num_sum = 2; |
1482 | octeon_irq_use_ip4 = false; |
1483 | } |
1484 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
1485 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
1486 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || |
1487 | OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { |
1488 | chip = &octeon_irq_chip_ciu_v2; |
1489 | chip_edge = &octeon_irq_chip_ciu_v2_edge; |
1490 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; |
1491 | chip_wd = &octeon_irq_chip_ciu_wd_v2; |
1492 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; |
1493 | } else { |
1494 | chip = &octeon_irq_chip_ciu; |
1495 | chip_edge = &octeon_irq_chip_ciu_edge; |
1496 | chip_mbox = &octeon_irq_chip_ciu_mbox; |
1497 | chip_wd = &octeon_irq_chip_ciu_wd; |
1498 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; |
1499 | } |
1500 | octeon_irq_ciu_chip = chip; |
1501 | octeon_irq_ciu_chip_edge = chip_edge; |
1502 | |
1503 | /* Mips internal */ |
1504 | octeon_irq_init_core(); |
1505 | |
1506 | ciu_domain = irq_domain_add_tree( |
1507 | ciu_node, &octeon_irq_domain_ciu_ops, dd); |
1508 | irq_set_default_host(ciu_domain); |
1509 | |
1510 | /* CIU_0 */ |
1511 | for (i = 0; i < 16; i++) { |
1512 | r = octeon_irq_force_ciu_mapping( |
1513 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); |
1514 | if (r) |
1515 | goto err; |
1516 | } |
1517 | |
1518 | r = irq_alloc_desc_at(OCTEON_IRQ_MBOX0, -1); |
1519 | if (r < 0) { |
1520 | pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX0"); |
1521 | goto err; |
1522 | } |
1523 | r = octeon_irq_set_ciu_mapping( |
1524 | OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); |
1525 | if (r) |
1526 | goto err; |
1527 | r = irq_alloc_desc_at(OCTEON_IRQ_MBOX1, -1); |
1528 | if (r < 0) { |
1529 | pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX1"); |
1530 | goto err; |
1531 | } |
1532 | r = octeon_irq_set_ciu_mapping( |
1533 | OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); |
1534 | if (r) |
1535 | goto err; |
1536 | |
1537 | for (i = 0; i < 4; i++) { |
1538 | r = octeon_irq_force_ciu_mapping( |
1539 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); |
1540 | if (r) |
1541 | goto err; |
1542 | } |
1543 | for (i = 0; i < 4; i++) { |
1544 | r = octeon_irq_force_ciu_mapping( |
1545 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); |
1546 | if (r) |
1547 | goto err; |
1548 | } |
1549 | |
1550 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); |
1551 | if (r) |
1552 | goto err; |
1553 | |
1554 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); |
1555 | if (r) |
1556 | goto err; |
1557 | |
1558 | for (i = 0; i < 4; i++) { |
1559 | r = octeon_irq_force_ciu_mapping( |
1560 | ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); |
1561 | if (r) |
1562 | goto err; |
1563 | } |
1564 | |
1565 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); |
1566 | if (r) |
1567 | goto err; |
1568 | |
1569 | r = irq_alloc_descs(OCTEON_IRQ_WDOG0, OCTEON_IRQ_WDOG0, 16, -1); |
1570 | if (r < 0) { |
1571 | pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_WDOGx"); |
1572 | goto err; |
1573 | } |
1574 | /* CIU_1 */ |
1575 | for (i = 0; i < 16; i++) { |
1576 | r = octeon_irq_set_ciu_mapping( |
1577 | i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, |
1578 | handle_level_irq); |
1579 | if (r) |
1580 | goto err; |
1581 | } |
1582 | |
1583 | /* Enable the CIU lines */ |
1584 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1585 | if (octeon_irq_use_ip4) |
1586 | set_c0_status(STATUSF_IP4); |
1587 | else |
1588 | clear_c0_status(STATUSF_IP4); |
1589 | |
1590 | return 0; |
1591 | err: |
1592 | return r; |
1593 | } |
1594 | |
1595 | static int __init octeon_irq_init_gpio( |
1596 | struct device_node *gpio_node, struct device_node *parent) |
1597 | { |
1598 | struct octeon_irq_gpio_domain_data *gpiod; |
1599 | u32 interrupt_cells; |
1600 | unsigned int base_hwirq; |
1601 | int r; |
1602 | |
1603 | r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); |
1604 | if (r) |
1605 | return r; |
1606 | |
1607 | if (interrupt_cells == 1) { |
1608 | u32 v; |
1609 | |
1610 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); |
1611 | if (r) { |
1612 | pr_warn("No \"interrupts\" property.\n"); |
1613 | return r; |
1614 | } |
1615 | base_hwirq = v; |
1616 | } else if (interrupt_cells == 2) { |
1617 | u32 v0, v1; |
1618 | |
1619 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); |
1620 | if (r) { |
1621 | pr_warn("No \"interrupts\" property.\n"); |
1622 | return r; |
1623 | } |
1624 | r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); |
1625 | if (r) { |
1626 | pr_warn("No \"interrupts\" property.\n"); |
1627 | return r; |
1628 | } |
1629 | base_hwirq = (v0 << 6) | v1; |
1630 | } else { |
1631 | pr_warn("Bad \"#interrupt-cells\" property: %u\n", |
1632 | interrupt_cells); |
1633 | return -EINVAL; |
1634 | } |
1635 | |
1636 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); |
1637 | if (gpiod) { |
1638 | /* gpio domain host_data is the base hwirq number. */ |
1639 | gpiod->base_hwirq = base_hwirq; |
1640 | irq_domain_add_linear( |
1641 | gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); |
1642 | } else { |
1643 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); |
1644 | return -ENOMEM; |
1645 | } |
1646 | |
1647 | /* |
1648 | * Clear the OF_POPULATED flag that was set by of_irq_init() |
1649 | * so that all GPIO devices will be probed. |
1650 | */ |
1651 | of_node_clear_flag(gpio_node, OF_POPULATED); |
1652 | |
1653 | return 0; |
1654 | } |
1655 | /* |
1656 | * Watchdog interrupts are special. They are associated with a single |
1657 | * core, so we hardwire the affinity to that core. |
1658 | */ |
1659 | static void octeon_irq_ciu2_wd_enable(struct irq_data *data) |
1660 | { |
1661 | u64 mask; |
1662 | u64 en_addr; |
1663 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
1664 | struct octeon_ciu_chip_data *cd; |
1665 | |
1666 | cd = irq_data_get_irq_chip_data(data); |
1667 | mask = 1ull << (cd->bit); |
1668 | |
1669 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1670 | (0x1000ull * cd->line); |
1671 | cvmx_write_csr(en_addr, mask); |
1672 | |
1673 | } |
1674 | |
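/* Enable the source on the next core chosen from the affinity mask. */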
1675 | static void octeon_irq_ciu2_enable(struct irq_data *data) |
1676 | { |
1677 | u64 mask; |
1678 | u64 en_addr; |
1679 | int cpu = next_cpu_for_irq(data); |
1680 | int coreid = octeon_coreid_for_cpu(cpu); |
1681 | struct octeon_ciu_chip_data *cd; |
1682 | |
1683 | cd = irq_data_get_irq_chip_data(data); |
1684 | mask = 1ull << (cd->bit); |
1685 | |
1686 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1687 | (0x1000ull * cd->line); |
1688 | cvmx_write_csr(en_addr, mask); |
1689 | } |
1690 | |
1691 | static void octeon_irq_ciu2_enable_local(struct irq_data *data) |
1692 | { |
1693 | u64 mask; |
1694 | u64 en_addr; |
1695 | int coreid = cvmx_get_core_num(); |
1696 | struct octeon_ciu_chip_data *cd; |
1697 | |
1698 | cd = irq_data_get_irq_chip_data(data); |
1699 | mask = 1ull << (cd->bit); |
1700 | |
1701 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1702 | (0x1000ull * cd->line); |
1703 | cvmx_write_csr(en_addr, mask); |
1704 | |
1705 | } |
1706 | |
1707 | static void octeon_irq_ciu2_disable_local(struct irq_data *data) |
1708 | { |
1709 | u64 mask; |
1710 | u64 en_addr; |
1711 | int coreid = cvmx_get_core_num(); |
1712 | struct octeon_ciu_chip_data *cd; |
1713 | |
1714 | cd = irq_data_get_irq_chip_data(data); |
1715 | mask = 1ull << (cd->bit); |
1716 | |
1717 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + |
1718 | (0x1000ull * cd->line); |
1719 | cvmx_write_csr(en_addr, mask); |
1720 | |
1721 | } |
1722 | |
1723 | static void octeon_irq_ciu2_ack(struct irq_data *data) |
1724 | { |
1725 | u64 mask; |
1726 | u64 en_addr; |
1727 | int coreid = cvmx_get_core_num(); |
1728 | struct octeon_ciu_chip_data *cd; |
1729 | |
1730 | cd = irq_data_get_irq_chip_data(data); |
1731 | mask = 1ull << (cd->bit); |
1732 | |
1733 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); |
1734 | cvmx_write_csr(en_addr, mask); |
1735 | |
1736 | } |
1737 | |
1738 | static void octeon_irq_ciu2_disable_all(struct irq_data *data) |
1739 | { |
1740 | int cpu; |
1741 | u64 mask; |
1742 | struct octeon_ciu_chip_data *cd; |
1743 | |
	cd = irq_data_get_irq_chip_data(data);
1745 | mask = 1ull << (cd->bit); |
1746 | |
1747 | for_each_online_cpu(cpu) { |
1748 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
1749 | octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); |
1750 | cvmx_write_csr(en_addr, mask); |
1751 | } |
1752 | } |
1753 | |
1754 | static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) |
1755 | { |
1756 | int cpu; |
1757 | u64 mask; |
1758 | |
1759 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1760 | |
1761 | for_each_online_cpu(cpu) { |
1762 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( |
1763 | octeon_coreid_for_cpu(cpu)); |
1764 | cvmx_write_csr(en_addr, mask); |
1765 | } |
1766 | } |
1767 | |
1768 | static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) |
1769 | { |
1770 | int cpu; |
1771 | u64 mask; |
1772 | |
1773 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1774 | |
1775 | for_each_online_cpu(cpu) { |
1776 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( |
1777 | octeon_coreid_for_cpu(cpu)); |
1778 | cvmx_write_csr(en_addr, mask); |
1779 | } |
1780 | } |
1781 | |
1782 | static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data) |
1783 | { |
1784 | u64 mask; |
1785 | u64 en_addr; |
1786 | int coreid = cvmx_get_core_num(); |
1787 | |
1788 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1789 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid); |
1790 | cvmx_write_csr(en_addr, mask); |
1791 | } |
1792 | |
1793 | static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data) |
1794 | { |
1795 | u64 mask; |
1796 | u64 en_addr; |
1797 | int coreid = cvmx_get_core_num(); |
1798 | |
1799 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1800 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid); |
1801 | cvmx_write_csr(en_addr, mask); |
1802 | } |
1803 | |
1804 | #ifdef CONFIG_SMP |
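/*
 * Re-target an enabled CIU2 source: set the enable bit on the first
 * online CPU present in the destination mask and clear it on every
 * other online CPU, so the source is delivered to exactly one core.
 */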
1805 | static int octeon_irq_ciu2_set_affinity(struct irq_data *data, |
1806 | const struct cpumask *dest, bool force) |
1807 | { |
1808 | int cpu; |
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
1810 | u64 mask; |
1811 | struct octeon_ciu_chip_data *cd; |
1812 | |
1813 | if (!enable_one) |
1814 | return 0; |
1815 | |
	cd = irq_data_get_irq_chip_data(data);
1817 | mask = 1ull << cd->bit; |
1818 | |
1819 | for_each_online_cpu(cpu) { |
1820 | u64 en_addr; |
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
1822 | enable_one = false; |
1823 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( |
1824 | octeon_coreid_for_cpu(cpu)) + |
1825 | (0x1000ull * cd->line); |
1826 | } else { |
1827 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
1828 | octeon_coreid_for_cpu(cpu)) + |
1829 | (0x1000ull * cd->line); |
1830 | } |
1831 | cvmx_write_csr(en_addr, mask); |
1832 | } |
1833 | |
1834 | return 0; |
1835 | } |
1836 | #endif |
1837 | |
1838 | static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) |
1839 | { |
1840 | octeon_irq_gpio_setup(data); |
1841 | octeon_irq_ciu2_enable(data); |
1842 | } |
1843 | |
1844 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) |
1845 | { |
1846 | struct octeon_ciu_chip_data *cd; |
1847 | |
	cd = irq_data_get_irq_chip_data(data);
1849 | |
1850 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
1851 | |
1852 | octeon_irq_ciu2_disable_all(data); |
1853 | } |
1854 | |
1855 | static struct irq_chip octeon_irq_chip_ciu2 = { |
1856 | .name = "CIU2-E" , |
1857 | .irq_enable = octeon_irq_ciu2_enable, |
1858 | .irq_disable = octeon_irq_ciu2_disable_all, |
1859 | .irq_mask = octeon_irq_ciu2_disable_local, |
1860 | .irq_unmask = octeon_irq_ciu2_enable, |
1861 | #ifdef CONFIG_SMP |
1862 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, |
1863 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1864 | #endif |
1865 | }; |
1866 | |
1867 | static struct irq_chip octeon_irq_chip_ciu2_edge = { |
1868 | .name = "CIU2-E" , |
1869 | .irq_enable = octeon_irq_ciu2_enable, |
1870 | .irq_disable = octeon_irq_ciu2_disable_all, |
1871 | .irq_ack = octeon_irq_ciu2_ack, |
1872 | .irq_mask = octeon_irq_ciu2_disable_local, |
1873 | .irq_unmask = octeon_irq_ciu2_enable, |
1874 | #ifdef CONFIG_SMP |
1875 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, |
1876 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1877 | #endif |
1878 | }; |
1879 | |
1880 | static struct irq_chip octeon_irq_chip_ciu2_mbox = { |
1881 | .name = "CIU2-M" , |
1882 | .irq_enable = octeon_irq_ciu2_mbox_enable_all, |
1883 | .irq_disable = octeon_irq_ciu2_mbox_disable_all, |
1884 | .irq_ack = octeon_irq_ciu2_mbox_disable_local, |
1885 | .irq_eoi = octeon_irq_ciu2_mbox_enable_local, |
1886 | |
1887 | .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local, |
1888 | .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local, |
1889 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
1890 | }; |
1891 | |
1892 | static struct irq_chip octeon_irq_chip_ciu2_wd = { |
1893 | .name = "CIU2-W" , |
1894 | .irq_enable = octeon_irq_ciu2_wd_enable, |
1895 | .irq_disable = octeon_irq_ciu2_disable_all, |
1896 | .irq_mask = octeon_irq_ciu2_disable_local, |
1897 | .irq_unmask = octeon_irq_ciu2_enable_local, |
1898 | }; |
1899 | |
1900 | static struct irq_chip octeon_irq_chip_ciu2_gpio = { |
1901 | .name = "CIU-GPIO" , |
1902 | .irq_enable = octeon_irq_ciu2_enable_gpio, |
1903 | .irq_disable = octeon_irq_ciu2_disable_gpio, |
1904 | .irq_ack = octeon_irq_ciu_gpio_ack, |
1905 | .irq_mask = octeon_irq_ciu2_disable_local, |
1906 | .irq_unmask = octeon_irq_ciu2_enable, |
1907 | .irq_set_type = octeon_irq_ciu_gpio_set_type, |
1908 | #ifdef CONFIG_SMP |
1909 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, |
1910 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1911 | #endif |
1912 | .flags = IRQCHIP_SET_TYPE_MASKED, |
1913 | }; |
1914 | |
1915 | static int octeon_irq_ciu2_xlat(struct irq_domain *d, |
1916 | struct device_node *node, |
1917 | const u32 *intspec, |
1918 | unsigned int intsize, |
1919 | unsigned long *out_hwirq, |
1920 | unsigned int *out_type) |
1921 | { |
1922 | unsigned int ciu, bit; |
1923 | |
1924 | ciu = intspec[0]; |
1925 | bit = intspec[1]; |
1926 | |
1927 | *out_hwirq = (ciu << 6) | bit; |
1928 | *out_type = 0; |
1929 | |
1930 | return 0; |
1931 | } |
1932 | |
1933 | static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit) |
1934 | { |
1935 | bool edge = false; |
1936 | |
1937 | if (line == 3) /* MIO */ |
1938 | switch (bit) { |
1939 | case 2: /* IPD_DRP */ |
1940 | case 8 ... 11: /* Timers */ |
1941 | case 48: /* PTP */ |
1942 | edge = true; |
1943 | break; |
1944 | default: |
1945 | break; |
1946 | } |
1947 | else if (line == 6) /* PKT */ |
1948 | switch (bit) { |
1949 | case 52 ... 53: /* ILK_DRP */ |
1950 | case 8 ... 12: /* GMX_DRP */ |
1951 | edge = true; |
1952 | break; |
1953 | default: |
1954 | break; |
1955 | } |
1956 | return edge; |
1957 | } |
1958 | |
1959 | static int octeon_irq_ciu2_map(struct irq_domain *d, |
1960 | unsigned int virq, irq_hw_number_t hw) |
1961 | { |
1962 | unsigned int line = hw >> 6; |
1963 | unsigned int bit = hw & 63; |
1964 | |
1965 | /* |
1966 | * Don't map irq if it is reserved for GPIO. |
	 * (Line 7 holds the GPIO lines.)
1968 | */ |
1969 | if (line == 7) |
1970 | return 0; |
1971 | |
1972 | if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) |
1973 | return -EINVAL; |
1974 | |
1975 | if (octeon_irq_ciu2_is_edge(line, bit)) |
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);
1983 | |
1984 | return 0; |
1985 | } |
1986 | |
1987 | static const struct irq_domain_ops octeon_irq_domain_ciu2_ops = { |
1988 | .map = octeon_irq_ciu2_map, |
1989 | .unmap = octeon_irq_free_cd, |
1990 | .xlate = octeon_irq_ciu2_xlat, |
1991 | }; |
1992 | |
1993 | static void octeon_irq_ciu2(void) |
1994 | { |
1995 | int line; |
1996 | int bit; |
1997 | int irq; |
1998 | u64 src_reg, src, sum; |
1999 | const unsigned long core_id = cvmx_get_core_num(); |
2000 | |
2001 | sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful; |
2002 | |
2003 | if (unlikely(!sum)) |
2004 | goto spurious; |
2005 | |
	line = fls64(sum) - 1;
2007 | src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line); |
2008 | src = cvmx_read_csr(src_reg); |
2009 | |
2010 | if (unlikely(!src)) |
2011 | goto spurious; |
2012 | |
	bit = fls64(src) - 1;
2014 | irq = octeon_irq_ciu_to_irq[line][bit]; |
2015 | if (unlikely(!irq)) |
2016 | goto spurious; |
2017 | |
2018 | do_IRQ(irq); |
2019 | goto out; |
2020 | |
2021 | spurious: |
2022 | spurious_interrupt(); |
2023 | out: |
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
2026 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) |
2027 | cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); |
2028 | else |
2029 | cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id)); |
2030 | return; |
2031 | } |
2032 | |
2033 | static void octeon_irq_ciu2_mbox(void) |
2034 | { |
2035 | int line; |
2036 | |
2037 | const unsigned long core_id = cvmx_get_core_num(); |
2038 | u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60; |
2039 | |
2040 | if (unlikely(!sum)) |
2041 | goto spurious; |
2042 | |
	line = fls64(sum) - 1;
2044 | |
2045 | do_IRQ(OCTEON_IRQ_MBOX0 + line); |
2046 | goto out; |
2047 | |
2048 | spurious: |
2049 | spurious_interrupt(); |
2050 | out: |
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
2053 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) |
2054 | cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); |
2055 | else |
2056 | cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id)); |
2057 | return; |
2058 | } |
2059 | |
2060 | static int __init octeon_irq_init_ciu2( |
2061 | struct device_node *ciu_node, struct device_node *parent) |
2062 | { |
2063 | unsigned int i, r; |
2064 | struct irq_domain *ciu_domain = NULL; |
2065 | |
2066 | octeon_irq_init_ciu2_percpu(); |
2067 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; |
2068 | |
2069 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; |
2070 | octeon_irq_ip2 = octeon_irq_ciu2; |
2071 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; |
2072 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
2073 | |
2074 | /* Mips internal */ |
2075 | octeon_irq_init_core(); |
2076 | |
	ciu_domain = irq_domain_add_tree(ciu_node,
					 &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);
2080 | |
	/* CIU2 */
2082 | for (i = 0; i < 64; i++) { |
2083 | r = octeon_irq_force_ciu_mapping( |
2084 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); |
2085 | if (r) |
2086 | goto err; |
2087 | } |
2088 | |
2089 | for (i = 0; i < 32; i++) { |
2090 | r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, |
2091 | &octeon_irq_chip_ciu2_wd, handle_level_irq); |
2092 | if (r) |
2093 | goto err; |
2094 | } |
2095 | |
2096 | for (i = 0; i < 4; i++) { |
2097 | r = octeon_irq_force_ciu_mapping( |
2098 | ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); |
2099 | if (r) |
2100 | goto err; |
2101 | } |
2102 | |
2103 | for (i = 0; i < 4; i++) { |
2104 | r = octeon_irq_force_ciu_mapping( |
2105 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); |
2106 | if (r) |
2107 | goto err; |
2108 | } |
2109 | |
2110 | for (i = 0; i < 4; i++) { |
2111 | r = octeon_irq_force_ciu_mapping( |
2112 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); |
2113 | if (r) |
2114 | goto err; |
2115 | } |
2116 | |
2117 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
2118 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
2119 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
2120 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
2121 | |
2122 | /* Enable the CIU lines */ |
2123 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
2124 | clear_c0_status(STATUSF_IP4); |
2125 | return 0; |
2126 | err: |
2127 | return r; |
2128 | } |
2129 | |
2130 | struct octeon_irq_cib_host_data { |
2131 | raw_spinlock_t lock; |
2132 | u64 raw_reg; |
2133 | u64 en_reg; |
2134 | int max_bits; |
2135 | }; |
2136 | |
2137 | struct octeon_irq_cib_chip_data { |
2138 | struct octeon_irq_cib_host_data *host_data; |
2139 | int bit; |
2140 | }; |
2141 | |
2142 | static void octeon_irq_cib_enable(struct irq_data *data) |
2143 | { |
2144 | unsigned long flags; |
2145 | u64 en; |
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2147 | struct octeon_irq_cib_host_data *host_data = cd->host_data; |
2148 | |
2149 | raw_spin_lock_irqsave(&host_data->lock, flags); |
2150 | en = cvmx_read_csr(host_data->en_reg); |
2151 | en |= 1ull << cd->bit; |
2152 | cvmx_write_csr(host_data->en_reg, en); |
2153 | raw_spin_unlock_irqrestore(&host_data->lock, flags); |
2154 | } |
2155 | |
2156 | static void octeon_irq_cib_disable(struct irq_data *data) |
2157 | { |
2158 | unsigned long flags; |
2159 | u64 en; |
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2161 | struct octeon_irq_cib_host_data *host_data = cd->host_data; |
2162 | |
2163 | raw_spin_lock_irqsave(&host_data->lock, flags); |
2164 | en = cvmx_read_csr(host_data->en_reg); |
2165 | en &= ~(1ull << cd->bit); |
2166 | cvmx_write_csr(host_data->en_reg, en); |
2167 | raw_spin_unlock_irqrestore(&host_data->lock, flags); |
2168 | } |
2169 | |
2170 | static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) |
2171 | { |
	irqd_set_trigger_type(data, t);
2173 | return IRQ_SET_MASK_OK; |
2174 | } |
2175 | |
2176 | static struct irq_chip octeon_irq_chip_cib = { |
2177 | .name = "CIB" , |
2178 | .irq_enable = octeon_irq_cib_enable, |
2179 | .irq_disable = octeon_irq_cib_disable, |
2180 | .irq_mask = octeon_irq_cib_disable, |
2181 | .irq_unmask = octeon_irq_cib_enable, |
2182 | .irq_set_type = octeon_irq_cib_set_type, |
2183 | }; |
2184 | |
2185 | static int octeon_irq_cib_xlat(struct irq_domain *d, |
2186 | struct device_node *node, |
2187 | const u32 *intspec, |
2188 | unsigned int intsize, |
2189 | unsigned long *out_hwirq, |
2190 | unsigned int *out_type) |
2191 | { |
2192 | unsigned int type = 0; |
2193 | |
2194 | if (intsize == 2) |
2195 | type = intspec[1]; |
2196 | |
2197 | switch (type) { |
2198 | case 0: /* unofficial value, but we might as well let it work. */ |
2199 | case 4: /* official value for level triggering. */ |
2200 | *out_type = IRQ_TYPE_LEVEL_HIGH; |
2201 | break; |
2202 | case 1: /* official value for edge triggering. */ |
2203 | *out_type = IRQ_TYPE_EDGE_RISING; |
2204 | break; |
2205 | default: /* Nothing else is acceptable. */ |
2206 | return -EINVAL; |
2207 | } |
2208 | |
2209 | *out_hwirq = intspec[0]; |
2210 | |
2211 | return 0; |
2212 | } |
2213 | |
2214 | static int octeon_irq_cib_map(struct irq_domain *d, |
2215 | unsigned int virq, irq_hw_number_t hw) |
2216 | { |
2217 | struct octeon_irq_cib_host_data *host_data = d->host_data; |
2218 | struct octeon_irq_cib_chip_data *cd; |
2219 | |
2220 | if (hw >= host_data->max_bits) { |
2221 | pr_err("ERROR: %s mapping %u is too big!\n" , |
2222 | irq_domain_get_of_node(d)->name, (unsigned)hw); |
2223 | return -EINVAL; |
2224 | } |
2225 | |
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
2227 | if (!cd) |
2228 | return -ENOMEM; |
2229 | |
2230 | cd->host_data = host_data; |
2231 | cd->bit = hw; |
2232 | |
	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
2236 | return 0; |
2237 | } |
2238 | |
2239 | static const struct irq_domain_ops octeon_irq_domain_cib_ops = { |
2240 | .map = octeon_irq_cib_map, |
2241 | .unmap = octeon_irq_free_cd, |
2242 | .xlate = octeon_irq_cib_xlat, |
2243 | }; |
2244 | |
2245 | /* Chain to real handler. */ |
2246 | static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) |
2247 | { |
2248 | u64 en; |
2249 | u64 raw; |
2250 | u64 bits; |
2251 | int i; |
2252 | int irq; |
2253 | struct irq_domain *cib_domain = data; |
2254 | struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; |
2255 | |
2256 | en = cvmx_read_csr(host_data->en_reg); |
2257 | raw = cvmx_read_csr(host_data->raw_reg); |
2258 | |
2259 | bits = en & raw; |
2260 | |
2261 | for (i = 0; i < host_data->max_bits; i++) { |
2262 | if ((bits & 1ull << i) == 0) |
2263 | continue; |
		irq = irq_find_mapping(cib_domain, i);
2265 | if (!irq) { |
2266 | unsigned long flags; |
2267 | |
2268 | pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n" , |
2269 | i, host_data->raw_reg); |
2270 | raw_spin_lock_irqsave(&host_data->lock, flags); |
2271 | en = cvmx_read_csr(host_data->en_reg); |
2272 | en &= ~(1ull << i); |
2273 | cvmx_write_csr(host_data->en_reg, en); |
2274 | cvmx_write_csr(host_data->raw_reg, 1ull << i); |
2275 | raw_spin_unlock_irqrestore(&host_data->lock, flags); |
2276 | } else { |
2277 | struct irq_desc *desc = irq_to_desc(irq); |
2278 | struct irq_data *irq_data = irq_desc_get_irq_data(desc); |
2279 | /* If edge, acknowledge the bit we will be sending. */ |
			if (irqd_get_trigger_type(irq_data) &
2281 | IRQ_TYPE_EDGE_BOTH) |
2282 | cvmx_write_csr(host_data->raw_reg, 1ull << i); |
2283 | generic_handle_irq_desc(desc); |
2284 | } |
2285 | } |
2286 | |
2287 | return IRQ_HANDLED; |
2288 | } |
2289 | |
2290 | static int __init octeon_irq_init_cib(struct device_node *ciu_node, |
2291 | struct device_node *parent) |
2292 | { |
2293 | struct resource res; |
2294 | u32 val; |
2295 | struct octeon_irq_cib_host_data *host_data; |
2296 | int parent_irq; |
2297 | int r; |
2298 | struct irq_domain *cib_domain; |
2299 | |
	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
2303 | ciu_node); |
2304 | return -EINVAL; |
2305 | } |
2306 | |
	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
2308 | if (!host_data) |
2309 | return -ENOMEM; |
2310 | raw_spin_lock_init(&host_data->lock); |
2311 | |
	r = of_address_to_resource(ciu_node, 0, &res);
	if (r) {
		pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
2315 | return r; |
2316 | } |
	host_data->raw_reg = (u64)phys_to_virt(res.start);
2318 | |
	r = of_address_to_resource(ciu_node, 1, &res);
	if (r) {
		pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
2322 | return r; |
2323 | } |
	host_data->en_reg = (u64)phys_to_virt(res.start);
2325 | |
	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
2329 | ciu_node); |
2330 | return r; |
2331 | } |
2332 | host_data->max_bits = val; |
2333 | |
	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
2337 | if (!cib_domain) { |
2338 | pr_err("ERROR: Couldn't irq_domain_add_linear()\n" ); |
2339 | return -ENOMEM; |
2340 | } |
2341 | |
2342 | cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ |
2343 | cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ |
2344 | |
	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
2347 | if (r) { |
2348 | pr_err("request_irq cib failed %d\n" , r); |
2349 | return r; |
2350 | } |
2351 | pr_info("CIB interrupt controller probed: %llx %d\n" , |
2352 | host_data->raw_reg, host_data->max_bits); |
2353 | return 0; |
2354 | } |
2355 | |
2356 | int octeon_irq_ciu3_xlat(struct irq_domain *d, |
2357 | struct device_node *node, |
2358 | const u32 *intspec, |
2359 | unsigned int intsize, |
2360 | unsigned long *out_hwirq, |
2361 | unsigned int *out_type) |
2362 | { |
2363 | struct octeon_ciu3_info *ciu3_info = d->host_data; |
2364 | unsigned int hwirq, type, intsn_major; |
2365 | union cvmx_ciu3_iscx_ctl isc; |
2366 | |
2367 | if (intsize < 2) |
2368 | return -EINVAL; |
2369 | hwirq = intspec[0]; |
2370 | type = intspec[1]; |
2371 | |
2372 | if (hwirq >= (1 << 20)) |
2373 | return -EINVAL; |
2374 | |
2375 | intsn_major = hwirq >> 12; |
2376 | switch (intsn_major) { |
2377 | case 0x04: /* Software handled separately. */ |
2378 | return -EINVAL; |
2379 | default: |
2380 | break; |
2381 | } |
2382 | |
2383 | isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq)); |
2384 | if (!isc.s.imp) |
2385 | return -EINVAL; |
2386 | |
2387 | switch (type) { |
2388 | case 4: /* official value for level triggering. */ |
2389 | *out_type = IRQ_TYPE_LEVEL_HIGH; |
2390 | break; |
2391 | case 0: /* unofficial value, but we might as well let it work. */ |
2392 | case 1: /* official value for edge triggering. */ |
2393 | *out_type = IRQ_TYPE_EDGE_RISING; |
2394 | break; |
2395 | default: /* Nothing else is acceptable. */ |
2396 | return -EINVAL; |
2397 | } |
2398 | |
2399 | *out_hwirq = hwirq; |
2400 | |
2401 | return 0; |
2402 | } |
2403 | |
2404 | void octeon_irq_ciu3_enable(struct irq_data *data) |
2405 | { |
2406 | int cpu; |
2407 | union cvmx_ciu3_iscx_ctl isc_ctl; |
2408 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2409 | u64 isc_ctl_addr; |
2410 | |
2411 | struct octeon_ciu_chip_data *cd; |
2412 | |
2413 | cpu = next_cpu_for_irq(data); |
2414 | |
	cd = irq_data_get_irq_chip_data(data);
2416 | |
2417 | isc_w1c.u64 = 0; |
2418 | isc_w1c.s.en = 1; |
2419 | cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64); |
2420 | |
2421 | isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn); |
2422 | isc_ctl.u64 = 0; |
2423 | isc_ctl.s.en = 1; |
2424 | isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu); |
2425 | cvmx_write_csr(isc_ctl_addr, isc_ctl.u64); |
2426 | cvmx_read_csr(isc_ctl_addr); |
2427 | } |
2428 | |
2429 | void octeon_irq_ciu3_disable(struct irq_data *data) |
2430 | { |
2431 | u64 isc_ctl_addr; |
2432 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2433 | |
2434 | struct octeon_ciu_chip_data *cd; |
2435 | |
	cd = irq_data_get_irq_chip_data(data);
2437 | |
2438 | isc_w1c.u64 = 0; |
2439 | isc_w1c.s.en = 1; |
2440 | |
2441 | isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn); |
2442 | cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64); |
2443 | cvmx_write_csr(isc_ctl_addr, 0); |
2444 | cvmx_read_csr(isc_ctl_addr); |
2445 | } |
2446 | |
2447 | void octeon_irq_ciu3_ack(struct irq_data *data) |
2448 | { |
2449 | u64 isc_w1c_addr; |
2450 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2451 | struct octeon_ciu_chip_data *cd; |
	u32 trigger_type = irqd_get_trigger_type(data);
2453 | |
2454 | /* |
2455 | * We use a single irq_chip, so we have to do nothing to ack a |
2456 | * level interrupt. |
2457 | */ |
2458 | if (!(trigger_type & IRQ_TYPE_EDGE_BOTH)) |
2459 | return; |
2460 | |
	cd = irq_data_get_irq_chip_data(data);
2462 | |
2463 | isc_w1c.u64 = 0; |
2464 | isc_w1c.s.raw = 1; |
2465 | |
2466 | isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn); |
2467 | cvmx_write_csr(isc_w1c_addr, isc_w1c.u64); |
2468 | cvmx_read_csr(isc_w1c_addr); |
2469 | } |
2470 | |
2471 | void octeon_irq_ciu3_mask(struct irq_data *data) |
2472 | { |
2473 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2474 | u64 isc_w1c_addr; |
2475 | struct octeon_ciu_chip_data *cd; |
2476 | |
	cd = irq_data_get_irq_chip_data(data);
2478 | |
2479 | isc_w1c.u64 = 0; |
2480 | isc_w1c.s.en = 1; |
2481 | |
2482 | isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn); |
2483 | cvmx_write_csr(isc_w1c_addr, isc_w1c.u64); |
2484 | cvmx_read_csr(isc_w1c_addr); |
2485 | } |
2486 | |
2487 | void octeon_irq_ciu3_mask_ack(struct irq_data *data) |
2488 | { |
2489 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2490 | u64 isc_w1c_addr; |
2491 | struct octeon_ciu_chip_data *cd; |
	u32 trigger_type = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);
2495 | |
2496 | isc_w1c.u64 = 0; |
2497 | isc_w1c.s.en = 1; |
2498 | |
2499 | /* |
2500 | * We use a single irq_chip, so only ack an edge (!level) |
2501 | * interrupt. |
2502 | */ |
2503 | if (trigger_type & IRQ_TYPE_EDGE_BOTH) |
2504 | isc_w1c.s.raw = 1; |
2505 | |
2506 | isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn); |
2507 | cvmx_write_csr(isc_w1c_addr, isc_w1c.u64); |
2508 | cvmx_read_csr(isc_w1c_addr); |
2509 | } |
2510 | |
2511 | #ifdef CONFIG_SMP |
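/*
 * CIU3 affinity is restricted to CPUs on the CIU's own node.  The
 * source is re-targeted by rewriting its ISC_CTL with the IP2 IDT of
 * the first CPU in the destination mask.
 */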
2512 | static int octeon_irq_ciu3_set_affinity(struct irq_data *data, |
2513 | const struct cpumask *dest, bool force) |
2514 | { |
2515 | union cvmx_ciu3_iscx_ctl isc_ctl; |
2516 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2517 | u64 isc_ctl_addr; |
2518 | int cpu; |
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
2523 | return -EINVAL; |
2524 | |
2525 | if (!enable_one) |
2526 | return IRQ_SET_MASK_OK; |
2527 | |
	cd = irq_data_get_irq_chip_data(data);
	cpu = cpumask_first(dest);
2530 | if (cpu >= nr_cpu_ids) |
2531 | cpu = smp_processor_id(); |
2532 | cd->current_cpu = cpu; |
2533 | |
2534 | isc_w1c.u64 = 0; |
2535 | isc_w1c.s.en = 1; |
2536 | cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64); |
2537 | |
2538 | isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn); |
2539 | isc_ctl.u64 = 0; |
2540 | isc_ctl.s.en = 1; |
2541 | isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu); |
2542 | cvmx_write_csr(isc_ctl_addr, isc_ctl.u64); |
2543 | cvmx_read_csr(isc_ctl_addr); |
2544 | |
2545 | return IRQ_SET_MASK_OK; |
2546 | } |
2547 | #endif |
2548 | |
2549 | static struct irq_chip octeon_irq_chip_ciu3 = { |
2550 | .name = "CIU3" , |
2551 | .irq_startup = edge_startup, |
2552 | .irq_enable = octeon_irq_ciu3_enable, |
2553 | .irq_disable = octeon_irq_ciu3_disable, |
2554 | .irq_ack = octeon_irq_ciu3_ack, |
2555 | .irq_mask = octeon_irq_ciu3_mask, |
2556 | .irq_mask_ack = octeon_irq_ciu3_mask_ack, |
2557 | .irq_unmask = octeon_irq_ciu3_enable, |
2558 | .irq_set_type = octeon_irq_ciu_set_type, |
2559 | #ifdef CONFIG_SMP |
2560 | .irq_set_affinity = octeon_irq_ciu3_set_affinity, |
2561 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
2562 | #endif |
2563 | }; |
2564 | |
2565 | int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq, |
2566 | irq_hw_number_t hw, struct irq_chip *chip) |
2567 | { |
2568 | struct octeon_ciu3_info *ciu3_info = d->host_data; |
	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
						       ciu3_info->node);
2571 | if (!cd) |
2572 | return -ENOMEM; |
2573 | cd->intsn = hw; |
2574 | cd->current_cpu = -1; |
2575 | cd->ciu3_addr = ciu3_info->ciu3_addr; |
2576 | cd->ciu_node = ciu3_info->node; |
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
	irq_set_chip_data(virq, cd);
2579 | |
2580 | return 0; |
2581 | } |
2582 | |
2583 | static int octeon_irq_ciu3_map(struct irq_domain *d, |
2584 | unsigned int virq, irq_hw_number_t hw) |
2585 | { |
	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
2587 | } |
2588 | |
2589 | static const struct irq_domain_ops octeon_dflt_domain_ciu3_ops = { |
2590 | .map = octeon_irq_ciu3_map, |
2591 | .unmap = octeon_irq_free_cd, |
2592 | .xlate = octeon_irq_ciu3_xlat, |
2593 | }; |
2594 | |
2595 | static void octeon_irq_ciu3_ip2(void) |
2596 | { |
2597 | union cvmx_ciu3_destx_pp_int dest_pp_int; |
2598 | struct octeon_ciu3_info *ciu3_info; |
2599 | u64 ciu3_addr; |
2600 | |
2601 | ciu3_info = __this_cpu_read(octeon_ciu3_info); |
2602 | ciu3_addr = ciu3_info->ciu3_addr; |
2603 | |
2604 | dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num())); |
2605 | |
2606 | if (likely(dest_pp_int.s.intr)) { |
2607 | irq_hw_number_t intsn = dest_pp_int.s.intsn; |
2608 | irq_hw_number_t hw; |
2609 | struct irq_domain *domain; |
2610 | /* Get the domain to use from the major block */ |
2611 | int block = intsn >> 12; |
2612 | int ret; |
2613 | |
2614 | domain = ciu3_info->domain[block]; |
2615 | if (ciu3_info->intsn2hw[block]) |
2616 | hw = ciu3_info->intsn2hw[block](domain, intsn); |
2617 | else |
2618 | hw = intsn; |
2619 | |
2620 | irq_enter(); |
		ret = generic_handle_domain_irq(domain, hw);
2622 | irq_exit(); |
2623 | |
2624 | if (ret < 0) { |
2625 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2626 | u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn); |
2627 | |
2628 | isc_w1c.u64 = 0; |
2629 | isc_w1c.s.en = 1; |
2630 | cvmx_write_csr(isc_w1c_addr, isc_w1c.u64); |
2631 | cvmx_read_csr(isc_w1c_addr); |
2632 | spurious_interrupt(); |
2633 | } |
2634 | } else { |
2635 | spurious_interrupt(); |
2636 | } |
2637 | } |
2638 | |
2639 | /* |
2640 | * 10 mbox per core starting from zero. |
2641 | * Base mbox is core * 10 |
2642 | */ |
2643 | static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core) |
2644 | { |
2645 | /* SW (mbox) are 0x04 in bits 12..19 */ |
2646 | return 0x04000 + CIU3_MBOX_PER_CORE * core; |
2647 | } |
2648 | |
2649 | static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox) |
2650 | { |
2651 | return octeon_irq_ciu3_base_mbox_intsn(core) + mbox; |
2652 | } |
2653 | |
2654 | static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox) |
2655 | { |
2656 | int local_core = octeon_coreid_for_cpu(cpu) & 0x3f; |
2657 | |
	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
2659 | } |
2660 | |
2661 | static void octeon_irq_ciu3_mbox(void) |
2662 | { |
2663 | union cvmx_ciu3_destx_pp_int dest_pp_int; |
2664 | struct octeon_ciu3_info *ciu3_info; |
2665 | u64 ciu3_addr; |
2666 | int core = cvmx_get_local_core_num(); |
2667 | |
2668 | ciu3_info = __this_cpu_read(octeon_ciu3_info); |
2669 | ciu3_addr = ciu3_info->ciu3_addr; |
2670 | |
2671 | dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core)); |
2672 | |
2673 | if (likely(dest_pp_int.s.intr)) { |
2674 | irq_hw_number_t intsn = dest_pp_int.s.intsn; |
2675 | int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core); |
2676 | |
2677 | if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) { |
2678 | do_IRQ(mbox + OCTEON_IRQ_MBOX0); |
2679 | } else { |
2680 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2681 | u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn); |
2682 | |
2683 | isc_w1c.u64 = 0; |
2684 | isc_w1c.s.en = 1; |
2685 | cvmx_write_csr(isc_w1c_addr, isc_w1c.u64); |
2686 | cvmx_read_csr(isc_w1c_addr); |
2687 | spurious_interrupt(); |
2688 | } |
2689 | } else { |
2690 | spurious_interrupt(); |
2691 | } |
2692 | } |
2693 | |
2694 | void octeon_ciu3_mbox_send(int cpu, unsigned int mbox) |
2695 | { |
2696 | struct octeon_ciu3_info *ciu3_info; |
2697 | unsigned int intsn; |
2698 | union cvmx_ciu3_iscx_w1s isc_w1s; |
2699 | u64 isc_w1s_addr; |
2700 | |
2701 | if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE)) |
2702 | return; |
2703 | |
2704 | intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox); |
2705 | ciu3_info = per_cpu(octeon_ciu3_info, cpu); |
2706 | isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn); |
2707 | |
2708 | isc_w1s.u64 = 0; |
2709 | isc_w1s.s.raw = 1; |
2710 | |
2711 | cvmx_write_csr(isc_w1s_addr, isc_w1s.u64); |
2712 | cvmx_read_csr(isc_w1s_addr); |
2713 | } |
2714 | |
2715 | static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en) |
2716 | { |
2717 | struct octeon_ciu3_info *ciu3_info; |
2718 | unsigned int intsn; |
2719 | u64 isc_ctl_addr, isc_w1c_addr; |
2720 | union cvmx_ciu3_iscx_ctl isc_ctl; |
2721 | unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0; |
2722 | |
2723 | intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox); |
2724 | ciu3_info = per_cpu(octeon_ciu3_info, cpu); |
2725 | isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn); |
2726 | isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn); |
2727 | |
2728 | isc_ctl.u64 = 0; |
2729 | isc_ctl.s.en = 1; |
2730 | |
2731 | cvmx_write_csr(isc_w1c_addr, isc_ctl.u64); |
2732 | cvmx_write_csr(isc_ctl_addr, 0); |
2733 | if (en) { |
2734 | unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu); |
2735 | |
2736 | isc_ctl.u64 = 0; |
2737 | isc_ctl.s.en = 1; |
2738 | isc_ctl.s.idt = idt; |
2739 | cvmx_write_csr(isc_ctl_addr, isc_ctl.u64); |
2740 | } |
2741 | cvmx_read_csr(isc_ctl_addr); |
2742 | } |
2743 | |
2744 | static void octeon_irq_ciu3_mbox_enable(struct irq_data *data) |
2745 | { |
2746 | int cpu; |
2747 | unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0; |
2748 | |
2749 | WARN_ON(mbox >= CIU3_MBOX_PER_CORE); |
2750 | |
2751 | for_each_online_cpu(cpu) |
		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
2753 | } |
2754 | |
2755 | static void octeon_irq_ciu3_mbox_disable(struct irq_data *data) |
2756 | { |
2757 | int cpu; |
2758 | unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0; |
2759 | |
2760 | WARN_ON(mbox >= CIU3_MBOX_PER_CORE); |
2761 | |
2762 | for_each_online_cpu(cpu) |
		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
2764 | } |
2765 | |
2766 | static void octeon_irq_ciu3_mbox_ack(struct irq_data *data) |
2767 | { |
2768 | struct octeon_ciu3_info *ciu3_info; |
2769 | unsigned int intsn; |
2770 | u64 isc_w1c_addr; |
2771 | union cvmx_ciu3_iscx_w1c isc_w1c; |
2772 | unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0; |
2773 | |
	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
2775 | |
2776 | isc_w1c.u64 = 0; |
2777 | isc_w1c.s.raw = 1; |
2778 | |
2779 | ciu3_info = __this_cpu_read(octeon_ciu3_info); |
2780 | isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn); |
2781 | cvmx_write_csr(isc_w1c_addr, isc_w1c.u64); |
2782 | cvmx_read_csr(isc_w1c_addr); |
2783 | } |
2784 | |
2785 | static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data) |
2786 | { |
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
2788 | } |
2789 | |
2790 | static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data) |
2791 | { |
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
2793 | } |
2794 | |
2795 | static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info) |
2796 | { |
2797 | u64 b = ciu3_info->ciu3_addr; |
2798 | int idt_ip2, idt_ip3, idt_ip4; |
2799 | int unused_idt2; |
2800 | int core = cvmx_get_local_core_num(); |
2801 | int i; |
2802 | |
2803 | __this_cpu_write(octeon_ciu3_info, ciu3_info); |
2804 | |
2805 | /* |
2806 | * 4 idt per core starting from 1 because zero is reserved. |
2807 | * Base idt per core is 4 * core + 1 |
2808 | */ |
2809 | idt_ip2 = core * 4 + 1; |
2810 | idt_ip3 = core * 4 + 2; |
2811 | idt_ip4 = core * 4 + 3; |
2812 | unused_idt2 = core * 4 + 4; |
2813 | __this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2); |
2814 | __this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3); |
2815 | |
2816 | /* ip2 interrupts for this CPU */ |
2817 | cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0); |
2818 | cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core); |
2819 | cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0); |
2820 | |
2821 | /* ip3 interrupts for this CPU */ |
2822 | cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1); |
2823 | cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core); |
2824 | cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0); |
2825 | |
2826 | /* ip4 interrupts for this CPU */ |
2827 | cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2); |
2828 | cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0); |
2829 | cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0); |
2830 | |
2831 | cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0); |
2832 | cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0); |
2833 | cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0); |
2834 | |
2835 | for (i = 0; i < CIU3_MBOX_PER_CORE; i++) { |
		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
2837 | |
2838 | cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2); |
2839 | cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0); |
2840 | } |
2841 | |
2842 | return 0; |
2843 | } |
2844 | |
2845 | static void octeon_irq_setup_secondary_ciu3(void) |
2846 | { |
2847 | struct octeon_ciu3_info *ciu3_info; |
2848 | |
2849 | ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()]; |
2850 | octeon_irq_ciu3_alloc_resources(ciu3_info); |
2851 | irq_cpu_online(); |
2852 | |
2853 | /* Enable the CIU lines */ |
2854 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
2855 | if (octeon_irq_use_ip4) |
2856 | set_c0_status(STATUSF_IP4); |
2857 | else |
2858 | clear_c0_status(STATUSF_IP4); |
2859 | } |
2860 | |
2861 | static struct irq_chip octeon_irq_chip_ciu3_mbox = { |
2862 | .name = "CIU3-M" , |
2863 | .irq_enable = octeon_irq_ciu3_mbox_enable, |
2864 | .irq_disable = octeon_irq_ciu3_mbox_disable, |
2865 | .irq_ack = octeon_irq_ciu3_mbox_ack, |
2866 | |
2867 | .irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online, |
2868 | .irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline, |
2869 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
2870 | }; |
2871 | |
2872 | static int __init octeon_irq_init_ciu3(struct device_node *ciu_node, |
2873 | struct device_node *parent) |
2874 | { |
2875 | int i, ret; |
2876 | int node; |
2877 | struct irq_domain *domain; |
2878 | struct octeon_ciu3_info *ciu3_info; |
2879 | struct resource res; |
2880 | u64 base_addr; |
2881 | union cvmx_ciu3_const consts; |
2882 | |
2883 | node = 0; /* of_node_to_nid(ciu_node); */ |
	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
2885 | |
2886 | if (!ciu3_info) |
2887 | return -ENOMEM; |
2888 | |
	ret = of_address_to_resource(ciu_node, 0, &res);
2890 | if (WARN_ON(ret)) |
2891 | return ret; |
2892 | |
	ciu3_info->ciu3_addr = base_addr = (u64)phys_to_virt(res.start);
2894 | ciu3_info->node = node; |
2895 | |
2896 | consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST); |
2897 | |
2898 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3; |
2899 | |
2900 | octeon_irq_ip2 = octeon_irq_ciu3_ip2; |
2901 | octeon_irq_ip3 = octeon_irq_ciu3_mbox; |
2902 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
2903 | |
2904 | if (node == cvmx_get_node_num()) { |
2905 | /* Mips internal */ |
2906 | octeon_irq_init_core(); |
2907 | |
2908 | /* Only do per CPU things if it is the CIU of the boot node. */ |
2909 | i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node); |
2910 | WARN_ON(i < 0); |
2911 | |
2912 | for (i = 0; i < 8; i++) |
2913 | irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0, |
2914 | &octeon_irq_chip_ciu3_mbox, handle_percpu_irq); |
2915 | } |
2916 | |
2917 | /* |
2918 | * Initialize all domains to use the default domain. Specific major |
2919 | * blocks will overwrite the default domain as needed. |
2920 | */ |
	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
				     ciu3_info);
2923 | for (i = 0; i < MAX_CIU3_DOMAINS; i++) |
2924 | ciu3_info->domain[i] = domain; |
2925 | |
2926 | octeon_ciu3_info_per_node[node] = ciu3_info; |
2927 | |
2928 | if (node == cvmx_get_node_num()) { |
2929 | /* Only do per CPU things if it is the CIU of the boot node. */ |
2930 | octeon_irq_ciu3_alloc_resources(ciu3_info); |
2931 | if (node == 0) |
			irq_set_default_host(domain);
2933 | |
2934 | octeon_irq_use_ip4 = false; |
2935 | /* Enable the CIU lines */ |
2936 | set_c0_status(STATUSF_IP2 | STATUSF_IP3); |
2937 | clear_c0_status(STATUSF_IP4); |
2938 | } |
2939 | |
2940 | return 0; |
2941 | } |
2942 | |
2943 | static struct of_device_id ciu_types[] __initdata = { |
2944 | {.compatible = "cavium,octeon-3860-ciu" , .data = octeon_irq_init_ciu}, |
2945 | {.compatible = "cavium,octeon-3860-gpio" , .data = octeon_irq_init_gpio}, |
2946 | {.compatible = "cavium,octeon-6880-ciu2" , .data = octeon_irq_init_ciu2}, |
2947 | {.compatible = "cavium,octeon-7890-ciu3" , .data = octeon_irq_init_ciu3}, |
2948 | {.compatible = "cavium,octeon-7130-cib" , .data = octeon_irq_init_cib}, |
2949 | {} |
2950 | }; |
2951 | |
2952 | void __init arch_init_irq(void) |
2953 | { |
2954 | #ifdef CONFIG_SMP |
2955 | /* Set the default affinity to the boot cpu. */ |
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
2960 | } |
2961 | |
2962 | asmlinkage void plat_irq_dispatch(void) |
2963 | { |
2964 | unsigned long cop0_cause; |
2965 | unsigned long cop0_status; |
2966 | |
2967 | while (1) { |
2968 | cop0_cause = read_c0_cause(); |
2969 | cop0_status = read_c0_status(); |
2970 | cop0_cause &= cop0_status; |
2971 | cop0_cause &= ST0_IM; |
2972 | |
2973 | if (cop0_cause & STATUSF_IP2) |
2974 | octeon_irq_ip2(); |
2975 | else if (cop0_cause & STATUSF_IP3) |
2976 | octeon_irq_ip3(); |
2977 | else if (cop0_cause & STATUSF_IP4) |
2978 | octeon_irq_ip4(); |
2979 | else if (cop0_cause) |
2980 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
2981 | else |
2982 | break; |
2983 | } |
2984 | } |
2985 | |
2986 | #ifdef CONFIG_HOTPLUG_CPU |
2987 | |
2988 | void octeon_fixup_irqs(void) |
2989 | { |
2990 | irq_cpu_offline(); |
2991 | } |
2992 | |
2993 | #endif /* CONFIG_HOTPLUG_CPU */ |
2994 | |
2995 | struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block) |
2996 | { |
2997 | struct octeon_ciu3_info *ciu3_info; |
2998 | |
2999 | ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK]; |
3000 | return ciu3_info->domain[block]; |
3001 | } |
3002 | EXPORT_SYMBOL(octeon_irq_get_block_domain); |
3003 | |