1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. |
5 | * |
6 | * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> |
7 | * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr> |
8 | */ |
9 | |
10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/irq.h> |
14 | #include <linux/spinlock.h> |
15 | #include <asm/irq_cpu.h> |
16 | #include <asm/mipsregs.h> |
17 | #include <bcm63xx_cpu.h> |
18 | #include <bcm63xx_regs.h> |
19 | #include <bcm63xx_io.h> |
20 | #include <bcm63xx_irq.h> |
21 | |
22 | |
23 | static DEFINE_SPINLOCK(ipic_lock); |
24 | static DEFINE_SPINLOCK(epic_lock); |
25 | |
26 | static u32 irq_stat_addr[2]; |
27 | static u32 irq_mask_addr[2]; |
28 | static void (*dispatch_internal)(int cpu); |
29 | static int is_ext_irq_cascaded; |
30 | static unsigned int ext_irq_count; |
31 | static unsigned int ext_irq_start, ext_irq_end; |
32 | static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2; |
33 | static void (*internal_irq_mask)(struct irq_data *d); |
34 | static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m); |
35 | |
36 | |
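/*
 * External IRQ configuration is split across two PERF registers on
 * chips with more than four external IRQs: lines 0-3 live in
 * ext_irq_cfg_reg1, lines 4 and up in ext_irq_cfg_reg2 (only the 6368
 * sets the latter).
 */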
37 | static inline u32 get_ext_irq_perf_reg(int irq) |
38 | { |
39 | if (irq < 4) |
40 | return ext_irq_cfg_reg1; |
41 | return ext_irq_cfg_reg2; |
42 | } |
43 | |
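/*
 * When external IRQs are cascaded through the internal controller,
 * status bits in the [ext_irq_start, ext_irq_end] range belong to the
 * external lines and are remapped into the IRQ_EXTERNAL_BASE space
 * before being handed to do_IRQ().
 */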
44 | static inline void handle_internal(int intbit) |
45 | { |
46 | if (is_ext_irq_cascaded && |
47 | intbit >= ext_irq_start && intbit <= ext_irq_end) |
48 | do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE); |
49 | else |
50 | do_IRQ(intbit + IRQ_INTERNAL_BASE); |
51 | } |
52 | |
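/*
 * An IRQ is enabled on a given CPU only if that CPU is online and, on
 * SMP, only if the CPU is part of the explicit mask passed by the
 * caller or, failing that, of the IRQ's configured affinity mask.
 */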
53 | static inline int enable_irq_for_cpu(int cpu, struct irq_data *d, |
54 | const struct cpumask *m) |
55 | { |
56 | bool enable = cpu_online(cpu); |
57 | |
58 | #ifdef CONFIG_SMP |
59 | if (m) |
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
63 | #endif |
64 | return enable; |
65 | } |
66 | |
67 | /* |
68 | * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not |
69 | * prioritize any interrupt relatively to another. the static counter |
70 | * will resume the loop where it ended the last time we left this |
71 | * function. |
72 | */ |
73 | |
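/*
 * BUILD_IPIC_INTERNAL() expands the dispatch/mask/unmask helpers for a
 * given status/mask register width (32 or 64 bits). The hardware lays
 * the words out in reverse, hence the reversed store into pending[]
 * below and the (irq / 32) ^ (width/32 - 1) register index flip in the
 * mask/unmask helpers.
 */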
74 | #define BUILD_IPIC_INTERNAL(width) \ |
75 | static void __dispatch_internal_##width(int cpu) \ |
76 | { \ |
77 | u32 pending[width / 32]; \ |
78 | unsigned int src, tgt; \ |
79 | bool irqs_pending = false; \ |
80 | static unsigned int i[2]; \ |
81 | unsigned int *next = &i[cpu]; \ |
82 | unsigned long flags; \ |
83 | \ |
84 | /* read registers in reverse order */ \ |
85 | spin_lock_irqsave(&ipic_lock, flags); \ |
86 | for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \ |
87 | u32 val; \ |
88 | \ |
89 | val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \ |
90 | val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \ |
91 | pending[--tgt] = val; \ |
92 | \ |
93 | if (val) \ |
94 | irqs_pending = true; \ |
95 | } \ |
96 | spin_unlock_irqrestore(&ipic_lock, flags); \ |
97 | \ |
98 | if (!irqs_pending) \ |
99 | return; \ |
100 | \ |
101 | while (1) { \ |
102 | unsigned int to_call = *next; \ |
103 | \ |
104 | *next = (*next + 1) & (width - 1); \ |
105 | if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \ |
106 | handle_internal(to_call); \ |
107 | break; \ |
108 | } \ |
109 | } \ |
110 | } \ |
111 | \ |
112 | static void __internal_irq_mask_##width(struct irq_data *d) \ |
113 | { \ |
114 | u32 val; \ |
115 | unsigned irq = d->irq - IRQ_INTERNAL_BASE; \ |
116 | unsigned reg = (irq / 32) ^ (width/32 - 1); \ |
117 | unsigned bit = irq & 0x1f; \ |
118 | unsigned long flags; \ |
119 | int cpu; \ |
120 | \ |
121 | spin_lock_irqsave(&ipic_lock, flags); \ |
122 | for_each_present_cpu(cpu) { \ |
123 | if (!irq_mask_addr[cpu]) \ |
124 | break; \ |
125 | \ |
126 | val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\ |
127 | val &= ~(1 << bit); \ |
128 | bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\ |
129 | } \ |
130 | spin_unlock_irqrestore(&ipic_lock, flags); \ |
131 | } \ |
132 | \ |
133 | static void __internal_irq_unmask_##width(struct irq_data *d, \ |
134 | const struct cpumask *m) \ |
135 | { \ |
136 | u32 val; \ |
137 | unsigned irq = d->irq - IRQ_INTERNAL_BASE; \ |
138 | unsigned reg = (irq / 32) ^ (width/32 - 1); \ |
139 | unsigned bit = irq & 0x1f; \ |
140 | unsigned long flags; \ |
141 | int cpu; \ |
142 | \ |
143 | spin_lock_irqsave(&ipic_lock, flags); \ |
144 | for_each_present_cpu(cpu) { \ |
145 | if (!irq_mask_addr[cpu]) \ |
146 | break; \ |
147 | \ |
148 | val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\ |
149 | if (enable_irq_for_cpu(cpu, d, m)) \ |
150 | val |= (1 << bit); \ |
151 | else \ |
152 | val &= ~(1 << bit); \ |
153 | bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\ |
154 | } \ |
155 | spin_unlock_irqrestore(&ipic_lock, flags); \ |
156 | } |
157 | |
158 | BUILD_IPIC_INTERNAL(32); |
159 | BUILD_IPIC_INTERNAL(64); |
160 | |
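/*
 * Low-level MIPS dispatch: IP2 carries the first internal register
 * set, IP3 carries either the second set (when external IRQs are
 * cascaded) or external IRQ 0, and IP4-IP6 carry the remaining
 * non-cascaded external IRQs. Loop until no enabled cause bits remain.
 */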
161 | asmlinkage void plat_irq_dispatch(void) |
162 | { |
163 | u32 cause; |
164 | |
165 | do { |
166 | cause = read_c0_cause() & read_c0_status() & ST0_IM; |
167 | |
168 | if (!cause) |
169 | break; |
170 | |
171 | if (cause & CAUSEF_IP7) |
172 | do_IRQ(7); |
173 | if (cause & CAUSEF_IP0) |
174 | do_IRQ(0); |
175 | if (cause & CAUSEF_IP1) |
176 | do_IRQ(1); |
177 | if (cause & CAUSEF_IP2) |
178 | dispatch_internal(0); |
179 | if (is_ext_irq_cascaded) { |
180 | if (cause & CAUSEF_IP3) |
181 | dispatch_internal(1); |
182 | } else { |
183 | if (cause & CAUSEF_IP3) |
184 | do_IRQ(IRQ_EXT_0); |
185 | if (cause & CAUSEF_IP4) |
186 | do_IRQ(IRQ_EXT_1); |
187 | if (cause & CAUSEF_IP5) |
188 | do_IRQ(IRQ_EXT_2); |
189 | if (cause & CAUSEF_IP6) |
190 | do_IRQ(IRQ_EXT_3); |
191 | } |
192 | } while (1); |
193 | } |
194 | |
195 | /* |
196 | * internal IRQs operations: only mask/unmask on PERF irq mask |
197 | * register. |
198 | */ |
199 | static void bcm63xx_internal_irq_mask(struct irq_data *d) |
200 | { |
201 | internal_irq_mask(d); |
202 | } |
203 | |
204 | static void bcm63xx_internal_irq_unmask(struct irq_data *d) |
205 | { |
206 | internal_irq_unmask(d, NULL); |
207 | } |
208 | |
209 | /* |
210 | * external IRQs operations: mask/unmask and clear on PERF external |
211 | * irq control register. |
212 | */ |
213 | static void bcm63xx_external_irq_mask(struct irq_data *d) |
214 | { |
215 | unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; |
216 | u32 reg, regaddr; |
217 | unsigned long flags; |
218 | |
219 | regaddr = get_ext_irq_perf_reg(irq); |
220 | spin_lock_irqsave(&epic_lock, flags); |
221 | reg = bcm_perf_readl(regaddr); |
222 | |
223 | if (BCMCPU_IS_6348()) |
224 | reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4); |
225 | else |
226 | reg &= ~EXTIRQ_CFG_MASK(irq % 4); |
227 | |
228 | bcm_perf_writel(reg, regaddr); |
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
233 | } |
234 | |
235 | static void bcm63xx_external_irq_unmask(struct irq_data *d) |
236 | { |
237 | unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; |
238 | u32 reg, regaddr; |
239 | unsigned long flags; |
240 | |
241 | regaddr = get_ext_irq_perf_reg(irq); |
242 | spin_lock_irqsave(&epic_lock, flags); |
243 | reg = bcm_perf_readl(regaddr); |
244 | |
245 | if (BCMCPU_IS_6348()) |
246 | reg |= EXTIRQ_CFG_MASK_6348(irq % 4); |
247 | else |
248 | reg |= EXTIRQ_CFG_MASK(irq % 4); |
249 | |
250 | bcm_perf_writel(reg, regaddr); |
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
				    NULL);
256 | } |
257 | |
258 | static void bcm63xx_external_irq_clear(struct irq_data *d) |
259 | { |
260 | unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; |
261 | u32 reg, regaddr; |
262 | unsigned long flags; |
263 | |
264 | regaddr = get_ext_irq_perf_reg(irq); |
265 | spin_lock_irqsave(&epic_lock, flags); |
266 | reg = bcm_perf_readl(regaddr); |
267 | |
268 | if (BCMCPU_IS_6348()) |
269 | reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4); |
270 | else |
271 | reg |= EXTIRQ_CFG_CLEAR(irq % 4); |
272 | |
273 | bcm_perf_writel(reg, regaddr); |
	spin_unlock_irqrestore(&epic_lock, flags);
275 | } |
276 | |
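/*
 * Trigger types map onto three hardware bits: levelsense selects level
 * vs. edge triggering, sense selects high/rising vs. low/falling, and
 * bothedge makes edge mode fire on both edges.
 */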
277 | static int bcm63xx_external_irq_set_type(struct irq_data *d, |
278 | unsigned int flow_type) |
279 | { |
280 | unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; |
281 | u32 reg, regaddr; |
282 | int levelsense, sense, bothedge; |
283 | unsigned long flags; |
284 | |
285 | flow_type &= IRQ_TYPE_SENSE_MASK; |
286 | |
287 | if (flow_type == IRQ_TYPE_NONE) |
288 | flow_type = IRQ_TYPE_LEVEL_LOW; |
289 | |
290 | levelsense = sense = bothedge = 0; |
291 | switch (flow_type) { |
292 | case IRQ_TYPE_EDGE_BOTH: |
293 | bothedge = 1; |
294 | break; |
295 | |
296 | case IRQ_TYPE_EDGE_RISING: |
297 | sense = 1; |
298 | break; |
299 | |
300 | case IRQ_TYPE_EDGE_FALLING: |
301 | break; |
302 | |
303 | case IRQ_TYPE_LEVEL_HIGH: |
304 | levelsense = 1; |
305 | sense = 1; |
306 | break; |
307 | |
308 | case IRQ_TYPE_LEVEL_LOW: |
309 | levelsense = 1; |
310 | break; |
311 | |
312 | default: |
		pr_err("bogus flow type combination given!\n");
314 | return -EINVAL; |
315 | } |
316 | |
317 | regaddr = get_ext_irq_perf_reg(irq); |
318 | spin_lock_irqsave(&epic_lock, flags); |
319 | reg = bcm_perf_readl(regaddr); |
320 | irq %= 4; |
321 | |
322 | switch (bcm63xx_get_cpu_id()) { |
323 | case BCM6348_CPU_ID: |
324 | if (levelsense) |
325 | reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq); |
326 | else |
327 | reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq); |
328 | if (sense) |
329 | reg |= EXTIRQ_CFG_SENSE_6348(irq); |
330 | else |
331 | reg &= ~EXTIRQ_CFG_SENSE_6348(irq); |
332 | if (bothedge) |
333 | reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq); |
334 | else |
335 | reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq); |
336 | break; |
337 | |
338 | case BCM3368_CPU_ID: |
339 | case BCM6328_CPU_ID: |
340 | case BCM6338_CPU_ID: |
341 | case BCM6345_CPU_ID: |
342 | case BCM6358_CPU_ID: |
343 | case BCM6362_CPU_ID: |
344 | case BCM6368_CPU_ID: |
345 | if (levelsense) |
346 | reg |= EXTIRQ_CFG_LEVELSENSE(irq); |
347 | else |
348 | reg &= ~EXTIRQ_CFG_LEVELSENSE(irq); |
349 | if (sense) |
350 | reg |= EXTIRQ_CFG_SENSE(irq); |
351 | else |
352 | reg &= ~EXTIRQ_CFG_SENSE(irq); |
353 | if (bothedge) |
354 | reg |= EXTIRQ_CFG_BOTHEDGE(irq); |
355 | else |
356 | reg &= ~EXTIRQ_CFG_BOTHEDGE(irq); |
357 | break; |
358 | default: |
359 | BUG(); |
360 | } |
361 | |
362 | bcm_perf_writel(reg, regaddr); |
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);
370 | |
371 | return IRQ_SET_MASK_OK_NOCOPY; |
372 | } |
373 | |
374 | #ifdef CONFIG_SMP |
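/*
 * Changing affinity only needs to rewrite the per-CPU mask registers,
 * which the unmask path already does based on the destination mask; a
 * disabled IRQ is left alone since unmask will re-evaluate the
 * affinity when it is re-enabled.
 */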
375 | static int bcm63xx_internal_set_affinity(struct irq_data *data, |
376 | const struct cpumask *dest, |
377 | bool force) |
378 | { |
	if (!irqd_irq_disabled(data))
380 | internal_irq_unmask(data, dest); |
381 | |
382 | return 0; |
383 | } |
384 | #endif |
385 | |
386 | static struct irq_chip bcm63xx_internal_irq_chip = { |
	.name		= "bcm63xx_ipic",
388 | .irq_mask = bcm63xx_internal_irq_mask, |
389 | .irq_unmask = bcm63xx_internal_irq_unmask, |
390 | }; |
391 | |
392 | static struct irq_chip bcm63xx_external_irq_chip = { |
	.name		= "bcm63xx_epic",
394 | .irq_ack = bcm63xx_external_irq_clear, |
395 | |
396 | .irq_mask = bcm63xx_external_irq_mask, |
397 | .irq_unmask = bcm63xx_external_irq_unmask, |
398 | |
399 | .irq_set_type = bcm63xx_external_irq_set_type, |
400 | }; |
401 | |
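/*
 * Per-SoC setup: compute the status/mask register addresses (a second
 * set exists on chips that dispatch through two register banks),
 * record the external IRQ layout, and select the 32- or 64-bit helper
 * variants.
 */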
402 | static void bcm63xx_init_irq(void) |
403 | { |
404 | int irq_bits; |
405 | |
406 | irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF); |
407 | irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF); |
408 | irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF); |
409 | irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF); |
410 | |
411 | switch (bcm63xx_get_cpu_id()) { |
412 | case BCM3368_CPU_ID: |
413 | irq_stat_addr[0] += PERF_IRQSTAT_3368_REG; |
414 | irq_mask_addr[0] += PERF_IRQMASK_3368_REG; |
415 | irq_stat_addr[1] = 0; |
416 | irq_mask_addr[1] = 0; |
417 | irq_bits = 32; |
418 | ext_irq_count = 4; |
419 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; |
420 | break; |
421 | case BCM6328_CPU_ID: |
422 | irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0); |
423 | irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0); |
424 | irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1); |
425 | irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1); |
426 | irq_bits = 64; |
427 | ext_irq_count = 4; |
428 | is_ext_irq_cascaded = 1; |
429 | ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE; |
430 | ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE; |
431 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328; |
432 | break; |
433 | case BCM6338_CPU_ID: |
434 | irq_stat_addr[0] += PERF_IRQSTAT_6338_REG; |
435 | irq_mask_addr[0] += PERF_IRQMASK_6338_REG; |
436 | irq_stat_addr[1] = 0; |
437 | irq_mask_addr[1] = 0; |
438 | irq_bits = 32; |
439 | ext_irq_count = 4; |
440 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338; |
441 | break; |
442 | case BCM6345_CPU_ID: |
443 | irq_stat_addr[0] += PERF_IRQSTAT_6345_REG; |
444 | irq_mask_addr[0] += PERF_IRQMASK_6345_REG; |
445 | irq_stat_addr[1] = 0; |
446 | irq_mask_addr[1] = 0; |
447 | irq_bits = 32; |
448 | ext_irq_count = 4; |
449 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345; |
450 | break; |
451 | case BCM6348_CPU_ID: |
452 | irq_stat_addr[0] += PERF_IRQSTAT_6348_REG; |
453 | irq_mask_addr[0] += PERF_IRQMASK_6348_REG; |
454 | irq_stat_addr[1] = 0; |
455 | irq_mask_addr[1] = 0; |
456 | irq_bits = 32; |
457 | ext_irq_count = 4; |
458 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348; |
459 | break; |
460 | case BCM6358_CPU_ID: |
461 | irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0); |
462 | irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0); |
463 | irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1); |
464 | irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1); |
465 | irq_bits = 32; |
466 | ext_irq_count = 4; |
467 | is_ext_irq_cascaded = 1; |
468 | ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE; |
469 | ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; |
470 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; |
471 | break; |
472 | case BCM6362_CPU_ID: |
473 | irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0); |
474 | irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0); |
475 | irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1); |
476 | irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1); |
477 | irq_bits = 64; |
478 | ext_irq_count = 4; |
479 | is_ext_irq_cascaded = 1; |
480 | ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE; |
481 | ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE; |
482 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362; |
483 | break; |
484 | case BCM6368_CPU_ID: |
485 | irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0); |
486 | irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0); |
487 | irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1); |
488 | irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1); |
489 | irq_bits = 64; |
490 | ext_irq_count = 6; |
491 | is_ext_irq_cascaded = 1; |
492 | ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE; |
493 | ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE; |
494 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368; |
495 | ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368; |
496 | break; |
497 | default: |
498 | BUG(); |
499 | } |
500 | |
501 | if (irq_bits == 32) { |
502 | dispatch_internal = __dispatch_internal_32; |
503 | internal_irq_mask = __internal_irq_mask_32; |
504 | internal_irq_unmask = __internal_irq_unmask_32; |
505 | } else { |
506 | dispatch_internal = __dispatch_internal_64; |
507 | internal_irq_mask = __internal_irq_mask_64; |
508 | internal_irq_unmask = __internal_irq_unmask_64; |
509 | } |
510 | } |
511 | |
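/*
 * Wire everything up: MIPS CPU IRQs first, then the internal and
 * external irq_chips, and finally the cascade lines: IP2 always,
 * IP3-IP6 when external IRQs are not cascaded, and IP3 (plus affinity
 * support) when they are, under SMP.
 */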
512 | void __init arch_init_irq(void) |
513 | { |
514 | int i, irq; |
515 | |
516 | bcm63xx_init_irq(); |
517 | mips_cpu_irq_init(); |
518 | for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i) |
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);
521 | |
522 | for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i) |
523 | irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip, |
524 | handle_edge_irq); |
525 | |
526 | if (!is_ext_irq_cascaded) { |
527 | for (i = 3; i < 3 + ext_irq_count; ++i) { |
528 | irq = MIPS_CPU_IRQ_BASE + i; |
			if (request_irq(irq, no_action, IRQF_NO_THREAD,
					"cascade_extirq", NULL)) {
				pr_err("Failed to request irq %d (cascade_extirq)\n",
				       irq);
533 | } |
534 | } |
535 | } |
536 | |
537 | irq = MIPS_CPU_IRQ_BASE + 2; |
	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
540 | #ifdef CONFIG_SMP |
541 | if (is_ext_irq_cascaded) { |
542 | irq = MIPS_CPU_IRQ_BASE + 3; |
		if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
				NULL))
			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
546 | bcm63xx_internal_irq_chip.irq_set_affinity = |
547 | bcm63xx_internal_set_affinity; |
548 | |
		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
551 | } |
552 | #endif |
553 | } |
554 | |