// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sh/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <linux/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
#include <asm/softirq_stack.h>

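/*
 * Bumped by ack_bad_irq() on spurious vectors and reported as the
 * "ERR" line in /proc/interrupts.
 */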
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat.__nmi_count, j));
	seq_printf(p, " Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

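/*
 * Backing storage for the per-CPU IRQ stacks: one THREAD_SIZE region
 * per CPU, page aligned and placed in .bss.
 */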
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

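		/*
		 * Call generic_handle_irq(irq) with the vector in r4.  The
		 * mov in the jsr delay slot switches r15 to the top of the
		 * IRQ stack before the call runs; r8 preserves the old
		 * stack pointer so it can be restored afterwards.
		 */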
		__asm__ __volatile__ (
			"mov	%0, r4	\n"
			"mov	r15, r8	\n"
			"jsr	@%1	\n"
			/* switch to the irq stack */
			" mov	%2, r15	\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15	\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
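	/*
	 * preempt_count lives in thread_info here (handle_one_irq() copies
	 * the softirq bits across), so pre-bias the hardirq context with
	 * HARDIRQ_OFFSET to keep hardirq accounting right on this stack.
	 */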
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

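/*
 * Drop the hardirq context reference when a CPU is torn down; the
 * backing storage itself is static and stays in place.
 */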
void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
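/*
 * Run pending softirqs on the dedicated per-CPU softirq stack rather
 * than on the interrupted task's stack.
 */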
void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;
	irqctx->tinfo.previous_sp = current_stack_pointer;

	/* build the stack frame on the softirq stack */
	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

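	/*
	 * As in handle_one_irq(): the mov in the jsr delay slot moves r15
	 * onto the softirq stack before __do_softirq() starts, and r9
	 * holds the original stack pointer for the return path.
	 */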
	__asm__ __volatile__ (
		"mov	r15, r9	\n"
		"jsr	@%0	\n"
		/* switch to the softirq stack */
		" mov	%1, r15	\n"
		/* restore the thread stack */
		"mov	r9, r15	\n"
		: /* no outputs */
		: "r" (__do_softirq), "r" (isp)
		: "memory", "r0", "r1", "r2", "r3", "r4",
		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
	);
}
#endif
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

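/*
 * Common interrupt entry: translate the hardware vector via
 * irq_lookup()/irq_demux(), run the handler (on the IRQ stack when
 * CONFIG_IRQSTACKS is enabled), and let irq_exit() take care of any
 * pending softirqs.
 */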
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

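/*
 * Boot-time IRQ setup: platform vectors first, then the optional
 * machine-vector hook, then finalize the INTC tables and set up the
 * boot CPU's IRQ stacks.
 */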
void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline. Migrate IRQs off this CPU. If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irq_data_get_node(data) == cpu) {
			const struct cpumask *mask = irq_data_get_affinity_mask(data);
			unsigned int newcpu = cpumask_any_and(mask,
							      cpu_online_mask);
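			/* No online CPU left in the affinity mask: fall back to any CPU. */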
			if (newcpu >= nr_cpu_ids) {
				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);

				irq_set_affinity(irq, cpu_all_mask);
			} else {
				irq_set_affinity(irq, mask);
			}
		}
	}
}
#endif
