1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * ip27-irq.c: Highlevel interrupt handling for IP27 architecture. |
4 | * |
5 | * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) |
6 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
7 | * Copyright (C) 1999 - 2001 Kanoj Sarcar |
8 | */ |
9 | |
10 | #include <linux/interrupt.h> |
11 | #include <linux/irq.h> |
12 | #include <linux/irqdomain.h> |
13 | #include <linux/ioport.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/bitops.h> |
16 | #include <linux/sched.h> |
17 | |
18 | #include <asm/io.h> |
19 | #include <asm/irq_cpu.h> |
20 | #include <asm/sn/addrs.h> |
21 | #include <asm/sn/agent.h> |
22 | #include <asm/sn/arch.h> |
23 | #include <asm/sn/intr.h> |
24 | #include <asm/sn/irq_alloc.h> |
25 | |
26 | #include "ip27-common.h" |
27 | |
/*
 * Per-interrupt chip data: pointers to the two 64-bit PI interrupt
 * mask registers (words 0 and 1) of the hub/slice this interrupt is
 * routed to, and the CPU it currently targets.
 */
struct hub_irq_data {
	u64	*irq_mask[2];
	cpuid_t	cpu;
};
32 | |
/* Bitmap of software interrupt levels that are already allocated or reserved. */
static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

/* Per-CPU copy of the two 64-bit hub interrupt enable mask words. */
static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
36 | |
37 | static inline int alloc_level(void) |
38 | { |
39 | int level; |
40 | |
41 | again: |
42 | level = find_first_zero_bit(addr: hub_irq_map, size: IP27_HUB_IRQ_COUNT); |
43 | if (level >= IP27_HUB_IRQ_COUNT) |
44 | return -ENOSPC; |
45 | |
46 | if (test_and_set_bit(nr: level, addr: hub_irq_map)) |
47 | goto again; |
48 | |
49 | return level; |
50 | } |
51 | |
52 | static void enable_hub_irq(struct irq_data *d) |
53 | { |
54 | struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); |
55 | unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); |
56 | |
57 | set_bit(nr: d->hwirq, addr: mask); |
58 | __raw_writeq(val: mask[0], addr: hd->irq_mask[0]); |
59 | __raw_writeq(val: mask[1], addr: hd->irq_mask[1]); |
60 | } |
61 | |
62 | static void disable_hub_irq(struct irq_data *d) |
63 | { |
64 | struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); |
65 | unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); |
66 | |
67 | clear_bit(nr: d->hwirq, addr: mask); |
68 | __raw_writeq(val: mask[0], addr: hd->irq_mask[0]); |
69 | __raw_writeq(val: mask[1], addr: hd->irq_mask[1]); |
70 | } |
71 | |
72 | static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask) |
73 | { |
74 | nasid_t nasid; |
75 | int cpu; |
76 | |
77 | cpu = cpumask_first_and(srcp1: mask, cpu_online_mask); |
78 | if (cpu >= nr_cpu_ids) |
79 | cpu = cpumask_any(cpu_online_mask); |
80 | |
81 | nasid = cpu_to_node(cpu); |
82 | hd->cpu = cpu; |
83 | if (!cputoslice(cpu)) { |
84 | hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A); |
85 | hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A); |
86 | } else { |
87 | hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B); |
88 | hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B); |
89 | } |
90 | } |
91 | |
92 | static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask, |
93 | bool force) |
94 | { |
95 | struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); |
96 | |
97 | if (!hd) |
98 | return -EINVAL; |
99 | |
100 | if (irqd_is_started(d)) |
101 | disable_hub_irq(d); |
102 | |
103 | setup_hub_mask(hd, mask); |
104 | |
105 | if (irqd_is_started(d)) |
106 | enable_hub_irq(d); |
107 | |
108 | irq_data_update_effective_affinity(d, cpumask_of(hd->cpu)); |
109 | |
110 | return 0; |
111 | } |
112 | |
/* irq_chip for interrupts delivered through the hub PI. */
static struct irq_chip hub_irq_type = {
	.name		  = "HUB",
	.irq_mask	  = disable_hub_irq,
	.irq_unmask	  = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};
119 | |
120 | static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq, |
121 | unsigned int nr_irqs, void *arg) |
122 | { |
123 | struct irq_alloc_info *info = arg; |
124 | struct hub_irq_data *hd; |
125 | struct hub_data *hub; |
126 | struct irq_desc *desc; |
127 | int swlevel; |
128 | |
129 | if (nr_irqs > 1 || !info) |
130 | return -EINVAL; |
131 | |
132 | hd = kzalloc(size: sizeof(*hd), GFP_KERNEL); |
133 | if (!hd) |
134 | return -ENOMEM; |
135 | |
136 | swlevel = alloc_level(); |
137 | if (unlikely(swlevel < 0)) { |
138 | kfree(objp: hd); |
139 | return -EAGAIN; |
140 | } |
141 | irq_domain_set_info(domain, virq, hwirq: swlevel, chip: &hub_irq_type, chip_data: hd, |
142 | handler: handle_level_irq, NULL, NULL); |
143 | |
144 | /* use CPU connected to nearest hub */ |
145 | hub = hub_data(info->nasid); |
146 | setup_hub_mask(hd, mask: &hub->h_cpus); |
147 | info->nasid = cpu_to_node(cpu: hd->cpu); |
148 | |
149 | /* Make sure it's not already pending when we connect it. */ |
150 | REMOTE_HUB_CLR_INTR(info->nasid, swlevel); |
151 | |
152 | desc = irq_to_desc(irq: virq); |
153 | desc->irq_common_data.node = info->nasid; |
154 | cpumask_copy(dstp: desc->irq_common_data.affinity, srcp: &hub->h_cpus); |
155 | |
156 | return 0; |
157 | } |
158 | |
159 | static void hub_domain_free(struct irq_domain *domain, |
160 | unsigned int virq, unsigned int nr_irqs) |
161 | { |
162 | struct irq_data *irqd; |
163 | |
164 | if (nr_irqs > 1) |
165 | return; |
166 | |
167 | irqd = irq_domain_get_irq_data(domain, virq); |
168 | if (irqd && irqd->chip_data) |
169 | kfree(objp: irqd->chip_data); |
170 | } |
171 | |
/* irq_domain operations for the HUB interrupt domain. */
static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};
176 | |
177 | /* |
178 | * This code is unnecessarily complex, because we do |
179 | * intr enabling. Basically, once we grab the set of intrs we need |
180 | * to service, we must mask _all_ these interrupts; firstly, to make |
181 | * sure the same intr does not intr again, causing recursion that |
182 | * can lead to stack overflow. Secondly, we can not just mask the |
183 | * one intr we are do_IRQing, because the non-masked intrs in the |
184 | * first set might intr again, causing multiple servicings of the |
185 | * same intr. This effect is mostly seen for intercpu intrs. |
186 | * Kanoj 05.13.00 |
187 | */ |
188 | |
/*
 * Chained handler for the hub's PI_INT_PEND0 register (interrupt
 * levels 0-63).  Reads the pending word, intersects it with this
 * CPU's software enable mask, and dispatches the lowest pending
 * source.  On SMP the resched/call IPIs (one per slice, A/B) are
 * acknowledged and handled inline; everything else is delivered
 * through the HUB irq domain attached as handler data.
 */
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int ret;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];		/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		/* Dispatch only the lowest pending level; higher ones
		 * re-trigger since the register is read again next time. */
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend0));
		if (ret)
			spurious_interrupt();
	}

	/* NOTE(review): final read presumably pushes the interrupt ack
	 * out to the hub before returning — confirm against PI docs. */
	LOCAL_HUB_L(PI_INT_PEND0);
}
228 | |
/*
 * Chained handler for the hub's PI_INT_PEND1 register (interrupt
 * levels 64-127).  Same scheme as ip27_do_irq_mask0(), minus the IPI
 * special cases; the +64 maps the word-1 bit position onto the domain
 * hwirq number.
 */
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int ret;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];		/* Pick intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
	if (ret)
		spurious_interrupt();

	/* NOTE(review): final read presumably pushes the interrupt ack
	 * out to the hub before returning — confirm against PI docs. */
	LOCAL_HUB_L(PI_INT_PEND1);
}
251 | |
252 | void install_ipi(void) |
253 | { |
254 | int cpu = smp_processor_id(); |
255 | unsigned long *mask = per_cpu(irq_enable_mask, cpu); |
256 | int slice = LOCAL_HUB_L(PI_CPU_NUM); |
257 | int resched, call; |
258 | |
259 | resched = CPU_RESCHED_A_IRQ + slice; |
260 | set_bit(nr: resched, addr: mask); |
261 | LOCAL_HUB_CLR_INTR(resched); |
262 | |
263 | call = CPU_CALL_A_IRQ + slice; |
264 | set_bit(nr: call, addr: mask); |
265 | LOCAL_HUB_CLR_INTR(call); |
266 | |
267 | if (slice == 0) { |
268 | LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]); |
269 | LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]); |
270 | } else { |
271 | LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]); |
272 | LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]); |
273 | } |
274 | } |
275 | |
276 | void __init arch_init_irq(void) |
277 | { |
278 | struct irq_domain *domain; |
279 | struct fwnode_handle *fn; |
280 | int i; |
281 | |
282 | mips_cpu_irq_init(); |
283 | |
284 | /* |
285 | * Some interrupts are reserved by hardware or by software convention. |
286 | * Mark these as reserved right away so they won't be used accidentally |
287 | * later. |
288 | */ |
289 | for (i = 0; i <= CPU_CALL_B_IRQ; i++) |
290 | set_bit(i, hub_irq_map); |
291 | |
292 | for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) |
293 | set_bit(i, hub_irq_map); |
294 | |
295 | fn = irq_domain_alloc_named_fwnode(name: "HUB" ); |
296 | WARN_ON(fn == NULL); |
297 | if (!fn) |
298 | return; |
299 | domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT, |
300 | &hub_domain_ops, NULL); |
301 | WARN_ON(domain == NULL); |
302 | if (!domain) |
303 | return; |
304 | |
305 | irq_set_default_host(host: domain); |
306 | |
307 | irq_set_percpu_devid(IP27_HUB_PEND0_IRQ); |
308 | irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0, |
309 | domain); |
310 | irq_set_percpu_devid(IP27_HUB_PEND1_IRQ); |
311 | irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1, |
312 | domain); |
313 | } |
314 | |