// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

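/*
 * All ARConnect commands below are issued by poking the shared MCIP CMD
 * aux register (with an optional WDATA payload) and, for reads, fetching
 * the result from the READBACK aux register. Every such command/readback
 * sequence must therefore be serialized by this one lock.
 */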
static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set the mask to halt the GFRC (Global Free-Running Counter) if any
 * online core in the SMP cluster is halted. Only works on ARC HS v3.0+;
 * it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added
	 * in GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

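/*
 * Arrange for the whole cluster to halt coherently under a debugger: once
 * @cpu is added to the select mask, a halt on any selected core (for any
 * of the causes enabled below) halts the others too.
 */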
static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
	 * CMD_DEBUG_SET_MASK commands, so read it once instead of reading
	 * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
	/*
	 * The parameter specifies the halt cause:
	 * STATUS32[H]/actionpoint/breakpoint/self-halt
	 * We choose all of them (0xF).
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update GFRC halt mask as new CPU came online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);

	/* Update MCIP debug mask as new CPU came online */
	if (mp.dbg)
		mcip_update_debug_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPIs to other cores; self-IPI falls back to a core softirq */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending
	 * this one. Linux cross-core calling works fine with concurrent
	 * IPIs coalesced into one; see ipi_send_msg_one() in
	 * arch/arc/kernel/smp.c.
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare cases, multiple concurrent IPIs sent to the same target
	 * can be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));
}

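/*
 * Hook the MCIP flavors of the platform SMP callbacks into the generic
 * ARC SMP code (see their use in arch/arc/kernel/smp.c).
 */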
struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

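/*
 * Read-modify-write the IDU MODE register for @cmn_irq. The anonymous
 * bitfield below mirrors the register layout as used here: distribution
 * mode in bits [1:0] and trigger level in bit [4].
 */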
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
			 bool set_distr, unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
	if (set_distr)
		data.distr = distr;
	if (set_lvl)
		data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* error out if no online cpu per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

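	/*
	 * ffs() == fls() iff exactly one bit is set: deliver straight to
	 * that one core. Otherwise let the IDU round-robin the IRQ among
	 * the cores in the mask.
	 */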
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
	unsigned long flags;

	/*
	 * ARCv2 IDU HW does not support inverse polarity, so these are the
	 * only interrupt types supported.
	 */
	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_mode(data->hwirq, true,
		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
						   IDU_M_TRIG_LEVEL,
		     false, 0);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 *   1. When the kernel is not configured with support for SMP.
	 *   2. When the kernel is configured with SMP support, but the upper
	 *      interrupt controllers do not support setting the affinity and
	 *      cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_ack		= idu_irq_ack,
	.irq_mask_ack		= idu_irq_mask_ack,
	.irq_enable		= idu_irq_enable,
	.irq_set_type		= idu_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

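/*
 * Each common IRQ i surfaces at the core intc as hwirq FIRST_EXT_IRQ + i;
 * the cascade handler undoes that offset to recover the IDU-domain hwirq
 * before handing the IRQ off to the IDU domain.
 */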
static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_domain_irq(idu_domain, idu_hwirq);
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onetwocell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]:   Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0, then "C" common IRQs
 * [24+C, N]:  Not statically assigned, private-per-core
 */

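/*
 * A typical DeviceTree node for the IDU looks roughly like this (a sketch;
 * exact node names and properties depend on the platform .dts):
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 * Peripherals then set "interrupt-parent = <&idu_intc>" and give the
 * 0-based common IRQ number in their "interrupts" property.
 */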
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map parent uplink IRQs (towards core intc) 24,25,...
		 * This step has been done before already; however we need it
		 * here to get the parent virq, so the IDU cascade handler
		 * can be installed as the first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);