// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while(0)
#endif

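/*
 * With CONFIG_GIC_NON_BANKED (a "Franken-GIC"), the distributor and CPU
 * interface registers are not banked and each CPU sees them at a
 * different offset, so we keep a per-CPU base pointer for that case and
 * a single common pointer otherwise.
 */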
union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
	void __iomem *raw_dist_base;
	void __iomem *raw_cpu_base;
	u32 percpu_offset;
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
};

#ifdef CONFIG_BL_SWITCHER

static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)

#else

#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)

#define gic_lock()			do { } while(0)
#define gic_unlock()			do { } while(0)

#endif

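/*
 * Set on interconnects that cannot cope with byte-sized MMIO accesses
 * (see gic_enable_rmw_access()); gic_set_affinity() then falls back to
 * a locked 32-bit read-modify-write.
 */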
static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info __initdata;

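/*
 * Stash of the full GICC_IAR value for an in-progress SGI: the source
 * CPU is encoded in bits [12:10] and the value must be written back
 * unchanged on EOI/deactivation (see gic_handle_irq()).
 */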
static DEFINE_PER_CPU(u32, sgi_intid);

#ifdef CONFIG_GIC_NON_BANKED
static DEFINE_STATIC_KEY_FALSE(frankengic_key);

static void enable_frankengic(void)
{
	static_branch_enable(&frankengic_key);
}

static inline void __iomem *__get_base(union gic_base *base)
{
	if (static_branch_unlikely(&frankengic_key))
		return raw_cpu_read(*base->percpu_base);

	return base->common_base;
}

#define gic_data_dist_base(d)	__get_base(&(d)->dist_base)
#define gic_data_cpu_base(d)	__get_base(&(d)->cpu_base)
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define enable_frankengic()	do { } while(0)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
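/*
 * The distributor's set/clear banks encode one interrupt per bit,
 * 32 interrupts per 32-bit register, hence the %32 and /32 below.
 */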
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one left to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && gicirq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d) || gic_irq(d) < 16)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (unlikely(irqnr >= 1020))
			break;

		if (static_branch_likely(&supports_deactivate_key))
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
		isb();

		/*
		 * Ensure any shared data written by the CPU sending the IPI
		 * is read after we've read the ACK register on the GIC.
		 *
		 * Pairs with the write barrier in gic_ipi_send_mask
		 */
		if (irqnr <= 15) {
			smp_rmb();

			/*
			 * The GIC encodes the source CPU in GICC_IAR,
			 * so deactivation fails unless the value is
			 * written back as-is to GICC_EOI.  Stash the
			 * INTID away for gic_eoi_irq() to write back.
			 * This only works because we don't nest SGIs...
			 */
			this_cpu_write(sgi_intid, irqstat);
		}

		generic_handle_domain_irq(gic->domain, irqnr);
	} while (1);
}

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int gic_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	isb();
	ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);
 out:
	chained_irq_exit(chip, desc);
}

static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);

	if (gic->domain->pm_dev)
		seq_printf(p, gic->domain->pm_dev->of_node->name);
	else
		seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
}

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}

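/*
 * GICD_ITARGETSR0-7 are banked: for SGIs/PPIs a read returns the CPU
 * interface bit of the reading CPU, so scanning for the first non-zero
 * byte tells us which gic_cpu_map bit we are.
 */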
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

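/*
 * Match on GICC_IIDR: implementer 0x43B (ARM) and architecture version
 * 2, ignoring the revision field.
 */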
static bool gic_check_gicv2(void __iomem *base)
{
	u32 val = readl_relaxed(base + GIC_CPU_IDENT);
	return (val & 0xff0fff) == 0x02043B;
}

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;
	int i;

	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
		mode = GIC_CPU_CTRL_EOImodeNS;

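	/*
	 * On a genuine GICv2, clear all four GICC_APRn active-priority
	 * registers so that no stale active priorities survive from
	 * firmware or a previous kernel.
	 */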
	if (gic_check_gicv2(cpu_base))
		for (i = 0; i < 4; i++)
			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}

#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			       dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			       dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			       dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			       dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
					     sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
#else
static int gic_pm_init(struct gic_chip_data *gic)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
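/*
 * Emulate a byte write with a locked 32-bit read-modify-write of the
 * containing word, for interconnects that lock up on sub-word accesses
 * (see gic_enable_rmw_access()).
 */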
static void rmw_writeb(u8 bval, void __iomem *addr)
{
	static DEFINE_RAW_SPINLOCK(rmw_lock);
	unsigned long offset = (unsigned long)addr & 3UL;
	unsigned long shift = offset * 8;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&rmw_lock, flags);

	addr -= offset;
	val = readl_relaxed(addr);
	val &= ~GENMASK(shift + 7, shift);
	val |= bval << shift;
	writel_relaxed(val, addr);

	raw_spin_unlock_irqrestore(&rmw_lock, flags);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;

	if (unlikely(gic != &gic_data[0]))
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	if (static_branch_unlikely(&needs_rmw_access))
		rmw_writeb(gic_cpu_map[cpu], reg);
	else
		writeb_relaxed(gic_cpu_map[cpu], reg);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | d->hwirq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}

static __init void gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data[0].domain->fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gic:starting",
				  gic_starting_cpu, NULL);

	base_sgi = irq_domain_alloc_irqs(gic_data[0].domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
#else
#define gic_smp_init()		do { } while(0)
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#endif

static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_print_chip		= gic_irq_print_chip,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static const struct irq_chip gic_chip_mode1 = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
 * is also updated.  Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

static void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
	const struct irq_chip *chip;

	chip = (static_branch_likely(&supports_deactivate_key) &&
		gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;

	switch (hw) {
	case 0 ... 31:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;
	default:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

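/*
 * Translate a firmware interrupt specifier into (hwirq, type).  The
 * standard three-cell DT binding puts SPI-vs-PPI in cell 0, the number
 * within that space in cell 1 and the trigger flags in cell 2, as in
 * this hypothetical fragment:
 *
 *	interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;	// SPI 29 -> hwirq 61
 *
 * A single-cell fwspec with param[0] < 16 denotes an SGI.
 */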
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/* Make it clear that broken DTs are... broken */
		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static int gic_init_bases(struct gic_chip_data *gic,
			  struct fwnode_handle *handle)
{
	int gic_irqs, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		enable_frankengic();
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic->domain = irq_domain_create_linear(handle, gic_irqs,
					       &gic_irq_domain_hierarchy_ops,
					       gic);
	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}

static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	ret = gic_init_bases(gic, handle);
	if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}

static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}

static int gic_cnt __initdata;
static bool gicv2_force_probe;

static int __init gicv2_force_probe_cfg(char *buf)
{
	return kstrtobool(buf, &gicv2_force_probe);
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);

static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K) {
		void __iomem *alt;
		/*
		 * Check for a stupid firmware that only exposes the
		 * first page of a GICv2.
		 */
		if (!gic_check_gicv2(*base))
			return false;

		if (!gicv2_force_probe) {
			pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
			return false;
		}

		alt = ioremap(cpuif_res.start, SZ_8K);
		if (!alt)
			return false;
		if (!gic_check_gicv2(alt + SZ_4K)) {
			/*
			 * The first page was that of a GICv2, and
			 * the second was *something*.  Let's trust it
			 * to be a GICv2, and update the mapping.
			 */
			pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
				&cpuif_res.start);
			iounmap(*base);
			*base = alt;
			return true;
		}

		/*
		 * We detected *two* initial GICv2 pages in a
		 * row.  Could be a GICv2 aliased over two 64kB
		 * pages.  Update the resource, map the iospace, and
		 * pray.
		 */
		iounmap(alt);
		alt = ioremap(cpuif_res.start, SZ_128K);
		if (!alt)
			return false;
		pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
			&cpuif_res.start);
		cpuif_res.end = cpuif_res.start + SZ_128K - 1;
		iounmap(*base);
		*base = alt;
	}
	if (resource_size(&cpuif_res) == SZ_128K) {
		/*
		 * Verify that we have the first 4kB of a GICv2
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		if (!gic_check_gicv2(*base) ||
		    !gic_check_gicv2(*base + 0xf000))
			return false;

		/*
		 * Move the base up by 60kB, so that we have a 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset.  Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa\n",
			&cpuif_res.start);
	}

	return true;
}

static bool gic_enable_rmw_access(void *data)
{
	/*
	 * The EMEV2 class of machines has a broken interconnect, and
	 * locks up on accesses that are less than 32bit.  So far, only
	 * the affinity setting requires it.
	 */
	if (of_machine_is_compatible("renesas,emev2")) {
		static_branch_enable(&needs_rmw_access);
		return true;
	}

	return false;
}

static const struct gic_quirk gic_quirks[] = {
	{
		.desc		= "broken byte access",
		.compatible	= "arm,pl390",
		.init		= gic_enable_rmw_access,
	},
	{ },
};

static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
	if (!gic || !node)
		return -EINVAL;

	gic->raw_dist_base = of_iomap(node, 0);
	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
		goto error;

	gic->raw_cpu_base = of_iomap(node, 1);
	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
		goto error;

	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
		gic->percpu_offset = 0;

	gic_enable_of_quirks(node, gic_quirks, gic);

	return 0;

error:
	gic_teardown(gic);

	return -ENOMEM;
}

int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	int ret;

	if (!dev || !dev->of_node || !gic || !irq)
		return -EINVAL;

	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
	if (!*gic)
		return -ENOMEM;

	ret = gic_of_setup(*gic, dev->of_node);
	if (ret)
		return ret;

	ret = gic_init_bases(*gic, &dev->of_node->fwnode);
	if (ret) {
		gic_teardown(*gic);
		return ret;
	}

	irq_domain_set_pm_device((*gic)->domain, dev);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);

	return 0;
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	if (static_branch_likely(&supports_deactivate_key))
		vgic_set_kvm_info(&gic_v2_kvm_info);
}

int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	phys_addr_t cpu_phys_base;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vctrl_base;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static int __init
gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the
	 * ACPI spec.  All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}

/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}

static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}

#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	if (!acpi_data.vctrl_base)
		return;

	vctrl_res->flags = IORESOURCE_MEM;
	vctrl_res->start = acpi_data.vctrl_base;
	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;

	if (!acpi_data.vcpu_base)
		return;

	vcpu_res->flags = IORESOURCE_MEM;
	vcpu_res->start = acpi_data.vcpu_base;
	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v2_kvm_info.maint_irq = irq;

	vgic_set_kvm_info(&gic_v2_kvm_info);
}

static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}

static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available.  ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, gsi_domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(gsi_domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif