// SPDX-License-Identifier: GPL-2.0
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
#include <asm/softirq_stack.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static unsigned long hvirq_major __initdata;
static int __init early_hvirq_major(char *p)
{
	int rc = kstrtoul(p, 10, &hvirq_major);

	return rc;
}
early_param("hvirq", early_hvirq_major);

static int hv_irq_version;

/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
 * based interfaces, but:
 *
 * 1) Several OSs, Solaris and Linux included, use them even when only
 *    negotiating version 1.0 (or failing to negotiate at all).  So the
 *    hypervisor has a workaround that provides the VIRQ interfaces even
 *    when only version 1.0 of the API is in use.
 *
 * 2) More importantly, with major version 2.0 these VIRQ interfaces
 *    were only actually hooked up for LDC interrupts, even though the
 *    Hypervisor specification clearly stated:
 *
 *	The new interrupt API functions will be available to a guest
 *	when it negotiates version 2.0 in the interrupt API group 0x2. When
 *	a guest negotiates version 2.0, all interrupt sources will only
 *	support using the cookie interface, and any attempt to use the
 *	version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
 *	ENOTSUPPORTED error being returned.
 *
 *    with an emphasis on "all interrupt sources".
 *
 * To correct this, major version 3.0 was created, which does actually
 * support VIRQs for all interrupt sources (not just LDC devices).  So
 * if we want to move completely over to the cookie based VIRQs we must
 * negotiate major version 3.0 or later of HV_GRP_INTR.
 */
static bool sun4v_cookie_only_virqs(void)
{
	if (hv_irq_version >= 3)
		return true;
	return false;
}

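/* Negotiate the HV_GRP_INTR API group with the hypervisor.  We ask
 * for major version 3 (or whatever the "hvirq=" command line override
 * specifies) and fall back to assuming version 1 semantics if the
 * registration fails.
 */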
static void __init irq_init_hv(void)
{
	unsigned long hv_error, major, minor = 0;

	if (tlb_type != hypervisor)
		return;

	if (hvirq_major)
		major = hvirq_major;
	else
		major = 3;

	hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
	if (!hv_error)
		hv_irq_version = major;
	else
		hv_irq_version = 1;

	pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
		hv_irq_version,
		sun4v_cookie_only_virqs() ? "enabled" : "disabled");
}

/* This function is for the timer interrupt. */
int __init arch_probe_nr_irqs(void)
{
	return 1;
}

#define DEFAULT_NUM_IVECS	(0xfffU)
static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
#define NUM_IVECS	(nr_ivec)

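/* Size the interrupt vector table.  Most systems get by with
 * DEFAULT_NUM_IVECS entries; SPARC64-X ("Athena") encodes larger
 * devhandle|devino values and therefore needs a bigger table.
 */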
static unsigned int __init size_nr_ivec(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		/* Athena's devhandle|devino is large. */
		case SUN4V_CHIP_SPARC64X:
			nr_ivec = 0xffff;
			break;
		}
	}
	return nr_ivec;
}

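/* Per-irq chip data.  A sun4u interrupt is described by its IMAP and
 * ICLR register addresses.  A sun4v interrupt is described either by
 * a single sysino or by a (dev_handle, dev_ino) pair, overlaid in the
 * union below; cookie based VIRQs additionally embed an ino_bucket
 * that the interrupt cookie points back to.
 */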
struct irq_handler_data {
	union {
		struct {
			unsigned int dev_handle;
			unsigned int dev_ino;
		};
		unsigned long sysino;
	};
	struct ino_bucket bucket;
	unsigned long iclr;
	unsigned long imap;
};

static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_handle;
}

static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_ino;
}

static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->sysino;
}

void irq_free(unsigned int irq)
{
	void *data = irq_get_handler_data(irq);

	kfree(data);
	irq_set_handler_data(irq, NULL);
	irq_free_descs(irq, 1);
}

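/* Allocate a generic Linux irq descriptor on the current NUMA node
 * and return its number, or 0 on failure.  The dev_handle/dev_ino
 * arguments are not used by the allocation itself; they document
 * which interrupt source the descriptor is for.
 */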
unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	int irq;

	irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
	if (irq <= 0)
		goto out;

	return irq;
out:
	return 0;
}

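/* See if a cookie based VIRQ has already been assigned for this
 * (devhandle, devino) pair.  A cookie with bit 63 set is the
 * complement of the owning ino_bucket's physical address (see
 * cookie_assign() below), from which the existing irq is recovered.
 */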
static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
{
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq = 0U;

	hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
	if (hv_err) {
		pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
		goto out;
	}

	if (cookie & (1UL << 63UL)) {
		cookie = ~cookie;
		bucket = (struct ino_bucket *) __va(cookie);
		irq = bucket->__irq;
	}
out:
	return irq;
}

static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	pr_crit("BAD IRQ ack %d\n", irq);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	pr_warn("IRQ pre handler NOT supported.\n");
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, " Non-maskable interrupts\n");
	return 0;
}

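/* Compute the interrupt target ID field for a sun4u IMAP register.
 * The encoding depends on the interconnect: Starfire needs an explicit
 * translation of the cpuid, JBUS parts (Jalapeno/Serrano) place the
 * cpuid in the JBUS TID field, other Cheetah parts use the Safari
 * agent/node ID split, and everything else uses the plain UPA TID.
 */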
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

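/* Pick a target CPU from an affinity mask.  If the mask is simply
 * "all online CPUs" we spread interrupts via map_to_cpu(); otherwise
 * we take the first online CPU in the mask, falling back to
 * map_to_cpu() when the intersection is empty.
 */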
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif

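/* Enabling a sun4u interrupt means programming its IMAP register with
 * the target CPU's TID and the Valid bit, then writing ICLR_IDLE to
 * reset the interrupt state machine.
 */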
static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq,
				       irq_data_get_affinity_mask(data));
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned long cpuid = irq_choose_cpu(data->irq,
					     irq_data_get_affinity_mask(data));
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

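/* Build (or find) the Linux irq for a sun4u interrupt source.  The
 * INO is read from the IMAP register itself, plus a fixup supplied by
 * the bus driver.  The irq number is cached in the corresponding
 * ivector_table bucket so subsequent callers get the same irq.
 */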
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct irq_handler_data *handler_data;
	struct ino_bucket *bucket;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}

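/* Common sun4v irq construction: allocate the irq and handler data,
 * let the caller-supplied callback fill in the sysino or
 * devhandle/devino identity, and attach the given irq_chip.  The
 * IMAP/ICLR fields are set to ~0UL since sun4v does not use them.
 */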
static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
		void (*handler_data_init)(struct irq_handler_data *data,
		u32 devhandle, unsigned int devino),
		struct irq_chip *chip)
{
	struct irq_handler_data *data;
	unsigned int irq;

	irq = irq_alloc(devhandle, devino);
	if (!irq)
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		pr_err("IRQ handler data allocation failed.\n");
		irq_free(irq);
		irq = 0;
		goto out;
	}

	irq_set_handler_data(irq, data);
	handler_data_init(data, devhandle, devino);
	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
	data->imap = ~0UL;
	data->iclr = ~0UL;
out:
	return irq;
}

static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
				   unsigned int devino)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	unsigned long hv_error, cookie;

	/* handler_irq needs to find the irq.  The cookie is seen as
	 * signed in sun4v_dev_mondo and treated as a non-ivector_table
	 * delivery.
	 */
	ihd->bucket.__irq = irq;
	cookie = ~__pa(&ihd->bucket);

	hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_error)
		pr_err("HV vintr set cookie failed = %ld\n", hv_error);

	return hv_error;
}

static void cookie_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	data->dev_handle = devhandle;
	data->dev_ino = devino;
}

static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned long hv_error;
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);

	hv_error = cookie_assign(irq, devhandle, devino);
	if (hv_error) {
		irq_free(irq);
		irq = 0;
	}

	return irq;
}

static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	irq = cookie_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);

out:
	return irq;
}

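/* Record the Linux irq in the ivector_table bucket for this sysino so
 * that handler_irq() can map the incoming vector back to the irq.
 */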
static void sysino_set_bucket(unsigned int irq)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
	BUG_ON(sysino >= nr_ivec);
	bucket = &ivector_table[sysino];
	bucket_set_irq(__pa(bucket), irq);
}

static void sysino_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	data->sysino = sysino;
}

static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
	if (!irq)
		goto out;

	sysino_set_bucket(irq);
out:
	return irq;
}

static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = sysino_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	if (sun4v_cookie_only_virqs())
		irq = sun4v_build_cookie(devhandle, devino);
	else
		irq = sun4v_build_sysino(devhandle, devino);

	return irq;
}

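/* VIRQs are always cookie based, regardless of which HV_GRP_INTR
 * version was negotiated.  The irq is left disabled (IRQ_NOAUTOEN)
 * until the consumer explicitly enables it.
 */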
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
	if (!irq)
		goto out;

	/* This is borrowed from the original function. */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

out:
	return irq;
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

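/* Top-level IVEC interrupt handler.  We atomically detach the per-cpu
 * list of pending ino_buckets (with PSTATE_IE cleared so a new vector
 * trap cannot race with the swap), switch to the hard-irq stack, and
 * run the generic handler for every bucket on the chain.
 */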
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
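/* Run softirqs on the per-cpu softirq stack.  The %sp computation
 * leaves room for a minimal 192-byte stack frame (register window
 * save area plus outgoing argument slots, per the SPARC V9 calling
 * convention) and applies the 64-bit STACK_BIAS.
 */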
void do_softirq_own_stack(void)
{
	void *orig_sp, *sp = softirq_stack[smp_processor_id()];

	sp += THREAD_SIZE - 192 - STACK_BIAS;

	__asm__ __volatile__("mov %%sp, %0\n\t"
			     "mov %1, %%sp"
			     : "=&r" (orig_sp)
			     : "r" (sp));
	__do_softirq();
	__asm__ __volatile__("mov %0, %%sp"
			     : : "r" (orig_sp));
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
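/* Called on a CPU that is going offline.  Re-run the affinity setter
 * for every active, non-per-cpu irq so that nothing is left targeting
 * this CPU, then shut off the local timer interrupt.
 */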
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data;
		unsigned long flags;

		if (!desc)
			continue;
		data = irq_desc_get_irq_data(desc);
		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
					irq_data_get_affinity_mask(data),
					false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (of_node_name_eq(dp, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must already have
	 * mapped it.
	 */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
				       unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;
	void *mondo, *p;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);

	/* Make sure the mondo block is 64-byte aligned.  */
	p = kzalloc(127, GFP_KERNEL);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
		prom_halt();
	}
	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
	tb->cpu_mondo_block_pa = __pa(mondo);

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
		prom_halt();
	}

	tb->cpu_list_pa = __pa(page);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

static void __init irq_ivector_init(void)
{
	unsigned long size, order;
	unsigned int ivecs;

	/* If we are doing cookie only VIRQs then we do not need the ivector
	 * table to process interrupts.
	 */
	if (sun4v_cookie_only_virqs())
		return;

	ivecs = size_nr_ivec();
	size = sizeof(struct ino_bucket) * ivecs;
	order = get_order(size);
	ivector_table = (struct ino_bucket *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	irq_init_hv();
	irq_ivector_init();
	map_prom_timers();
	kill_prom_timer();

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}