1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
4 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King |
5 | * |
6 | * This file contains the interrupt descriptor management code. Detailed |
7 | * information is available in Documentation/core-api/genericirq.rst |
8 | * |
9 | */ |
10 | #include <linux/irq.h> |
11 | #include <linux/slab.h> |
12 | #include <linux/export.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> |
15 | #include <linux/maple_tree.h> |
16 | #include <linux/irqdomain.h> |
17 | #include <linux/sysfs.h> |
18 | |
19 | #include "internals.h" |
20 | |
21 | /* |
22 | * lockdep: we want to handle all irq_desc locks as a single lock-class: |
23 | */ |
24 | static struct lock_class_key irq_desc_lock_class; |
25 | |
26 | #if defined(CONFIG_SMP) |
27 | static int __init irq_affinity_setup(char *str) |
28 | { |
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
31 | /* |
32 | * Set at least the boot cpu. We don't want to end up with |
33 | * bugreports caused by random commandline masks |
34 | */ |
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
36 | return 1; |
37 | } |
__setup("irqaffinity=", irq_affinity_setup);
39 | |
40 | static void __init init_irq_default_affinity(void) |
41 | { |
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
46 | } |
47 | #else |
48 | static void __init init_irq_default_affinity(void) |
49 | { |
50 | } |
51 | #endif |
52 | |
53 | #ifdef CONFIG_SMP |
54 | static int alloc_masks(struct irq_desc *desc, int node) |
55 | { |
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
58 | return -ENOMEM; |
59 | |
60 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
64 | return -ENOMEM; |
65 | } |
66 | #endif |
67 | |
68 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
74 | return -ENOMEM; |
75 | } |
76 | #endif |
77 | return 0; |
78 | } |
79 | |
80 | static void desc_smp_init(struct irq_desc *desc, int node, |
81 | const struct cpumask *affinity) |
82 | { |
83 | if (!affinity) |
84 | affinity = irq_default_affinity; |
	cpumask_copy(desc->irq_common_data.affinity, affinity);
86 | |
87 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
	cpumask_clear(desc->pending_mask);
89 | #endif |
90 | #ifdef CONFIG_NUMA |
91 | desc->irq_common_data.node = node; |
92 | #endif |
93 | } |
94 | |
95 | static void free_masks(struct irq_desc *desc) |
96 | { |
97 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
	free_cpumask_var(desc->pending_mask);
99 | #endif |
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
103 | #endif |
104 | } |
105 | |
106 | #else |
107 | static inline int |
108 | alloc_masks(struct irq_desc *desc, int node) { return 0; } |
109 | static inline void |
110 | desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } |
111 | static inline void free_masks(struct irq_desc *desc) { } |
112 | #endif |
113 | |
114 | static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, |
115 | const struct cpumask *affinity, struct module *owner) |
116 | { |
117 | int cpu; |
118 | |
119 | desc->irq_common_data.handler_data = NULL; |
120 | desc->irq_common_data.msi_desc = NULL; |
121 | |
122 | desc->irq_data.common = &desc->irq_common_data; |
123 | desc->irq_data.irq = irq; |
124 | desc->irq_data.chip = &no_irq_chip; |
125 | desc->irq_data.chip_data = NULL; |
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
129 | desc->handle_irq = handle_bad_irq; |
130 | desc->depth = 1; |
131 | desc->irq_count = 0; |
132 | desc->irqs_unhandled = 0; |
133 | desc->tot_count = 0; |
134 | desc->name = NULL; |
135 | desc->owner = owner; |
136 | for_each_possible_cpu(cpu) |
137 | *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; |
138 | desc_smp_init(desc, node, affinity); |
139 | } |
140 | |
141 | int nr_irqs = NR_IRQS; |
142 | EXPORT_SYMBOL_GPL(nr_irqs); |
143 | |
144 | static DEFINE_MUTEX(sparse_irq_lock); |
145 | static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs, |
146 | MT_FLAGS_ALLOC_RANGE | |
147 | MT_FLAGS_LOCK_EXTERN | |
148 | MT_FLAGS_USE_RCU, |
149 | sparse_irq_lock); |
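/*
 * The sparse_irqs maple tree maps Linux interrupt numbers to their
 * struct irq_desc. Updates are serialized by sparse_irq_lock
 * (MT_FLAGS_LOCK_EXTERN), while lookups such as irq_to_desc() rely on
 * RCU (MT_FLAGS_USE_RCU) and therefore need no mutex.
 */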
150 | |
151 | static int irq_find_free_area(unsigned int from, unsigned int cnt) |
152 | { |
153 | MA_STATE(mas, &sparse_irqs, 0, 0); |
154 | |
	if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
156 | return -ENOSPC; |
157 | return mas.index; |
158 | } |
159 | |
160 | static unsigned int irq_find_at_or_after(unsigned int offset) |
161 | { |
162 | unsigned long index = offset; |
	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
164 | |
165 | return desc ? irq_desc_get_irq(desc) : nr_irqs; |
166 | } |
167 | |
168 | static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) |
169 | { |
170 | MA_STATE(mas, &sparse_irqs, irq, irq); |
171 | WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0); |
172 | } |
173 | |
174 | static void delete_irq_desc(unsigned int irq) |
175 | { |
176 | MA_STATE(mas, &sparse_irqs, irq, irq); |
	mas_erase(&mas);
178 | } |
179 | |
180 | #ifdef CONFIG_SPARSE_IRQ |
181 | static const struct kobj_type irq_kobj_type; |
182 | #endif |
183 | |
184 | static int init_desc(struct irq_desc *desc, int irq, int node, |
185 | unsigned int flags, |
186 | const struct cpumask *affinity, |
187 | struct module *owner) |
188 | { |
189 | desc->kstat_irqs = alloc_percpu(unsigned int); |
190 | if (!desc->kstat_irqs) |
191 | return -ENOMEM; |
192 | |
193 | if (alloc_masks(desc, node)) { |
		free_percpu(desc->kstat_irqs);
195 | return -ENOMEM; |
196 | } |
197 | |
198 | raw_spin_lock_init(&desc->lock); |
199 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
200 | mutex_init(&desc->request_mutex); |
201 | init_waitqueue_head(&desc->wait_for_threads); |
202 | desc_set_defaults(irq, desc, node, affinity, owner); |
	irqd_set(&desc->irq_data, flags);
204 | irq_resend_init(desc); |
205 | #ifdef CONFIG_SPARSE_IRQ |
	kobject_init(&desc->kobj, &irq_kobj_type);
	init_rcu_head(&desc->rcu);
208 | #endif |
209 | |
210 | return 0; |
211 | } |
212 | |
213 | #ifdef CONFIG_SPARSE_IRQ |
214 | |
215 | static void irq_kobj_release(struct kobject *kobj); |
216 | |
217 | #ifdef CONFIG_SYSFS |
218 | static struct kobject *irq_kobj_base; |
219 | |
220 | #define IRQ_ATTR_RO(_name) \ |
221 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
222 | |
223 | static ssize_t per_cpu_count_show(struct kobject *kobj, |
224 | struct kobj_attribute *attr, char *buf) |
225 | { |
226 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
227 | ssize_t ret = 0; |
	char *p = "";
229 | int cpu; |
230 | |
231 | for_each_possible_cpu(cpu) { |
232 | unsigned int c = irq_desc_kstat_cpu(desc, cpu); |
233 | |
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
236 | } |
237 | |
	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
239 | return ret; |
240 | } |
241 | IRQ_ATTR_RO(per_cpu_count); |
242 | |
243 | static ssize_t chip_name_show(struct kobject *kobj, |
244 | struct kobj_attribute *attr, char *buf) |
245 | { |
246 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
247 | ssize_t ret = 0; |
248 | |
249 | raw_spin_lock_irq(&desc->lock); |
250 | if (desc->irq_data.chip && desc->irq_data.chip->name) { |
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
253 | } |
254 | raw_spin_unlock_irq(&desc->lock); |
255 | |
256 | return ret; |
257 | } |
258 | IRQ_ATTR_RO(chip_name); |
259 | |
260 | static ssize_t hwirq_show(struct kobject *kobj, |
261 | struct kobj_attribute *attr, char *buf) |
262 | { |
263 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
264 | ssize_t ret = 0; |
265 | |
266 | raw_spin_lock_irq(&desc->lock); |
267 | if (desc->irq_data.domain) |
		ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
269 | raw_spin_unlock_irq(&desc->lock); |
270 | |
271 | return ret; |
272 | } |
273 | IRQ_ATTR_RO(hwirq); |
274 | |
275 | static ssize_t type_show(struct kobject *kobj, |
276 | struct kobj_attribute *attr, char *buf) |
277 | { |
278 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
279 | ssize_t ret = 0; |
280 | |
281 | raw_spin_lock_irq(&desc->lock); |
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
284 | raw_spin_unlock_irq(&desc->lock); |
285 | |
286 | return ret; |
287 | |
288 | } |
289 | IRQ_ATTR_RO(type); |
290 | |
291 | static ssize_t wakeup_show(struct kobject *kobj, |
292 | struct kobj_attribute *attr, char *buf) |
293 | { |
294 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
295 | ssize_t ret = 0; |
296 | |
297 | raw_spin_lock_irq(&desc->lock); |
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
300 | raw_spin_unlock_irq(&desc->lock); |
301 | |
302 | return ret; |
303 | |
304 | } |
305 | IRQ_ATTR_RO(wakeup); |
306 | |
307 | static ssize_t name_show(struct kobject *kobj, |
308 | struct kobj_attribute *attr, char *buf) |
309 | { |
310 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
311 | ssize_t ret = 0; |
312 | |
313 | raw_spin_lock_irq(&desc->lock); |
314 | if (desc->name) |
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
316 | raw_spin_unlock_irq(&desc->lock); |
317 | |
318 | return ret; |
319 | } |
320 | IRQ_ATTR_RO(name); |
321 | |
322 | static ssize_t actions_show(struct kobject *kobj, |
323 | struct kobj_attribute *attr, char *buf) |
324 | { |
325 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
326 | struct irqaction *action; |
327 | ssize_t ret = 0; |
	char *p = "";
329 | |
330 | raw_spin_lock_irq(&desc->lock); |
331 | for_each_action_of_desc(desc, action) { |
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
335 | } |
336 | raw_spin_unlock_irq(&desc->lock); |
337 | |
338 | if (ret) |
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
340 | |
341 | return ret; |
342 | } |
343 | IRQ_ATTR_RO(actions); |
344 | |
345 | static struct attribute *irq_attrs[] = { |
346 | &per_cpu_count_attr.attr, |
347 | &chip_name_attr.attr, |
348 | &hwirq_attr.attr, |
349 | &type_attr.attr, |
350 | &wakeup_attr.attr, |
351 | &name_attr.attr, |
352 | &actions_attr.attr, |
353 | NULL |
354 | }; |
355 | ATTRIBUTE_GROUPS(irq); |
356 | |
357 | static const struct kobj_type irq_kobj_type = { |
358 | .release = irq_kobj_release, |
359 | .sysfs_ops = &kobj_sysfs_ops, |
360 | .default_groups = irq_groups, |
361 | }; |
362 | |
363 | static void irq_sysfs_add(int irq, struct irq_desc *desc) |
364 | { |
365 | if (irq_kobj_base) { |
366 | /* |
367 | * Continue even in case of failure as this is nothing |
368 | * crucial and failures in the late irq_sysfs_init() |
369 | * cannot be rolled back. |
370 | */ |
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
373 | else |
374 | desc->istate |= IRQS_SYSFS; |
375 | } |
376 | } |
377 | |
378 | static void irq_sysfs_del(struct irq_desc *desc) |
379 | { |
380 | /* |
381 | * Only invoke kobject_del() when kobject_add() was successfully |
382 | * invoked for the descriptor. This covers both early boot, where |
383 | * sysfs is not initialized yet, and the case of a failed |
384 | * kobject_add() invocation. |
385 | */ |
386 | if (desc->istate & IRQS_SYSFS) |
		kobject_del(&desc->kobj);
388 | } |
389 | |
390 | static int __init irq_sysfs_init(void) |
391 | { |
392 | struct irq_desc *desc; |
393 | int irq; |
394 | |
395 | /* Prevent concurrent irq alloc/free */ |
396 | irq_lock_sparse(); |
397 | |
	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
399 | if (!irq_kobj_base) { |
400 | irq_unlock_sparse(); |
401 | return -ENOMEM; |
402 | } |
403 | |
404 | /* Add the already allocated interrupts */ |
405 | for_each_irq_desc(irq, desc) |
406 | irq_sysfs_add(irq, desc); |
407 | irq_unlock_sparse(); |
408 | |
409 | return 0; |
410 | } |
411 | postcore_initcall(irq_sysfs_init); |
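
/*
 * Resulting sysfs layout (one directory per allocated interrupt), as
 * created by irq_sysfs_init()/irq_sysfs_add() above:
 *
 *   /sys/kernel/irq/<irq>/per_cpu_count
 *   /sys/kernel/irq/<irq>/chip_name
 *   /sys/kernel/irq/<irq>/hwirq
 *   /sys/kernel/irq/<irq>/type
 *   /sys/kernel/irq/<irq>/wakeup
 *   /sys/kernel/irq/<irq>/name
 *   /sys/kernel/irq/<irq>/actions
 */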
412 | |
413 | #else /* !CONFIG_SYSFS */ |
414 | |
415 | static const struct kobj_type irq_kobj_type = { |
416 | .release = irq_kobj_release, |
417 | }; |
418 | |
419 | static void irq_sysfs_add(int irq, struct irq_desc *desc) {} |
420 | static void irq_sysfs_del(struct irq_desc *desc) {} |
421 | |
422 | #endif /* CONFIG_SYSFS */ |
423 | |
424 | struct irq_desc *irq_to_desc(unsigned int irq) |
425 | { |
	return mtree_load(&sparse_irqs, irq);
427 | } |
428 | #ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE |
429 | EXPORT_SYMBOL_GPL(irq_to_desc); |
430 | #endif |
431 | |
432 | void irq_lock_sparse(void) |
433 | { |
434 | mutex_lock(&sparse_irq_lock); |
435 | } |
436 | |
437 | void irq_unlock_sparse(void) |
438 | { |
	mutex_unlock(&sparse_irq_lock);
440 | } |
441 | |
442 | static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, |
443 | const struct cpumask *affinity, |
444 | struct module *owner) |
445 | { |
446 | struct irq_desc *desc; |
447 | int ret; |
448 | |
	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
450 | if (!desc) |
451 | return NULL; |
452 | |
453 | ret = init_desc(desc, irq, node, flags, affinity, owner); |
454 | if (unlikely(ret)) { |
		kfree(desc);
456 | return NULL; |
457 | } |
458 | |
459 | return desc; |
460 | } |
461 | |
462 | static void irq_kobj_release(struct kobject *kobj) |
463 | { |
464 | struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); |
465 | |
466 | free_masks(desc); |
	free_percpu(desc->kstat_irqs);
	kfree(desc);
469 | } |
470 | |
471 | static void delayed_free_desc(struct rcu_head *rhp) |
472 | { |
473 | struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); |
474 | |
	kobject_put(&desc->kobj);
476 | } |
477 | |
478 | static void free_desc(unsigned int irq) |
479 | { |
480 | struct irq_desc *desc = irq_to_desc(irq); |
481 | |
482 | irq_remove_debugfs_entry(desc); |
483 | unregister_irq_proc(irq, desc); |
484 | |
485 | /* |
486 | * sparse_irq_lock protects also show_interrupts() and |
487 | * kstat_irq_usr(). Once we deleted the descriptor from the |
488 | * sparse tree we can free it. Access in proc will fail to |
489 | * lookup the descriptor. |
490 | * |
491 | * The sysfs entry must be serialized against a concurrent |
492 | * irq_sysfs_init() as well. |
493 | */ |
494 | irq_sysfs_del(desc); |
495 | delete_irq_desc(irq); |
496 | |
497 | /* |
498 | * We free the descriptor, masks and stat fields via RCU. That |
499 | * allows demultiplex interrupts to do rcu based management of |
500 | * the child interrupts. |
501 | * This also allows us to use rcu in kstat_irqs_usr(). |
502 | */ |
	call_rcu(&desc->rcu, delayed_free_desc);
504 | } |
505 | |
506 | static int alloc_descs(unsigned int start, unsigned int cnt, int node, |
507 | const struct irq_affinity_desc *affinity, |
508 | struct module *owner) |
509 | { |
510 | struct irq_desc *desc; |
511 | int i; |
512 | |
513 | /* Validate affinity mask(s) */ |
514 | if (affinity) { |
515 | for (i = 0; i < cnt; i++) { |
			if (cpumask_empty(&affinity[i].mask))
517 | return -EINVAL; |
518 | } |
519 | } |
520 | |
521 | for (i = 0; i < cnt; i++) { |
522 | const struct cpumask *mask = NULL; |
523 | unsigned int flags = 0; |
524 | |
525 | if (affinity) { |
526 | if (affinity->is_managed) { |
527 | flags = IRQD_AFFINITY_MANAGED | |
528 | IRQD_MANAGED_SHUTDOWN; |
529 | } |
530 | mask = &affinity->mask; |
			node = cpu_to_node(cpumask_first(mask));
532 | affinity++; |
533 | } |
534 | |
		desc = alloc_desc(start + i, node, flags, mask, owner);
536 | if (!desc) |
537 | goto err; |
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
541 | } |
542 | return start; |
543 | |
544 | err: |
545 | for (i--; i >= 0; i--) |
		free_desc(start + i);
547 | return -ENOMEM; |
548 | } |
549 | |
550 | static int irq_expand_nr_irqs(unsigned int nr) |
551 | { |
552 | if (nr > MAX_SPARSE_IRQS) |
553 | return -ENOMEM; |
554 | nr_irqs = nr; |
555 | return 0; |
556 | } |
557 | |
558 | int __init early_irq_init(void) |
559 | { |
560 | int i, initcnt, node = first_online_node; |
561 | struct irq_desc *desc; |
562 | |
563 | init_irq_default_affinity(); |
564 | |
565 | /* Let arch update nr_irqs and return the nr of preallocated irqs */ |
566 | initcnt = arch_probe_nr_irqs(); |
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
568 | NR_IRQS, nr_irqs, initcnt); |
569 | |
570 | if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS)) |
571 | nr_irqs = MAX_SPARSE_IRQS; |
572 | |
573 | if (WARN_ON(initcnt > MAX_SPARSE_IRQS)) |
574 | initcnt = MAX_SPARSE_IRQS; |
575 | |
576 | if (initcnt > nr_irqs) |
577 | nr_irqs = initcnt; |
578 | |
579 | for (i = 0; i < initcnt; i++) { |
		desc = alloc_desc(i, node, 0, NULL, NULL);
		irq_insert_desc(i, desc);
582 | } |
583 | return arch_early_irq_init(); |
584 | } |
585 | |
586 | #else /* !CONFIG_SPARSE_IRQ */ |
587 | |
588 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
589 | [0 ... NR_IRQS-1] = { |
590 | .handle_irq = handle_bad_irq, |
591 | .depth = 1, |
592 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), |
593 | } |
594 | }; |
595 | |
596 | int __init early_irq_init(void) |
597 | { |
598 | int count, i, node = first_online_node; |
599 | int ret; |
600 | |
601 | init_irq_default_affinity(); |
602 | |
	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
604 | |
605 | count = ARRAY_SIZE(irq_desc); |
606 | |
607 | for (i = 0; i < count; i++) { |
608 | ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL); |
609 | if (unlikely(ret)) |
610 | goto __free_desc_res; |
611 | } |
612 | |
613 | return arch_early_irq_init(); |
614 | |
615 | __free_desc_res: |
616 | while (--i >= 0) { |
617 | free_masks(irq_desc + i); |
618 | free_percpu(irq_desc[i].kstat_irqs); |
619 | } |
620 | |
621 | return ret; |
622 | } |
623 | |
624 | struct irq_desc *irq_to_desc(unsigned int irq) |
625 | { |
626 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; |
627 | } |
628 | EXPORT_SYMBOL(irq_to_desc); |
629 | |
630 | static void free_desc(unsigned int irq) |
631 | { |
632 | struct irq_desc *desc = irq_to_desc(irq); |
633 | unsigned long flags; |
634 | |
635 | raw_spin_lock_irqsave(&desc->lock, flags); |
636 | desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); |
637 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
638 | delete_irq_desc(irq); |
639 | } |
640 | |
641 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, |
642 | const struct irq_affinity_desc *affinity, |
643 | struct module *owner) |
644 | { |
645 | u32 i; |
646 | |
647 | for (i = 0; i < cnt; i++) { |
648 | struct irq_desc *desc = irq_to_desc(start + i); |
649 | |
650 | desc->owner = owner; |
651 | irq_insert_desc(start + i, desc); |
652 | } |
653 | return start; |
654 | } |
655 | |
656 | static int irq_expand_nr_irqs(unsigned int nr) |
657 | { |
658 | return -ENOMEM; |
659 | } |
660 | |
661 | void irq_mark_irq(unsigned int irq) |
662 | { |
663 | mutex_lock(&sparse_irq_lock); |
664 | irq_insert_desc(irq, irq_desc + irq); |
665 | mutex_unlock(&sparse_irq_lock); |
666 | } |
667 | |
668 | #ifdef CONFIG_GENERIC_IRQ_LEGACY |
669 | void irq_init_desc(unsigned int irq) |
670 | { |
671 | free_desc(irq); |
672 | } |
673 | #endif |
674 | |
675 | #endif /* !CONFIG_SPARSE_IRQ */ |
676 | |
677 | int handle_irq_desc(struct irq_desc *desc) |
678 | { |
679 | struct irq_data *data; |
680 | |
681 | if (!desc) |
682 | return -EINVAL; |
683 | |
684 | data = irq_desc_get_irq_data(desc); |
685 | if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data))) |
686 | return -EPERM; |
687 | |
688 | generic_handle_irq_desc(desc); |
689 | return 0; |
690 | } |
691 | |
692 | /** |
693 | * generic_handle_irq - Invoke the handler for a particular irq |
694 | * @irq: The irq number to handle |
695 | * |
696 | * Returns: 0 on success, or -EINVAL if conversion has failed |
697 | * |
698 | * This function must be called from an IRQ context with irq regs |
699 | * initialized. |
700 | */ |
701 | int generic_handle_irq(unsigned int irq) |
702 | { |
	return handle_irq_desc(irq_to_desc(irq));
704 | } |
705 | EXPORT_SYMBOL_GPL(generic_handle_irq); |
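
/*
 * Illustrative sketch (not part of this file): a demultiplexing handler
 * in a driver might walk a pending register and feed each child
 * interrupt to generic_handle_irq(). All demo_* names, the register
 * offset and the child_irq_base field are assumptions made up for this
 * example.
 *
 *	static irqreturn_t demo_demux_handler(int irq, void *data)
 *	{
 *		struct demo_chip *chip = data;
 *		unsigned long pending = readl(chip->regs + DEMO_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, DEMO_NR_CHILDREN)
 *			generic_handle_irq(chip->child_irq_base + bit);
 *
 *		return pending ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */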
706 | |
707 | /** |
708 | * generic_handle_irq_safe - Invoke the handler for a particular irq from any |
709 | * context. |
710 | * @irq: The irq number to handle |
711 | * |
712 | * Returns: 0 on success, a negative value on error. |
713 | * |
714 | * This function can be called from any context (IRQ or process context). It |
715 | * will report an error if not invoked from IRQ context and the irq has been |
716 | * marked to enforce IRQ-context only. |
717 | */ |
718 | int generic_handle_irq_safe(unsigned int irq) |
719 | { |
720 | unsigned long flags; |
721 | int ret; |
722 | |
723 | local_irq_save(flags); |
	ret = handle_irq_desc(irq_to_desc(irq));
725 | local_irq_restore(flags); |
726 | return ret; |
727 | } |
728 | EXPORT_SYMBOL_GPL(generic_handle_irq_safe); |
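
/*
 * Illustrative sketch (not part of this file): a threaded handler for a
 * device behind a slow bus (e.g. I2C) can forward a child interrupt from
 * process context via generic_handle_irq_safe(). The demo_* identifiers
 * are assumptions for the example only.
 *
 *	static irqreturn_t demo_expander_thread_fn(int irq, void *data)
 *	{
 *		struct demo_expander *exp = data;
 *		unsigned int child_irq = demo_read_pending_child(exp);
 *
 *		if (child_irq)
 *			generic_handle_irq_safe(child_irq);
 *		return IRQ_HANDLED;
 *	}
 */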
729 | |
730 | #ifdef CONFIG_IRQ_DOMAIN |
731 | /** |
732 | * generic_handle_domain_irq - Invoke the handler for a HW irq belonging |
733 | * to a domain. |
734 | * @domain: The domain where to perform the lookup |
735 | * @hwirq: The HW irq number to convert to a logical one |
736 | * |
737 | * Returns: 0 on success, or -EINVAL if conversion has failed |
738 | * |
739 | * This function must be called from an IRQ context with irq regs |
740 | * initialized. |
741 | */ |
742 | int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) |
743 | { |
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
745 | } |
746 | EXPORT_SYMBOL_GPL(generic_handle_domain_irq); |
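
/*
 * Illustrative sketch (not part of this file): a chained flow handler
 * usually resolves hardware interrupt numbers through its irq domain
 * instead of caching Linux irq numbers. The demo_* names and register
 * offset are assumptions for the example.
 *
 *	static void demo_chained_handler(struct irq_desc *desc)
 *	{
 *		struct demo_chip *chip = irq_desc_get_handler_data(desc);
 *		struct irq_chip *parent = irq_desc_get_chip(desc);
 *		unsigned long status;
 *		int hwirq;
 *
 *		chained_irq_enter(parent, desc);
 *		status = readl(chip->regs + DEMO_STATUS);
 *		for_each_set_bit(hwirq, &status, DEMO_NR_HWIRQS)
 *			generic_handle_domain_irq(chip->domain, hwirq);
 *		chained_irq_exit(parent, desc);
 *	}
 */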
747 | |
748 | /** |
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
750 | * to a domain from any context. |
751 | * @domain: The domain where to perform the lookup |
752 | * @hwirq: The HW irq number to convert to a logical one |
753 | * |
754 | * Returns: 0 on success, a negative value on error. |
755 | * |
756 | * This function can be called from any context (IRQ or process |
757 | * context). If the interrupt is marked as 'enforce IRQ-context only' then |
758 | * the function must be invoked from hard interrupt context. |
759 | */ |
760 | int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq) |
761 | { |
762 | unsigned long flags; |
763 | int ret; |
764 | |
765 | local_irq_save(flags); |
	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
767 | local_irq_restore(flags); |
768 | return ret; |
769 | } |
770 | EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe); |
771 | |
772 | /** |
773 | * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging |
774 | * to a domain. |
775 | * @domain: The domain where to perform the lookup |
776 | * @hwirq: The HW irq number to convert to a logical one |
777 | * |
778 | * Returns: 0 on success, or -EINVAL if conversion has failed |
779 | * |
780 | * This function must be called from an NMI context with irq regs |
781 | * initialized. |
782 | **/ |
783 | int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq) |
784 | { |
785 | WARN_ON_ONCE(!in_nmi()); |
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
787 | } |
788 | #endif |
789 | |
790 | /* Dynamic interrupt handling */ |
791 | |
792 | /** |
793 | * irq_free_descs - free irq descriptors |
794 | * @from: Start of descriptor range |
795 | * @cnt: Number of consecutive irqs to free |
796 | */ |
797 | void irq_free_descs(unsigned int from, unsigned int cnt) |
798 | { |
799 | int i; |
800 | |
801 | if (from >= nr_irqs || (from + cnt) > nr_irqs) |
802 | return; |
803 | |
804 | mutex_lock(&sparse_irq_lock); |
805 | for (i = 0; i < cnt; i++) |
		free_desc(from + i);

	mutex_unlock(&sparse_irq_lock);
809 | } |
810 | EXPORT_SYMBOL_GPL(irq_free_descs); |
811 | |
812 | /** |
813 | * __irq_alloc_descs - allocate and initialize a range of irq descriptors |
814 | * @irq: Allocate for specific irq number if irq >= 0 |
815 | * @from: Start the search from this irq number |
816 | * @cnt: Number of consecutive irqs to allocate. |
817 | * @node: Preferred node on which the irq descriptor should be allocated |
818 | * @owner: Owning module (can be NULL) |
819 | * @affinity: Optional pointer to an affinity mask array of size @cnt which |
820 | * hints where the irq descriptors should be allocated and which |
821 | * default affinities to use |
822 | * |
823 | * Returns the first irq number or error code |
824 | */ |
825 | int __ref |
826 | __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
827 | struct module *owner, const struct irq_affinity_desc *affinity) |
828 | { |
829 | int start, ret; |
830 | |
831 | if (!cnt) |
832 | return -EINVAL; |
833 | |
834 | if (irq >= 0) { |
835 | if (from > irq) |
836 | return -EINVAL; |
837 | from = irq; |
838 | } else { |
839 | /* |
840 | * For interrupts which are freely allocated the |
841 | * architecture can force a lower bound to the @from |
842 | * argument. x86 uses this to exclude the GSI space. |
843 | */ |
844 | from = arch_dynirq_lower_bound(from); |
845 | } |
846 | |
847 | mutex_lock(&sparse_irq_lock); |
848 | |
849 | start = irq_find_free_area(from, cnt); |
850 | ret = -EEXIST; |
	if (irq >= 0 && start != irq)
852 | goto unlock; |
853 | |
854 | if (start + cnt > nr_irqs) { |
		ret = irq_expand_nr_irqs(start + cnt);
856 | if (ret) |
857 | goto unlock; |
858 | } |
859 | ret = alloc_descs(start, cnt, node, affinity, owner); |
860 | unlock: |
	mutex_unlock(&sparse_irq_lock);
862 | return ret; |
863 | } |
864 | EXPORT_SYMBOL_GPL(__irq_alloc_descs); |
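
/*
 * Illustrative sketch (not part of this file): callers normally go
 * through the irq_alloc_descs()/irq_alloc_desc() wrappers from
 * <linux/irq.h>, which pass THIS_MODULE and a NULL affinity array, and
 * release the range again with irq_free_descs(). The count of 4 is
 * arbitrary.
 *
 *	int base = irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE);
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 */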
865 | |
866 | /** |
867 | * irq_get_next_irq - get next allocated irq number |
868 | * @offset: where to start the search |
869 | * |
870 | * Returns next irq number after offset or nr_irqs if none is found. |
871 | */ |
872 | unsigned int irq_get_next_irq(unsigned int offset) |
873 | { |
874 | return irq_find_at_or_after(offset); |
875 | } |
876 | |
877 | struct irq_desc * |
878 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, |
879 | unsigned int check) |
880 | { |
881 | struct irq_desc *desc = irq_to_desc(irq); |
882 | |
883 | if (desc) { |
884 | if (check & _IRQ_DESC_CHECK) { |
885 | if ((check & _IRQ_DESC_PERCPU) && |
886 | !irq_settings_is_per_cpu_devid(desc)) |
887 | return NULL; |
888 | |
889 | if (!(check & _IRQ_DESC_PERCPU) && |
890 | irq_settings_is_per_cpu_devid(desc)) |
891 | return NULL; |
892 | } |
893 | |
894 | if (bus) |
895 | chip_bus_lock(desc); |
896 | raw_spin_lock_irqsave(&desc->lock, *flags); |
897 | } |
898 | return desc; |
899 | } |
900 | |
901 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) |
902 | __releases(&desc->lock) |
903 | { |
904 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
905 | if (bus) |
906 | chip_bus_sync_unlock(desc); |
907 | } |
908 | |
909 | int irq_set_percpu_devid_partition(unsigned int irq, |
910 | const struct cpumask *affinity) |
911 | { |
912 | struct irq_desc *desc = irq_to_desc(irq); |
913 | |
914 | if (!desc) |
915 | return -EINVAL; |
916 | |
917 | if (desc->percpu_enabled) |
918 | return -EINVAL; |
919 | |
	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
921 | |
922 | if (!desc->percpu_enabled) |
923 | return -ENOMEM; |
924 | |
925 | if (affinity) |
926 | desc->percpu_affinity = affinity; |
927 | else |
928 | desc->percpu_affinity = cpu_possible_mask; |
929 | |
930 | irq_set_percpu_devid_flags(irq); |
931 | return 0; |
932 | } |
933 | |
934 | int irq_set_percpu_devid(unsigned int irq) |
935 | { |
936 | return irq_set_percpu_devid_partition(irq, NULL); |
937 | } |
938 | |
939 | int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) |
940 | { |
941 | struct irq_desc *desc = irq_to_desc(irq); |
942 | |
943 | if (!desc || !desc->percpu_enabled) |
944 | return -EINVAL; |
945 | |
946 | if (affinity) |
		cpumask_copy(affinity, desc->percpu_affinity);
948 | |
949 | return 0; |
950 | } |
951 | EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition); |
952 | |
953 | void kstat_incr_irq_this_cpu(unsigned int irq) |
954 | { |
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
956 | } |
957 | |
958 | /** |
959 | * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu |
960 | * @irq: The interrupt number |
961 | * @cpu: The cpu number |
962 | * |
963 | * Returns the sum of interrupt counts on @cpu since boot for |
964 | * @irq. The caller must ensure that the interrupt is not removed |
965 | * concurrently. |
966 | */ |
967 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
968 | { |
969 | struct irq_desc *desc = irq_to_desc(irq); |
970 | |
971 | return desc && desc->kstat_irqs ? |
972 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; |
973 | } |
974 | |
975 | static bool irq_is_nmi(struct irq_desc *desc) |
976 | { |
977 | return desc->istate & IRQS_NMI; |
978 | } |
979 | |
980 | static unsigned int kstat_irqs(unsigned int irq) |
981 | { |
982 | struct irq_desc *desc = irq_to_desc(irq); |
983 | unsigned int sum = 0; |
984 | int cpu; |
985 | |
986 | if (!desc || !desc->kstat_irqs) |
987 | return 0; |
988 | if (!irq_settings_is_per_cpu_devid(desc) && |
989 | !irq_settings_is_per_cpu(desc) && |
990 | !irq_is_nmi(desc)) |
991 | return data_race(desc->tot_count); |
992 | |
993 | for_each_possible_cpu(cpu) |
994 | sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu)); |
995 | return sum; |
996 | } |
997 | |
998 | /** |
999 | * kstat_irqs_usr - Get the statistics for an interrupt from thread context |
1000 | * @irq: The interrupt number |
1001 | * |
1002 | * Returns the sum of interrupt counts on all cpus since boot for @irq. |
1003 | * |
1004 | * It uses rcu to protect the access since a concurrent removal of an |
1005 | * interrupt descriptor is observing an rcu grace period before |
1006 | * delayed_free_desc()/irq_kobj_release(). |
1007 | */ |
1008 | unsigned int kstat_irqs_usr(unsigned int irq) |
1009 | { |
1010 | unsigned int sum; |
1011 | |
1012 | rcu_read_lock(); |
1013 | sum = kstat_irqs(irq); |
1014 | rcu_read_unlock(); |
1015 | return sum; |
1016 | } |
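
/*
 * Illustrative sketch (not part of this file): a reader running in
 * thread context (e.g. a procfs show routine) can print per-interrupt
 * totals with kstat_irqs_usr() without any additional locking on the
 * caller side.
 *
 *	static void demo_show_irq_total(struct seq_file *m, unsigned int irq)
 *	{
 *		seq_printf(m, "irq %u: %u\n", irq, kstat_irqs_usr(irq));
 *	}
 */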
1017 | |
1018 | #ifdef CONFIG_LOCKDEP |
1019 | void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, |
1020 | struct lock_class_key *request_class) |
1021 | { |
1022 | struct irq_desc *desc = irq_to_desc(irq); |
1023 | |
1024 | if (desc) { |
1025 | lockdep_set_class(&desc->lock, lock_class); |
1026 | lockdep_set_class(&desc->request_mutex, request_class); |
1027 | } |
1028 | } |
1029 | EXPORT_SYMBOL_GPL(__irq_set_lockdep_class); |
1030 | #endif |
1031 | |