1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
4 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King |
5 | * |
6 | * This file contains the core interrupt handling code, for irq-chip based |
7 | * architectures. Detailed information is available in |
8 | * Documentation/core-api/genericirq.rst |
9 | */ |
10 | |
11 | #include <linux/irq.h> |
12 | #include <linux/msi.h> |
13 | #include <linux/module.h> |
14 | #include <linux/interrupt.h> |
15 | #include <linux/kernel_stat.h> |
16 | #include <linux/irqdomain.h> |
17 | |
18 | #include <trace/events/irq.h> |
19 | |
20 | #include "internals.h" |
21 | |
22 | static irqreturn_t bad_chained_irq(int irq, void *dev_id) |
23 | { |
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
25 | return IRQ_NONE; |
26 | } |
27 | |
/*
 * Chained handlers should never call an action on their IRQ. This
 * default action will emit a warning if such a thing happens.
 */
32 | struct irqaction chained_action = { |
33 | .handler = bad_chained_irq, |
34 | }; |
35 | |
36 | /** |
37 | * irq_set_chip - set the irq chip for an irq |
38 | * @irq: irq number |
39 | * @chip: pointer to irq chip description structure |
40 | */ |
41 | int irq_set_chip(unsigned int irq, const struct irq_chip *chip) |
42 | { |
43 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
45 | |
46 | if (!desc) |
47 | return -EINVAL; |
48 | |
49 | desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); |
50 | irq_put_desc_unlock(desc, flags); |
51 | /* |
52 | * For !CONFIG_SPARSE_IRQ make the irq show up in |
53 | * allocated_irqs. |
54 | */ |
55 | irq_mark_irq(irq); |
56 | return 0; |
57 | } |
58 | EXPORT_SYMBOL(irq_set_chip); |
59 | |
60 | /** |
61 | * irq_set_irq_type - set the irq trigger type for an irq |
62 | * @irq: irq number |
63 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
64 | */ |
65 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
66 | { |
67 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
69 | int ret = 0; |
70 | |
71 | if (!desc) |
72 | return -EINVAL; |
73 | |
	ret = __irq_set_trigger(desc, type);
75 | irq_put_desc_busunlock(desc, flags); |
76 | return ret; |
77 | } |
78 | EXPORT_SYMBOL(irq_set_irq_type); |
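
/*
 * Usage sketch, assuming a hypothetical driver and irq number: a driver
 * which knows its line is rising-edge triggered would typically set the
 * trigger type during setup:
 *
 *	if (irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING))
 *		pr_err("cannot set trigger type for irq %u\n", irq);
 */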
79 | |
80 | /** |
81 | * irq_set_handler_data - set irq handler data for an irq |
82 | * @irq: Interrupt number |
83 | * @data: Pointer to interrupt specific data |
84 | * |
 * Set the interrupt handler data for an irq
86 | */ |
87 | int irq_set_handler_data(unsigned int irq, void *data) |
88 | { |
89 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
91 | |
92 | if (!desc) |
93 | return -EINVAL; |
94 | desc->irq_common_data.handler_data = data; |
95 | irq_put_desc_unlock(desc, flags); |
96 | return 0; |
97 | } |
98 | EXPORT_SYMBOL(irq_set_handler_data); |
99 | |
100 | /** |
101 | * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset |
102 | * @irq_base: Interrupt number base |
103 | * @irq_offset: Interrupt number offset |
104 | * @entry: Pointer to MSI descriptor data |
105 | * |
106 | * Set the MSI descriptor entry for an irq at offset |
107 | */ |
108 | int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
109 | struct msi_desc *entry) |
110 | { |
111 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset,
						  &flags, IRQ_GET_DESC_CHECK_GLOBAL);
113 | |
114 | if (!desc) |
115 | return -EINVAL; |
116 | desc->irq_common_data.msi_desc = entry; |
117 | if (entry && !irq_offset) |
118 | entry->irq = irq_base; |
119 | irq_put_desc_unlock(desc, flags); |
120 | return 0; |
121 | } |
122 | |
123 | /** |
124 | * irq_set_msi_desc - set MSI descriptor data for an irq |
125 | * @irq: Interrupt number |
126 | * @entry: Pointer to MSI descriptor data |
127 | * |
128 | * Set the MSI descriptor entry for an irq |
129 | */ |
130 | int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) |
131 | { |
	return irq_set_msi_desc_off(irq, 0, entry);
133 | } |
134 | |
135 | /** |
136 | * irq_set_chip_data - set irq chip data for an irq |
137 | * @irq: Interrupt number |
138 | * @data: Pointer to chip specific data |
139 | * |
140 | * Set the hardware irq chip data for an irq |
141 | */ |
142 | int irq_set_chip_data(unsigned int irq, void *data) |
143 | { |
144 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
146 | |
147 | if (!desc) |
148 | return -EINVAL; |
149 | desc->irq_data.chip_data = data; |
150 | irq_put_desc_unlock(desc, flags); |
151 | return 0; |
152 | } |
153 | EXPORT_SYMBOL(irq_set_chip_data); |
154 | |
155 | struct irq_data *irq_get_irq_data(unsigned int irq) |
156 | { |
157 | struct irq_desc *desc = irq_to_desc(irq); |
158 | |
159 | return desc ? &desc->irq_data : NULL; |
160 | } |
161 | EXPORT_SYMBOL_GPL(irq_get_irq_data); |
162 | |
163 | static void irq_state_clr_disabled(struct irq_desc *desc) |
164 | { |
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
166 | } |
167 | |
168 | static void irq_state_clr_masked(struct irq_desc *desc) |
169 | { |
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
171 | } |
172 | |
173 | static void irq_state_clr_started(struct irq_desc *desc) |
174 | { |
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
176 | } |
177 | |
178 | static void irq_state_set_started(struct irq_desc *desc) |
179 | { |
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
181 | } |
182 | |
183 | enum { |
184 | IRQ_STARTUP_NORMAL, |
185 | IRQ_STARTUP_MANAGED, |
186 | IRQ_STARTUP_ABORT, |
187 | }; |
188 | |
189 | #ifdef CONFIG_SMP |
190 | static int |
191 | __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, |
192 | bool force) |
193 | { |
194 | struct irq_data *d = irq_desc_get_irq_data(desc); |
195 | |
196 | if (!irqd_affinity_is_managed(d)) |
197 | return IRQ_STARTUP_NORMAL; |
198 | |
199 | irqd_clr_managed_shutdown(d); |
200 | |
201 | if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) { |
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shut-down IRQ. Chained interrupt
		 * installation or irq auto probing should not happen on
		 * managed irqs either.
		 */
208 | if (WARN_ON_ONCE(force)) |
209 | return IRQ_STARTUP_ABORT; |
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
216 | return IRQ_STARTUP_ABORT; |
217 | } |
218 | /* |
219 | * Managed interrupts have reserved resources, so this should not |
220 | * happen. |
221 | */ |
222 | if (WARN_ON(irq_domain_activate_irq(d, false))) |
223 | return IRQ_STARTUP_ABORT; |
224 | return IRQ_STARTUP_MANAGED; |
225 | } |
226 | #else |
227 | static __always_inline int |
228 | __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, |
229 | bool force) |
230 | { |
231 | return IRQ_STARTUP_NORMAL; |
232 | } |
233 | #endif |
234 | |
235 | static int __irq_startup(struct irq_desc *desc) |
236 | { |
237 | struct irq_data *d = irq_desc_get_irq_data(desc); |
238 | int ret = 0; |
239 | |
240 | /* Warn if this interrupt is not activated but try nevertheless */ |
241 | WARN_ON_ONCE(!irqd_is_activated(d)); |
242 | |
243 | if (d->chip->irq_startup) { |
244 | ret = d->chip->irq_startup(d); |
245 | irq_state_clr_disabled(desc); |
246 | irq_state_clr_masked(desc); |
247 | } else { |
248 | irq_enable(desc); |
249 | } |
250 | irq_state_set_started(desc); |
251 | return ret; |
252 | } |
253 | |
254 | int irq_startup(struct irq_desc *desc, bool resend, bool force) |
255 | { |
256 | struct irq_data *d = irq_desc_get_irq_data(desc); |
257 | const struct cpumask *aff = irq_data_get_affinity_mask(d); |
258 | int ret = 0; |
259 | |
260 | desc->depth = 0; |
261 | |
262 | if (irqd_is_started(d)) { |
263 | irq_enable(desc); |
264 | } else { |
265 | switch (__irq_startup_managed(desc, aff, force)) { |
266 | case IRQ_STARTUP_NORMAL: |
267 | if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) |
268 | irq_setup_affinity(desc); |
269 | ret = __irq_startup(desc); |
270 | if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) |
271 | irq_setup_affinity(desc); |
272 | break; |
273 | case IRQ_STARTUP_MANAGED: |
			irq_do_set_affinity(d, aff, false);
275 | ret = __irq_startup(desc); |
276 | break; |
277 | case IRQ_STARTUP_ABORT: |
278 | irqd_set_managed_shutdown(d); |
279 | return 0; |
280 | } |
281 | } |
282 | if (resend) |
		check_irq_resend(desc, false);
284 | |
285 | return ret; |
286 | } |
287 | |
288 | int irq_activate(struct irq_desc *desc) |
289 | { |
290 | struct irq_data *d = irq_desc_get_irq_data(desc); |
291 | |
292 | if (!irqd_affinity_is_managed(d)) |
		return irq_domain_activate_irq(d, false);
294 | return 0; |
295 | } |
296 | |
297 | int irq_activate_and_startup(struct irq_desc *desc, bool resend) |
298 | { |
299 | if (WARN_ON(irq_activate(desc))) |
300 | return 0; |
301 | return irq_startup(desc, resend, IRQ_START_FORCE); |
302 | } |
303 | |
304 | static void __irq_disable(struct irq_desc *desc, bool mask); |
305 | |
306 | void irq_shutdown(struct irq_desc *desc) |
307 | { |
	if (irqd_is_started(&desc->irq_data)) {
309 | clear_irq_resend(desc); |
310 | desc->depth = 1; |
311 | if (desc->irq_data.chip->irq_shutdown) { |
312 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); |
313 | irq_state_set_disabled(desc); |
314 | irq_state_set_masked(desc); |
315 | } else { |
			__irq_disable(desc, true);
317 | } |
318 | irq_state_clr_started(desc); |
319 | } |
320 | } |
321 | |
322 | |
323 | void irq_shutdown_and_deactivate(struct irq_desc *desc) |
324 | { |
325 | irq_shutdown(desc); |
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
333 | } |
334 | |
335 | void irq_enable(struct irq_desc *desc) |
336 | { |
	if (!irqd_irq_disabled(&desc->irq_data)) {
338 | unmask_irq(desc); |
339 | } else { |
340 | irq_state_clr_disabled(desc); |
341 | if (desc->irq_data.chip->irq_enable) { |
342 | desc->irq_data.chip->irq_enable(&desc->irq_data); |
343 | irq_state_clr_masked(desc); |
344 | } else { |
345 | unmask_irq(desc); |
346 | } |
347 | } |
348 | } |
349 | |
350 | static void __irq_disable(struct irq_desc *desc, bool mask) |
351 | { |
	if (irqd_irq_disabled(&desc->irq_data)) {
353 | if (mask) |
354 | mask_irq(desc); |
355 | } else { |
356 | irq_state_set_disabled(desc); |
357 | if (desc->irq_data.chip->irq_disable) { |
358 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
359 | irq_state_set_masked(desc); |
360 | } else if (mask) { |
361 | mask_irq(desc); |
362 | } |
363 | } |
364 | } |
365 | |
366 | /** |
367 | * irq_disable - Mark interrupt disabled |
368 | * @desc: irq descriptor which should be disabled |
369 | * |
370 | * If the chip does not implement the irq_disable callback, we |
371 | * use a lazy disable approach. That means we mark the interrupt |
372 | * disabled, but leave the hardware unmasked. That's an |
373 | * optimization because we avoid the hardware access for the |
374 | * common case where no interrupt happens after we marked it |
375 | * disabled. If an interrupt happens, then the interrupt flow |
376 | * handler masks the line at the hardware level and marks it |
377 | * pending. |
378 | * |
379 | * If the interrupt chip does not implement the irq_disable callback, |
380 | * a driver can disable the lazy approach for a particular irq line by |
381 | * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can |
382 | * be used for devices which cannot disable the interrupt at the |
383 | * device level under certain circumstances and have to use |
384 | * disable_irq[_nosync] instead. |
385 | */ |
386 | void irq_disable(struct irq_desc *desc) |
387 | { |
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
389 | } |
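
/*
 * Usage sketch, assuming a hypothetical irq number: a driver whose
 * device cannot disable the interrupt at the device level can opt out
 * of the lazy disable optimization described above:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(irq);	// now masks the line in hardware right away
 */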
390 | |
391 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
392 | { |
393 | if (desc->irq_data.chip->irq_enable) |
394 | desc->irq_data.chip->irq_enable(&desc->irq_data); |
395 | else |
396 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
	cpumask_set_cpu(cpu, desc->percpu_enabled);
398 | } |
399 | |
400 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) |
401 | { |
402 | if (desc->irq_data.chip->irq_disable) |
403 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
404 | else |
405 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
407 | } |
408 | |
409 | static inline void mask_ack_irq(struct irq_desc *desc) |
410 | { |
411 | if (desc->irq_data.chip->irq_mask_ack) { |
412 | desc->irq_data.chip->irq_mask_ack(&desc->irq_data); |
413 | irq_state_set_masked(desc); |
414 | } else { |
415 | mask_irq(desc); |
416 | if (desc->irq_data.chip->irq_ack) |
417 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
418 | } |
419 | } |
420 | |
421 | void mask_irq(struct irq_desc *desc) |
422 | { |
	if (irqd_irq_masked(&desc->irq_data))
424 | return; |
425 | |
426 | if (desc->irq_data.chip->irq_mask) { |
427 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
428 | irq_state_set_masked(desc); |
429 | } |
430 | } |
431 | |
432 | void unmask_irq(struct irq_desc *desc) |
433 | { |
	if (!irqd_irq_masked(&desc->irq_data))
435 | return; |
436 | |
437 | if (desc->irq_data.chip->irq_unmask) { |
438 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
439 | irq_state_clr_masked(desc); |
440 | } |
441 | } |
442 | |
443 | void unmask_threaded_irq(struct irq_desc *desc) |
444 | { |
445 | struct irq_chip *chip = desc->irq_data.chip; |
446 | |
447 | if (chip->flags & IRQCHIP_EOI_THREADED) |
448 | chip->irq_eoi(&desc->irq_data); |
449 | |
450 | unmask_irq(desc); |
451 | } |
452 | |
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
461 | void handle_nested_irq(unsigned int irq) |
462 | { |
463 | struct irq_desc *desc = irq_to_desc(irq); |
464 | struct irqaction *action; |
465 | irqreturn_t action_ret; |
466 | |
467 | might_sleep(); |
468 | |
469 | raw_spin_lock_irq(&desc->lock); |
470 | |
471 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
472 | |
473 | action = desc->action; |
474 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { |
475 | desc->istate |= IRQS_PENDING; |
476 | raw_spin_unlock_irq(&desc->lock); |
477 | return; |
478 | } |
479 | |
480 | kstat_incr_irqs_this_cpu(desc); |
	atomic_inc(&desc->threads_active);
482 | raw_spin_unlock_irq(&desc->lock); |
483 | |
484 | action_ret = IRQ_NONE; |
485 | for_each_action_of_desc(desc, action) |
486 | action_ret |= action->thread_fn(action->irq, action->dev_id); |
487 | |
488 | if (!irq_settings_no_debug(desc)) |
489 | note_interrupt(desc, action_ret); |
490 | |
491 | wake_threads_waitq(desc); |
492 | } |
493 | EXPORT_SYMBOL_GPL(handle_nested_irq); |
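
/*
 * Illustrative sketch, assuming a hypothetical i2c irq-chip driver: the
 * parent's threaded handler demultiplexes its children and invokes
 * handle_nested_irq() for each pending one. The names my_chip,
 * read_pending and the domain layout are assumptions, not kernel APIs.
 *
 *	static irqreturn_t my_chip_irq_thread(int irq, void *data)
 *	{
 *		struct my_chip *chip = data;
 *		unsigned long pending = read_pending(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_irqs)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */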
494 | |
495 | static bool irq_check_poll(struct irq_desc *desc) |
496 | { |
497 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
498 | return false; |
499 | return irq_wait_for_poll(desc); |
500 | } |
501 | |
502 | static bool irq_may_run(struct irq_desc *desc) |
503 | { |
504 | unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; |
505 | |
506 | /* |
507 | * If the interrupt is not in progress and is not an armed |
508 | * wakeup interrupt, proceed. |
509 | */ |
	if (!irqd_has_set(&desc->irq_data, mask))
511 | return true; |
512 | |
513 | /* |
514 | * If the interrupt is an armed wakeup source, mark it pending |
515 | * and suspended, disable it and notify the pm core about the |
516 | * event. |
517 | */ |
518 | if (irq_pm_check_wakeup(desc)) |
519 | return false; |
520 | |
521 | /* |
522 | * Handle a potential concurrent poll on a different core. |
523 | */ |
524 | return irq_check_poll(desc); |
525 | } |
526 | |
527 | /** |
528 | * handle_simple_irq - Simple and software-decoded IRQs. |
529 | * @desc: the interrupt description structure for this irq |
530 | * |
531 | * Simple interrupts are either sent from a demultiplexing interrupt |
532 | * handler or come from hardware, where no interrupt hardware control |
533 | * is necessary. |
534 | * |
535 | * Note: The caller is expected to handle the ack, clear, mask and |
536 | * unmask issues if necessary. |
537 | */ |
538 | void handle_simple_irq(struct irq_desc *desc) |
539 | { |
540 | raw_spin_lock(&desc->lock); |
541 | |
542 | if (!irq_may_run(desc)) |
543 | goto out_unlock; |
544 | |
545 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
546 | |
547 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
548 | desc->istate |= IRQS_PENDING; |
549 | goto out_unlock; |
550 | } |
551 | |
552 | kstat_incr_irqs_this_cpu(desc); |
553 | handle_irq_event(desc); |
554 | |
555 | out_unlock: |
556 | raw_spin_unlock(&desc->lock); |
557 | } |
558 | EXPORT_SYMBOL_GPL(handle_simple_irq); |
559 | |
560 | /** |
561 | * handle_untracked_irq - Simple and software-decoded IRQs. |
562 | * @desc: the interrupt description structure for this irq |
563 | * |
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
569 | * |
570 | * Note: Like handle_simple_irq, the caller is expected to handle |
571 | * the ack, clear, mask and unmask issues if necessary. |
572 | */ |
573 | void handle_untracked_irq(struct irq_desc *desc) |
574 | { |
575 | raw_spin_lock(&desc->lock); |
576 | |
577 | if (!irq_may_run(desc)) |
578 | goto out_unlock; |
579 | |
580 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
581 | |
582 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
583 | desc->istate |= IRQS_PENDING; |
584 | goto out_unlock; |
585 | } |
586 | |
587 | desc->istate &= ~IRQS_PENDING; |
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
589 | raw_spin_unlock(&desc->lock); |
590 | |
591 | __handle_irq_event_percpu(desc); |
592 | |
593 | raw_spin_lock(&desc->lock); |
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
595 | |
596 | out_unlock: |
597 | raw_spin_unlock(&desc->lock); |
598 | } |
599 | EXPORT_SYMBOL_GPL(handle_untracked_irq); |
600 | |
601 | /* |
602 | * Called unconditionally from handle_level_irq() and only for oneshot |
603 | * interrupts from handle_fasteoi_irq() |
604 | */ |
605 | static void cond_unmask_irq(struct irq_desc *desc) |
606 | { |
607 | /* |
608 | * We need to unmask in the following cases: |
609 | * - Standard level irq (IRQF_ONESHOT is not set) |
610 | * - Oneshot irq which did not wake the thread (caused by a |
611 | * spurious interrupt or a primary handler handling it |
612 | * completely). |
613 | */ |
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
616 | unmask_irq(desc); |
617 | } |
618 | |
619 | /** |
620 | * handle_level_irq - Level type irq handler |
621 | * @desc: the interrupt description structure for this irq |
622 | * |
623 | * Level type interrupts are active as long as the hardware line has |
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so that the interrupt line is back to inactive.
627 | */ |
628 | void handle_level_irq(struct irq_desc *desc) |
629 | { |
630 | raw_spin_lock(&desc->lock); |
631 | mask_ack_irq(desc); |
632 | |
633 | if (!irq_may_run(desc)) |
634 | goto out_unlock; |
635 | |
636 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
637 | |
	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
642 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
643 | desc->istate |= IRQS_PENDING; |
644 | goto out_unlock; |
645 | } |
646 | |
647 | kstat_incr_irqs_this_cpu(desc); |
648 | handle_irq_event(desc); |
649 | |
650 | cond_unmask_irq(desc); |
651 | |
652 | out_unlock: |
653 | raw_spin_unlock(&desc->lock); |
654 | } |
655 | EXPORT_SYMBOL_GPL(handle_level_irq); |
656 | |
657 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) |
658 | { |
659 | if (!(desc->istate & IRQS_ONESHOT)) { |
660 | chip->irq_eoi(&desc->irq_data); |
661 | return; |
662 | } |
663 | /* |
664 | * We need to unmask in the following cases: |
665 | * - Oneshot irq which did not wake the thread (caused by a |
666 | * spurious interrupt or a primary handler handling it |
667 | * completely). |
668 | */ |
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
671 | chip->irq_eoi(&desc->irq_data); |
672 | unmask_irq(desc); |
673 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { |
674 | chip->irq_eoi(&desc->irq_data); |
675 | } |
676 | } |
677 | |
678 | /** |
679 | * handle_fasteoi_irq - irq handler for transparent controllers |
680 | * @desc: the interrupt description structure for this irq |
681 | * |
682 | * Only a single callback will be issued to the chip: an ->eoi() |
683 | * call when the interrupt has been serviced. This enables support |
684 | * for modern forms of interrupt handlers, which handle the flow |
685 | * details in hardware, transparently. |
686 | */ |
687 | void handle_fasteoi_irq(struct irq_desc *desc) |
688 | { |
689 | struct irq_chip *chip = desc->irq_data.chip; |
690 | |
691 | raw_spin_lock(&desc->lock); |
692 | |
693 | /* |
694 | * When an affinity change races with IRQ handling, the next interrupt |
695 | * can arrive on the new CPU before the original CPU has completed |
696 | * handling the previous one - it may need to be resent. |
697 | */ |
698 | if (!irq_may_run(desc)) { |
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
700 | desc->istate |= IRQS_PENDING; |
701 | goto out; |
702 | } |
703 | |
704 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
705 | |
	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
710 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
711 | desc->istate |= IRQS_PENDING; |
712 | mask_irq(desc); |
713 | goto out; |
714 | } |
715 | |
716 | kstat_incr_irqs_this_cpu(desc); |
717 | if (desc->istate & IRQS_ONESHOT) |
718 | mask_irq(desc); |
719 | |
720 | handle_irq_event(desc); |
721 | |
722 | cond_unmask_eoi_irq(desc, chip); |
723 | |
724 | /* |
725 | * When the race described above happens this will resend the interrupt. |
726 | */ |
727 | if (unlikely(desc->istate & IRQS_PENDING)) |
		check_irq_resend(desc, false);
729 | |
730 | raw_spin_unlock(&desc->lock); |
731 | return; |
732 | out: |
733 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
734 | chip->irq_eoi(&desc->irq_data); |
735 | raw_spin_unlock(&desc->lock); |
736 | } |
737 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); |
738 | |
739 | /** |
740 | * handle_fasteoi_nmi - irq handler for NMI interrupt lines |
741 | * @desc: the interrupt description structure for this irq |
742 | * |
743 | * A simple NMI-safe handler, considering the restrictions |
744 | * from request_nmi. |
745 | * |
746 | * Only a single callback will be issued to the chip: an ->eoi() |
747 | * call when the interrupt has been serviced. This enables support |
748 | * for modern forms of interrupt handlers, which handle the flow |
749 | * details in hardware, transparently. |
750 | */ |
751 | void handle_fasteoi_nmi(struct irq_desc *desc) |
752 | { |
753 | struct irq_chip *chip = irq_desc_get_chip(desc); |
754 | struct irqaction *action = desc->action; |
755 | unsigned int irq = irq_desc_get_irq(desc); |
756 | irqreturn_t res; |
757 | |
758 | __kstat_incr_irqs_this_cpu(desc); |
759 | |
760 | trace_irq_handler_entry(irq, action); |
761 | /* |
762 | * NMIs cannot be shared, there is only one action. |
763 | */ |
764 | res = action->handler(irq, action->dev_id); |
	trace_irq_handler_exit(irq, action, res);
766 | |
767 | if (chip->irq_eoi) |
768 | chip->irq_eoi(&desc->irq_data); |
769 | } |
770 | EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); |
771 | |
772 | /** |
773 | * handle_edge_irq - edge type IRQ handler |
774 | * @desc: the interrupt description structure for this irq |
775 | * |
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
786 | */ |
787 | void handle_edge_irq(struct irq_desc *desc) |
788 | { |
789 | raw_spin_lock(&desc->lock); |
790 | |
791 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
792 | |
793 | if (!irq_may_run(desc)) { |
794 | desc->istate |= IRQS_PENDING; |
795 | mask_ack_irq(desc); |
796 | goto out_unlock; |
797 | } |
798 | |
	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
804 | desc->istate |= IRQS_PENDING; |
805 | mask_ack_irq(desc); |
806 | goto out_unlock; |
807 | } |
808 | |
809 | kstat_incr_irqs_this_cpu(desc); |
810 | |
811 | /* Start handling the irq */ |
812 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
813 | |
814 | do { |
815 | if (unlikely(!desc->action)) { |
816 | mask_irq(desc); |
817 | goto out_unlock; |
818 | } |
819 | |
		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}
829 | } |
830 | |
831 | handle_irq_event(desc); |
832 | |
	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));
835 | |
836 | out_unlock: |
837 | raw_spin_unlock(&desc->lock); |
838 | } |
839 | EXPORT_SYMBOL(handle_edge_irq); |
840 | |
841 | #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER |
842 | /** |
843 | * handle_edge_eoi_irq - edge eoi type IRQ handler |
844 | * @desc: the interrupt description structure for this irq |
845 | * |
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
848 | */ |
849 | void handle_edge_eoi_irq(struct irq_desc *desc) |
850 | { |
851 | struct irq_chip *chip = irq_desc_get_chip(desc); |
852 | |
853 | raw_spin_lock(&desc->lock); |
854 | |
855 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
856 | |
857 | if (!irq_may_run(desc)) { |
858 | desc->istate |= IRQS_PENDING; |
859 | goto out_eoi; |
860 | } |
861 | |
	/*
	 * If it's disabled or no action is available, there is nothing
	 * to do here: just EOI and get out.
	 */
866 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
867 | desc->istate |= IRQS_PENDING; |
868 | goto out_eoi; |
869 | } |
870 | |
871 | kstat_incr_irqs_this_cpu(desc); |
872 | |
873 | do { |
874 | if (unlikely(!desc->action)) |
875 | goto out_eoi; |
876 | |
877 | handle_irq_event(desc); |
878 | |
879 | } while ((desc->istate & IRQS_PENDING) && |
880 | !irqd_irq_disabled(&desc->irq_data)); |
881 | |
882 | out_eoi: |
883 | chip->irq_eoi(&desc->irq_data); |
884 | raw_spin_unlock(&desc->lock); |
885 | } |
886 | #endif |
887 | |
888 | /** |
889 | * handle_percpu_irq - Per CPU local irq handler |
890 | * @desc: the interrupt description structure for this irq |
891 | * |
892 | * Per CPU interrupts on SMP machines without locking requirements |
893 | */ |
894 | void handle_percpu_irq(struct irq_desc *desc) |
895 | { |
896 | struct irq_chip *chip = irq_desc_get_chip(desc); |
897 | |
898 | /* |
899 | * PER CPU interrupts are not serialized. Do not touch |
900 | * desc->tot_count. |
901 | */ |
902 | __kstat_incr_irqs_this_cpu(desc); |
903 | |
904 | if (chip->irq_ack) |
905 | chip->irq_ack(&desc->irq_data); |
906 | |
907 | handle_irq_event_percpu(desc); |
908 | |
909 | if (chip->irq_eoi) |
910 | chip->irq_eoi(&desc->irq_data); |
911 | } |
912 | |
913 | /** |
914 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids |
915 | * @desc: the interrupt description structure for this irq |
916 | * |
917 | * Per CPU interrupts on SMP machines without locking requirements. Same as |
918 | * handle_percpu_irq() above but with the following extras: |
919 | * |
920 | * action->percpu_dev_id is a pointer to percpu variables which |
921 | * contain the real device id for the cpu on which this handler is |
922 | * called |
923 | */ |
924 | void handle_percpu_devid_irq(struct irq_desc *desc) |
925 | { |
926 | struct irq_chip *chip = irq_desc_get_chip(desc); |
927 | struct irqaction *action = desc->action; |
928 | unsigned int irq = irq_desc_get_irq(desc); |
929 | irqreturn_t res; |
930 | |
931 | /* |
932 | * PER CPU interrupts are not serialized. Do not touch |
933 | * desc->tot_count. |
934 | */ |
935 | __kstat_incr_irqs_this_cpu(desc); |
936 | |
937 | if (chip->irq_ack) |
938 | chip->irq_ack(&desc->irq_data); |
939 | |
940 | if (likely(action)) { |
941 | trace_irq_handler_entry(irq, action); |
942 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); |
		trace_irq_handler_exit(irq, action, res);
944 | } else { |
945 | unsigned int cpu = smp_processor_id(); |
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
947 | |
948 | if (enabled) |
949 | irq_percpu_disable(desc, cpu); |
950 | |
		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
953 | } |
954 | |
955 | if (chip->irq_eoi) |
956 | chip->irq_eoi(&desc->irq_data); |
957 | } |
958 | |
959 | /** |
960 | * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu |
961 | * dev ids |
962 | * @desc: the interrupt description structure for this irq |
963 | * |
964 | * Similar to handle_fasteoi_nmi, but handling the dev_id cookie |
965 | * as a percpu pointer. |
966 | */ |
967 | void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) |
968 | { |
969 | struct irq_chip *chip = irq_desc_get_chip(desc); |
970 | struct irqaction *action = desc->action; |
971 | unsigned int irq = irq_desc_get_irq(desc); |
972 | irqreturn_t res; |
973 | |
974 | __kstat_incr_irqs_this_cpu(desc); |
975 | |
976 | trace_irq_handler_entry(irq, action); |
977 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); |
	trace_irq_handler_exit(irq, action, res);
979 | |
980 | if (chip->irq_eoi) |
981 | chip->irq_eoi(&desc->irq_data); |
982 | } |
983 | |
984 | static void |
985 | __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, |
986 | int is_chained, const char *name) |
987 | { |
988 | if (!handle) { |
989 | handle = handle_bad_irq; |
990 | } else { |
991 | struct irq_data *irq_data = &desc->irq_data; |
992 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
993 | /* |
994 | * With hierarchical domains we might run into a |
995 | * situation where the outermost chip is not yet set |
996 | * up, but the inner chips are there. Instead of |
997 | * bailing we install the handler, but obviously we |
998 | * cannot enable/startup the interrupt at this point. |
999 | */ |
1000 | while (irq_data) { |
1001 | if (irq_data->chip != &no_irq_chip) |
1002 | break; |
1003 | /* |
1004 | * Bail out if the outer chip is not set up |
			 * and the interrupt is supposed to be started
1006 | * right away. |
1007 | */ |
1008 | if (WARN_ON(is_chained)) |
1009 | return; |
1010 | /* Try the parent */ |
1011 | irq_data = irq_data->parent_data; |
1012 | } |
1013 | #endif |
1014 | if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) |
1015 | return; |
1016 | } |
1017 | |
1018 | /* Uninstall? */ |
1019 | if (handle == handle_bad_irq) { |
1020 | if (desc->irq_data.chip != &no_irq_chip) |
1021 | mask_ack_irq(desc); |
1022 | irq_state_set_disabled(desc); |
1023 | if (is_chained) { |
1024 | desc->action = NULL; |
1025 | WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc))); |
1026 | } |
1027 | desc->depth = 1; |
1028 | } |
1029 | desc->handle_irq = handle; |
1030 | desc->name = name; |
1031 | |
1032 | if (handle != handle_bad_irq && is_chained) { |
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1034 | |
1035 | /* |
1036 | * We're about to start this interrupt immediately, |
1037 | * hence the need to set the trigger configuration. |
1038 | * But the .set_type callback may have overridden the |
1039 | * flow handler, ignoring that we're dealing with a |
1040 | * chained interrupt. Reset it immediately because we |
1041 | * do know better. |
1042 | */ |
1043 | if (type != IRQ_TYPE_NONE) { |
			__irq_set_trigger(desc, type);
1045 | desc->handle_irq = handle; |
1046 | } |
1047 | |
1048 | irq_settings_set_noprobe(desc); |
1049 | irq_settings_set_norequest(desc); |
1050 | irq_settings_set_nothread(desc); |
1051 | desc->action = &chained_action; |
1052 | WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc))); |
1053 | irq_activate_and_startup(desc, IRQ_RESEND); |
1054 | } |
1055 | } |
1056 | |
1057 | void |
1058 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
1059 | const char *name) |
1060 | { |
1061 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1063 | |
1064 | if (!desc) |
1065 | return; |
1066 | |
1067 | __irq_do_set_handler(desc, handle, is_chained, name); |
1068 | irq_put_desc_busunlock(desc, flags); |
1069 | } |
1070 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
1071 | |
1072 | void |
1073 | irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, |
1074 | void *data) |
1075 | { |
1076 | unsigned long flags; |
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1078 | |
1079 | if (!desc) |
1080 | return; |
1081 | |
1082 | desc->irq_common_data.handler_data = data; |
	__irq_do_set_handler(desc, handle, 1, NULL);
1084 | |
1085 | irq_put_desc_busunlock(desc, flags); |
1086 | } |
1087 | EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); |
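
/*
 * Sketch of a typical caller, with hypothetical driver names: a GPIO
 * controller installing a chained flow handler on its parent line.
 * chained_irq_enter()/chained_irq_exit() come from
 * <linux/irqchip/chained_irq.h>; my_gpio and my_gpio_demux() are
 * assumptions.
 *
 *	static void my_gpio_irq_handler(struct irq_desc *desc)
 *	{
 *		struct my_gpio *gpio = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *
 *		chained_irq_enter(chip, desc);
 *		my_gpio_demux(gpio);	// invokes generic_handle_irq() per child
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_irq_handler,
 *					 gpio);
 */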
1088 | |
1089 | void |
1090 | irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, |
1091 | irq_flow_handler_t handle, const char *name) |
1092 | { |
1093 | irq_set_chip(irq, chip); |
1094 | __irq_set_handler(irq, handle, 0, name); |
1095 | } |
1096 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); |
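
/*
 * Illustrative sketch: this is commonly called from an irq_domain
 * ->map() callback to wire up a freshly allocated virq. The chip and
 * domain names below are placeholders.
 *
 *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hw)
 *	{
 *		irq_set_chip_and_handler_name(virq, &my_irq_chip,
 *					      handle_level_irq, "level");
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 */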
1097 | |
1098 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
1099 | { |
1100 | unsigned long flags, trigger, tmp; |
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1102 | |
1103 | if (!desc) |
1104 | return; |
1105 | |
1106 | /* |
1107 | * Warn when a driver sets the no autoenable flag on an already |
1108 | * active interrupt. |
1109 | */ |
1110 | WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); |
1111 | |
1112 | irq_settings_clr_and_set(desc, clr, set); |
1113 | |
	trigger = irqd_get_trigger_type(&desc->irq_data);
1115 | |
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);
1126 | |
1127 | tmp = irq_settings_get_trigger_mask(desc); |
1128 | if (tmp != IRQ_TYPE_NONE) |
1129 | trigger = tmp; |
1130 | |
	irqd_set(&desc->irq_data, trigger);
1132 | |
1133 | irq_put_desc_unlock(desc, flags); |
1134 | } |
1135 | EXPORT_SYMBOL_GPL(irq_modify_status); |
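
/*
 * Usage sketch, assuming a hypothetical internally managed line: clear
 * IRQ_NOAUTOEN and forbid probing and requesting in one call. The
 * irq_set_status_flags()/irq_clear_status_flags() helpers wrap this
 * function for the single-direction cases.
 *
 *	irq_modify_status(irq, IRQ_NOAUTOEN, IRQ_NOPROBE | IRQ_NOREQUEST);
 */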
1136 | |
1137 | #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE |
1138 | /** |
1139 | * irq_cpu_online - Invoke all irq_cpu_online functions. |
1140 | * |
1141 | * Iterate through all irqs and invoke the chip.irq_cpu_online() |
1142 | * for each. |
1143 | */ |
1144 | void irq_cpu_online(void) |
1145 | { |
1146 | struct irq_desc *desc; |
1147 | struct irq_chip *chip; |
1148 | unsigned long flags; |
1149 | unsigned int irq; |
1150 | |
1151 | for_each_active_irq(irq) { |
1152 | desc = irq_to_desc(irq); |
1153 | if (!desc) |
1154 | continue; |
1155 | |
1156 | raw_spin_lock_irqsave(&desc->lock, flags); |
1157 | |
1158 | chip = irq_data_get_irq_chip(&desc->irq_data); |
1159 | if (chip && chip->irq_cpu_online && |
1160 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || |
1161 | !irqd_irq_disabled(&desc->irq_data))) |
1162 | chip->irq_cpu_online(&desc->irq_data); |
1163 | |
1164 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1165 | } |
1166 | } |
1167 | |
1168 | /** |
1169 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. |
1170 | * |
1171 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() |
1172 | * for each. |
1173 | */ |
1174 | void irq_cpu_offline(void) |
1175 | { |
1176 | struct irq_desc *desc; |
1177 | struct irq_chip *chip; |
1178 | unsigned long flags; |
1179 | unsigned int irq; |
1180 | |
1181 | for_each_active_irq(irq) { |
1182 | desc = irq_to_desc(irq); |
1183 | if (!desc) |
1184 | continue; |
1185 | |
1186 | raw_spin_lock_irqsave(&desc->lock, flags); |
1187 | |
1188 | chip = irq_data_get_irq_chip(&desc->irq_data); |
1189 | if (chip && chip->irq_cpu_offline && |
1190 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || |
1191 | !irqd_irq_disabled(&desc->irq_data))) |
1192 | chip->irq_cpu_offline(&desc->irq_data); |
1193 | |
1194 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1195 | } |
1196 | } |
1197 | #endif |
1198 | |
1199 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
1200 | |
1201 | #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS |
1202 | /** |
1203 | * handle_fasteoi_ack_irq - irq handler for edge hierarchy |
1204 | * stacked on transparent controllers |
1205 | * |
1206 | * @desc: the interrupt description structure for this irq |
1207 | * |
1208 | * Like handle_fasteoi_irq(), but for use with hierarchy where |
1209 | * the irq_chip also needs to have its ->irq_ack() function |
1210 | * called. |
1211 | */ |
1212 | void handle_fasteoi_ack_irq(struct irq_desc *desc) |
1213 | { |
1214 | struct irq_chip *chip = desc->irq_data.chip; |
1215 | |
1216 | raw_spin_lock(&desc->lock); |
1217 | |
1218 | if (!irq_may_run(desc)) |
1219 | goto out; |
1220 | |
1221 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
1222 | |
	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
1227 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
1228 | desc->istate |= IRQS_PENDING; |
1229 | mask_irq(desc); |
1230 | goto out; |
1231 | } |
1232 | |
1233 | kstat_incr_irqs_this_cpu(desc); |
1234 | if (desc->istate & IRQS_ONESHOT) |
1235 | mask_irq(desc); |
1236 | |
1237 | /* Start handling the irq */ |
1238 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
1239 | |
1240 | handle_irq_event(desc); |
1241 | |
1242 | cond_unmask_eoi_irq(desc, chip); |
1243 | |
1244 | raw_spin_unlock(&desc->lock); |
1245 | return; |
1246 | out: |
1247 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
1248 | chip->irq_eoi(&desc->irq_data); |
1249 | raw_spin_unlock(&desc->lock); |
1250 | } |
1251 | EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); |
1252 | |
1253 | /** |
1254 | * handle_fasteoi_mask_irq - irq handler for level hierarchy |
1255 | * stacked on transparent controllers |
1256 | * |
1257 | * @desc: the interrupt description structure for this irq |
1258 | * |
1259 | * Like handle_fasteoi_irq(), but for use with hierarchy where |
1260 | * the irq_chip also needs to have its ->irq_mask_ack() function |
1261 | * called. |
1262 | */ |
1263 | void handle_fasteoi_mask_irq(struct irq_desc *desc) |
1264 | { |
1265 | struct irq_chip *chip = desc->irq_data.chip; |
1266 | |
1267 | raw_spin_lock(&desc->lock); |
1268 | mask_ack_irq(desc); |
1269 | |
1270 | if (!irq_may_run(desc)) |
1271 | goto out; |
1272 | |
1273 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
1274 | |
	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
1279 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
1280 | desc->istate |= IRQS_PENDING; |
1281 | mask_irq(desc); |
1282 | goto out; |
1283 | } |
1284 | |
1285 | kstat_incr_irqs_this_cpu(desc); |
1286 | if (desc->istate & IRQS_ONESHOT) |
1287 | mask_irq(desc); |
1288 | |
1289 | handle_irq_event(desc); |
1290 | |
1291 | cond_unmask_eoi_irq(desc, chip); |
1292 | |
1293 | raw_spin_unlock(&desc->lock); |
1294 | return; |
1295 | out: |
1296 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
1297 | chip->irq_eoi(&desc->irq_data); |
1298 | raw_spin_unlock(&desc->lock); |
1299 | } |
1300 | EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); |
1301 | |
1302 | #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ |
1303 | |
1304 | /** |
1305 | * irq_chip_set_parent_state - set the state of a parent interrupt. |
1306 | * |
1307 | * @data: Pointer to interrupt specific data |
1308 | * @which: State to be restored (one of IRQCHIP_STATE_*) |
1309 | * @val: Value corresponding to @which |
1310 | * |
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
1312 | */ |
1313 | int irq_chip_set_parent_state(struct irq_data *data, |
1314 | enum irqchip_irq_state which, |
1315 | bool val) |
1316 | { |
1317 | data = data->parent_data; |
1318 | |
1319 | if (!data || !data->chip->irq_set_irqchip_state) |
1320 | return 0; |
1321 | |
1322 | return data->chip->irq_set_irqchip_state(data, which, val); |
1323 | } |
1324 | EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); |
1325 | |
1326 | /** |
1327 | * irq_chip_get_parent_state - get the state of a parent interrupt. |
1328 | * |
1329 | * @data: Pointer to interrupt specific data |
1330 | * @which: one of IRQCHIP_STATE_* the caller wants to know |
1331 | * @state: a pointer to a boolean where the state is to be stored |
1332 | * |
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
1334 | */ |
1335 | int irq_chip_get_parent_state(struct irq_data *data, |
1336 | enum irqchip_irq_state which, |
1337 | bool *state) |
1338 | { |
1339 | data = data->parent_data; |
1340 | |
1341 | if (!data || !data->chip->irq_get_irqchip_state) |
1342 | return 0; |
1343 | |
1344 | return data->chip->irq_get_irqchip_state(data, which, state); |
1345 | } |
1346 | EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); |
1347 | |
1348 | /** |
1349 | * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if |
1350 | * NULL) |
1351 | * @data: Pointer to interrupt specific data |
1352 | */ |
1353 | void irq_chip_enable_parent(struct irq_data *data) |
1354 | { |
1355 | data = data->parent_data; |
1356 | if (data->chip->irq_enable) |
1357 | data->chip->irq_enable(data); |
1358 | else |
1359 | data->chip->irq_unmask(data); |
1360 | } |
1361 | EXPORT_SYMBOL_GPL(irq_chip_enable_parent); |
1362 | |
1363 | /** |
1364 | * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if |
1365 | * NULL) |
1366 | * @data: Pointer to interrupt specific data |
1367 | */ |
1368 | void irq_chip_disable_parent(struct irq_data *data) |
1369 | { |
1370 | data = data->parent_data; |
1371 | if (data->chip->irq_disable) |
1372 | data->chip->irq_disable(data); |
1373 | else |
1374 | data->chip->irq_mask(data); |
1375 | } |
1376 | EXPORT_SYMBOL_GPL(irq_chip_disable_parent); |
1377 | |
1378 | /** |
1379 | * irq_chip_ack_parent - Acknowledge the parent interrupt |
1380 | * @data: Pointer to interrupt specific data |
1381 | */ |
1382 | void irq_chip_ack_parent(struct irq_data *data) |
1383 | { |
1384 | data = data->parent_data; |
1385 | data->chip->irq_ack(data); |
1386 | } |
1387 | EXPORT_SYMBOL_GPL(irq_chip_ack_parent); |
1388 | |
1389 | /** |
1390 | * irq_chip_mask_parent - Mask the parent interrupt |
1391 | * @data: Pointer to interrupt specific data |
1392 | */ |
1393 | void irq_chip_mask_parent(struct irq_data *data) |
1394 | { |
1395 | data = data->parent_data; |
1396 | data->chip->irq_mask(data); |
1397 | } |
1398 | EXPORT_SYMBOL_GPL(irq_chip_mask_parent); |
1399 | |
1400 | /** |
1401 | * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt |
1402 | * @data: Pointer to interrupt specific data |
1403 | */ |
1404 | void irq_chip_mask_ack_parent(struct irq_data *data) |
1405 | { |
1406 | data = data->parent_data; |
1407 | data->chip->irq_mask_ack(data); |
1408 | } |
1409 | EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); |
1410 | |
1411 | /** |
1412 | * irq_chip_unmask_parent - Unmask the parent interrupt |
1413 | * @data: Pointer to interrupt specific data |
1414 | */ |
1415 | void irq_chip_unmask_parent(struct irq_data *data) |
1416 | { |
1417 | data = data->parent_data; |
1418 | data->chip->irq_unmask(data); |
1419 | } |
1420 | EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); |
1421 | |
1422 | /** |
1423 | * irq_chip_eoi_parent - Invoke EOI on the parent interrupt |
1424 | * @data: Pointer to interrupt specific data |
1425 | */ |
1426 | void irq_chip_eoi_parent(struct irq_data *data) |
1427 | { |
1428 | data = data->parent_data; |
1429 | data->chip->irq_eoi(data); |
1430 | } |
1431 | EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); |
1432 | |
1433 | /** |
1434 | * irq_chip_set_affinity_parent - Set affinity on the parent interrupt |
1435 | * @data: Pointer to interrupt specific data |
1436 | * @dest: The affinity mask to set |
1437 | * @force: Flag to enforce setting (disable online checks) |
1438 | * |
1439 | * Conditional, as the underlying parent chip might not implement it. |
1440 | */ |
1441 | int irq_chip_set_affinity_parent(struct irq_data *data, |
1442 | const struct cpumask *dest, bool force) |
1443 | { |
1444 | data = data->parent_data; |
1445 | if (data->chip->irq_set_affinity) |
1446 | return data->chip->irq_set_affinity(data, dest, force); |
1447 | |
1448 | return -ENOSYS; |
1449 | } |
1450 | EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); |
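
/*
 * Sketch of a typical consumer, with a hypothetical chip name: an
 * irq_chip stacked in a hierarchical domain commonly forwards most
 * operations straight to its parent via the helpers in this block:
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */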
1451 | |
1452 | /** |
1453 | * irq_chip_set_type_parent - Set IRQ type on the parent interrupt |
1454 | * @data: Pointer to interrupt specific data |
1455 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
1456 | * |
1457 | * Conditional, as the underlying parent chip might not implement it. |
1458 | */ |
1459 | int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) |
1460 | { |
1461 | data = data->parent_data; |
1462 | |
1463 | if (data->chip->irq_set_type) |
1464 | return data->chip->irq_set_type(data, type); |
1465 | |
1466 | return -ENOSYS; |
1467 | } |
1468 | EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); |
1469 | |
1470 | /** |
1471 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware |
1472 | * @data: Pointer to interrupt specific data |
1473 | * |
1474 | * Iterate through the domain hierarchy of the interrupt and check |
1475 | * whether a hw retrigger function exists. If yes, invoke it. |
1476 | */ |
1477 | int irq_chip_retrigger_hierarchy(struct irq_data *data) |
1478 | { |
1479 | for (data = data->parent_data; data; data = data->parent_data) |
1480 | if (data->chip && data->chip->irq_retrigger) |
1481 | return data->chip->irq_retrigger(data); |
1482 | |
1483 | return 0; |
1484 | } |
1485 | EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); |
1486 | |
1487 | /** |
1488 | * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt |
1489 | * @data: Pointer to interrupt specific data |
1490 | * @vcpu_info: The vcpu affinity information |
1491 | */ |
1492 | int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) |
1493 | { |
1494 | data = data->parent_data; |
1495 | if (data->chip->irq_set_vcpu_affinity) |
1496 | return data->chip->irq_set_vcpu_affinity(data, vcpu_info); |
1497 | |
1498 | return -ENOSYS; |
1499 | } |
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
1502 | * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt |
1503 | * @data: Pointer to interrupt specific data |
1504 | * @on: Whether to set or reset the wake-up capability of this irq |
1505 | * |
1506 | * Conditional, as the underlying parent chip might not implement it. |
1507 | */ |
1508 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) |
1509 | { |
1510 | data = data->parent_data; |
1511 | |
1512 | if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) |
1513 | return 0; |
1514 | |
1515 | if (data->chip->irq_set_wake) |
1516 | return data->chip->irq_set_wake(data, on); |
1517 | |
1518 | return -ENOSYS; |
1519 | } |
1520 | EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); |
1521 | |
1522 | /** |
1523 | * irq_chip_request_resources_parent - Request resources on the parent interrupt |
1524 | * @data: Pointer to interrupt specific data |
1525 | */ |
1526 | int irq_chip_request_resources_parent(struct irq_data *data) |
1527 | { |
1528 | data = data->parent_data; |
1529 | |
1530 | if (data->chip->irq_request_resources) |
1531 | return data->chip->irq_request_resources(data); |
1532 | |
1533 | /* no error on missing optional irq_chip::irq_request_resources */ |
1534 | return 0; |
1535 | } |
1536 | EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); |
1537 | |
1538 | /** |
1539 | * irq_chip_release_resources_parent - Release resources on the parent interrupt |
1540 | * @data: Pointer to interrupt specific data |
1541 | */ |
1542 | void irq_chip_release_resources_parent(struct irq_data *data) |
1543 | { |
1544 | data = data->parent_data; |
1545 | if (data->chip->irq_release_resources) |
1546 | data->chip->irq_release_resources(data); |
1547 | } |
1548 | EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); |
1549 | #endif |
1550 | |
1551 | /** |
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
1553 | * @data: Pointer to interrupt specific data |
1554 | * @msg: Pointer to the MSI message |
1555 | * |
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For
 * non-hierarchical domains we use the top level chip.
1559 | */ |
1560 | int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
1561 | { |
1562 | struct irq_data *pos; |
1563 | |
	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
1565 | if (data->chip && data->chip->irq_compose_msi_msg) |
1566 | pos = data; |
1567 | } |
1568 | |
1569 | if (!pos) |
1570 | return -ENOSYS; |
1571 | |
1572 | pos->chip->irq_compose_msi_msg(pos, msg); |
1573 | return 0; |
1574 | } |
1575 | |
1576 | static struct device *irq_get_pm_device(struct irq_data *data) |
1577 | { |
1578 | if (data->domain) |
1579 | return data->domain->pm_dev; |
1580 | |
1581 | return NULL; |
1582 | } |
1583 | |
1584 | /** |
1585 | * irq_chip_pm_get - Enable power for an IRQ chip |
1586 | * @data: Pointer to interrupt specific data |
1587 | * |
1588 | * Enable the power to the IRQ chip referenced by the interrupt data |
1589 | * structure. |
1590 | */ |
1591 | int irq_chip_pm_get(struct irq_data *data) |
1592 | { |
1593 | struct device *dev = irq_get_pm_device(data); |
1594 | int retval = 0; |
1595 | |
1596 | if (IS_ENABLED(CONFIG_PM) && dev) |
1597 | retval = pm_runtime_resume_and_get(dev); |
1598 | |
1599 | return retval; |
1600 | } |
1601 | |
1602 | /** |
1603 | * irq_chip_pm_put - Disable power for an IRQ chip |
1604 | * @data: Pointer to interrupt specific data |
1605 | * |
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
1609 | */ |
1610 | int irq_chip_pm_put(struct irq_data *data) |
1611 | { |
1612 | struct device *dev = irq_get_pm_device(data); |
1613 | int retval = 0; |
1614 | |
1615 | if (IS_ENABLED(CONFIG_PM) && dev) |
1616 | retval = pm_runtime_put(dev); |
1617 | |
1618 | return (retval < 0) ? retval : 0; |
1619 | } |
1620 | |