1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/kernel/softirq.c |
4 | * |
5 | * Copyright (C) 1992 Linus Torvalds |
6 | * |
7 | * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/export.h> |
13 | #include <linux/kernel_stat.h> |
14 | #include <linux/interrupt.h> |
15 | #include <linux/init.h> |
16 | #include <linux/local_lock.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/notifier.h> |
19 | #include <linux/percpu.h> |
20 | #include <linux/cpu.h> |
21 | #include <linux/freezer.h> |
22 | #include <linux/kthread.h> |
23 | #include <linux/rcupdate.h> |
24 | #include <linux/ftrace.h> |
25 | #include <linux/smp.h> |
26 | #include <linux/smpboot.h> |
27 | #include <linux/tick.h> |
28 | #include <linux/irq.h> |
29 | #include <linux/wait_bit.h> |
30 | |
31 | #include <asm/softirq_stack.h> |
32 | |
33 | #define CREATE_TRACE_POINTS |
34 | #include <trace/events/irq.h> |
35 | |
36 | /* |
37 | - No shared variables, all the data are CPU local. |
38 | - If a softirq needs serialization, let it serialize itself |
39 | by its own spinlocks. |
40 | - Even if softirq is serialized, only local cpu is marked for |
41 | execution. Hence, we get something sort of weak cpu binding. |
     Though it is still not clear whether it will result in better locality
     or not.
44 | |
45 | Examples: |
46 | - NET RX softirq. It is multithreaded and does not require |
47 | any global serialization. |
48 | - NET TX softirq. It kicks software netdevice queues, hence |
49 | it is logically serialized per device, but this serialization |
50 | is invisible to common code. |
51 | - Tasklets: serialized wrt itself. |
52 | */ |
53 | |
54 | #ifndef __ARCH_IRQ_STAT |
55 | DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); |
56 | EXPORT_PER_CPU_SYMBOL(irq_stat); |
57 | #endif |
58 | |
59 | static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; |
60 | |
61 | DEFINE_PER_CPU(struct task_struct *, ksoftirqd); |
62 | |
63 | const char * const softirq_to_name[NR_SOFTIRQS] = { |
64 | "HI" , "TIMER" , "NET_TX" , "NET_RX" , "BLOCK" , "IRQ_POLL" , |
65 | "TASKLET" , "SCHED" , "HRTIMER" , "RCU" |
66 | }; |
67 | |
68 | /* |
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
73 | */ |
74 | static void wakeup_softirqd(void) |
75 | { |
76 | /* Interrupts are disabled: no need to stop preemption */ |
77 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
78 | |
79 | if (tsk) |
80 | wake_up_process(tsk); |
81 | } |
82 | |
83 | #ifdef CONFIG_TRACE_IRQFLAGS |
84 | DEFINE_PER_CPU(int, hardirqs_enabled); |
85 | DEFINE_PER_CPU(int, hardirq_context); |
86 | EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled); |
87 | EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context); |
88 | #endif |
89 | |
90 | /* |
91 | * SOFTIRQ_OFFSET usage: |
92 | * |
93 | * On !RT kernels 'count' is the preempt counter, on RT kernels this applies |
 * to a per CPU counter and to task::softirq_disable_cnt.
95 | * |
96 | * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq |
97 | * processing. |
98 | * |
99 | * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) |
100 | * on local_bh_disable or local_bh_enable. |
101 | * |
102 | * This lets us distinguish between whether we are currently processing |
103 | * softirq and whether we just have bh disabled. |
104 | */ |
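
/*
 * Illustrative sketch of the counter arithmetic described above (not part
 * of this file; the offsets come from <linux/preempt.h>):
 *
 *	local_bh_disable();	// count += SOFTIRQ_DISABLE_OFFSET
 *	// softirq_count() != 0, so in_softirq() is true, but the
 *	// SOFTIRQ_OFFSET bit stays clear, so in_serving_softirq() is false
 *	local_bh_enable();	// count -= SOFTIRQ_DISABLE_OFFSET
 *
 * __do_softirq() itself adds and removes SOFTIRQ_OFFSET, which is what
 * makes in_serving_softirq() true only while handlers are actually running.
 */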
105 | #ifdef CONFIG_PREEMPT_RT |
106 | |
107 | /* |
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
109 | * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a |
110 | * softirq disabled section to be preempted. |
111 | * |
112 | * The per task counter is used for softirq_count(), in_softirq() and |
 * in_serving_softirq() because these counts are only valid when the task
114 | * holding softirq_ctrl::lock is running. |
115 | * |
116 | * The per CPU counter prevents pointless wakeups of ksoftirqd in case that |
117 | * the task which is in a softirq disabled section is preempted or blocks. |
118 | */ |
119 | struct softirq_ctrl { |
120 | local_lock_t lock; |
121 | int cnt; |
122 | }; |
123 | |
124 | static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = { |
125 | .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock), |
126 | }; |
127 | |
128 | /** |
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
132 | * |
133 | * This is invoked from the idle task to guard against false positive |
134 | * softirq pending warnings, which would happen when the task which holds |
135 | * softirq_ctrl::lock was the only running task on the CPU and blocks on |
136 | * some other lock. |
137 | */ |
138 | bool local_bh_blocked(void) |
139 | { |
140 | return __this_cpu_read(softirq_ctrl.cnt) != 0; |
141 | } |
142 | |
143 | void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
144 | { |
145 | unsigned long flags; |
146 | int newcnt; |
147 | |
148 | WARN_ON_ONCE(in_hardirq()); |
149 | |
150 | /* First entry of a task into a BH disabled section? */ |
151 | if (!current->softirq_disable_cnt) { |
152 | if (preemptible()) { |
153 | local_lock(&softirq_ctrl.lock); |
154 | /* Required to meet the RCU bottomhalf requirements. */ |
155 | rcu_read_lock(); |
156 | } else { |
157 | DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt)); |
158 | } |
159 | } |
160 | |
161 | /* |
162 | * Track the per CPU softirq disabled state. On RT this is per CPU |
163 | * state to allow preemption of bottom half disabled sections. |
164 | */ |
165 | newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt); |
166 | /* |
167 | * Reflect the result in the task state to prevent recursion on the |
168 | * local lock and to make softirq_count() & al work. |
169 | */ |
170 | current->softirq_disable_cnt = newcnt; |
171 | |
172 | if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) { |
173 | raw_local_irq_save(flags); |
174 | lockdep_softirqs_off(ip); |
175 | raw_local_irq_restore(flags); |
176 | } |
177 | } |
178 | EXPORT_SYMBOL(__local_bh_disable_ip); |
179 | |
180 | static void __local_bh_enable(unsigned int cnt, bool unlock) |
181 | { |
182 | unsigned long flags; |
183 | int newcnt; |
184 | |
185 | DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt != |
186 | this_cpu_read(softirq_ctrl.cnt)); |
187 | |
188 | if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) { |
189 | raw_local_irq_save(flags); |
190 | lockdep_softirqs_on(_RET_IP_); |
191 | raw_local_irq_restore(flags); |
192 | } |
193 | |
194 | newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt); |
195 | current->softirq_disable_cnt = newcnt; |
196 | |
197 | if (!newcnt && unlock) { |
198 | rcu_read_unlock(); |
199 | local_unlock(&softirq_ctrl.lock); |
200 | } |
201 | } |
202 | |
203 | void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) |
204 | { |
205 | bool preempt_on = preemptible(); |
206 | unsigned long flags; |
207 | u32 pending; |
208 | int curcnt; |
209 | |
210 | WARN_ON_ONCE(in_hardirq()); |
211 | lockdep_assert_irqs_enabled(); |
212 | |
213 | local_irq_save(flags); |
214 | curcnt = __this_cpu_read(softirq_ctrl.cnt); |
215 | |
216 | /* |
217 | * If this is not reenabling soft interrupts, no point in trying to |
218 | * run pending ones. |
219 | */ |
220 | if (curcnt != cnt) |
221 | goto out; |
222 | |
223 | pending = local_softirq_pending(); |
224 | if (!pending) |
225 | goto out; |
226 | |
227 | /* |
228 | * If this was called from non preemptible context, wake up the |
229 | * softirq daemon. |
230 | */ |
231 | if (!preempt_on) { |
232 | wakeup_softirqd(); |
233 | goto out; |
234 | } |
235 | |
236 | /* |
237 | * Adjust softirq count to SOFTIRQ_OFFSET which makes |
238 | * in_serving_softirq() become true. |
239 | */ |
240 | cnt = SOFTIRQ_OFFSET; |
241 | __local_bh_enable(cnt, false); |
242 | __do_softirq(); |
243 | |
244 | out: |
245 | __local_bh_enable(cnt, preempt_on); |
246 | local_irq_restore(flags); |
247 | } |
248 | EXPORT_SYMBOL(__local_bh_enable_ip); |
249 | |
250 | /* |
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
252 | * to acquire the per CPU local lock for reentrancy protection. |
253 | */ |
254 | static inline void ksoftirqd_run_begin(void) |
255 | { |
256 | __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
257 | local_irq_disable(); |
258 | } |
259 | |
260 | /* Counterpart to ksoftirqd_run_begin() */ |
261 | static inline void ksoftirqd_run_end(void) |
262 | { |
263 | __local_bh_enable(SOFTIRQ_OFFSET, true); |
264 | WARN_ON_ONCE(in_interrupt()); |
265 | local_irq_enable(); |
266 | } |
267 | |
268 | static inline void softirq_handle_begin(void) { } |
269 | static inline void softirq_handle_end(void) { } |
270 | |
271 | static inline bool should_wake_ksoftirqd(void) |
272 | { |
273 | return !this_cpu_read(softirq_ctrl.cnt); |
274 | } |
275 | |
276 | static inline void invoke_softirq(void) |
277 | { |
278 | if (should_wake_ksoftirqd()) |
279 | wakeup_softirqd(); |
280 | } |
281 | |
282 | /* |
283 | * flush_smp_call_function_queue() can raise a soft interrupt in a function |
284 | * call. On RT kernels this is undesired and the only known functionality |
285 | * in the block layer which does this is disabled on RT. If soft interrupts |
286 | * get raised which haven't been raised before the flush, warn so it can be |
287 | * investigated. |
288 | */ |
289 | void do_softirq_post_smp_call_flush(unsigned int was_pending) |
290 | { |
291 | if (WARN_ON_ONCE(was_pending != local_softirq_pending())) |
292 | invoke_softirq(); |
293 | } |
294 | |
295 | #else /* CONFIG_PREEMPT_RT */ |
296 | |
297 | /* |
298 | * This one is for softirq.c-internal use, where hardirqs are disabled |
299 | * legitimately: |
300 | */ |
301 | #ifdef CONFIG_TRACE_IRQFLAGS |
302 | void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
303 | { |
304 | unsigned long flags; |
305 | |
306 | WARN_ON_ONCE(in_hardirq()); |
307 | |
308 | raw_local_irq_save(flags); |
309 | /* |
310 | * The preempt tracer hooks into preempt_count_add and will break |
311 | * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET |
312 | * is set and before current->softirq_enabled is cleared. |
313 | * We must manually increment preempt_count here and manually |
314 | * call the trace_preempt_off later. |
315 | */ |
	__preempt_count_add(cnt);
317 | /* |
318 | * Were softirqs turned off above: |
319 | */ |
320 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) |
321 | lockdep_softirqs_off(ip); |
322 | raw_local_irq_restore(flags); |
323 | |
324 | if (preempt_count() == cnt) { |
325 | #ifdef CONFIG_DEBUG_PREEMPT |
326 | current->preempt_disable_ip = get_lock_parent_ip(); |
327 | #endif |
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
329 | } |
330 | } |
331 | EXPORT_SYMBOL(__local_bh_disable_ip); |
332 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
333 | |
334 | static void __local_bh_enable(unsigned int cnt) |
335 | { |
336 | lockdep_assert_irqs_disabled(); |
337 | |
338 | if (preempt_count() == cnt) |
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
340 | |
341 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) |
342 | lockdep_softirqs_on(_RET_IP_); |
343 | |
	__preempt_count_sub(cnt);
345 | } |
346 | |
347 | /* |
348 | * Special-case - softirqs can safely be enabled by __do_softirq(), |
349 | * without processing still-pending softirqs: |
350 | */ |
351 | void _local_bh_enable(void) |
352 | { |
353 | WARN_ON_ONCE(in_hardirq()); |
354 | __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); |
355 | } |
356 | EXPORT_SYMBOL(_local_bh_enable); |
357 | |
358 | void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) |
359 | { |
360 | WARN_ON_ONCE(in_hardirq()); |
361 | lockdep_assert_irqs_enabled(); |
362 | #ifdef CONFIG_TRACE_IRQFLAGS |
363 | local_irq_disable(); |
364 | #endif |
365 | /* |
366 | * Are softirqs going to be turned on now: |
367 | */ |
368 | if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) |
369 | lockdep_softirqs_on(ip); |
370 | /* |
371 | * Keep preemption disabled until we are done with |
372 | * softirq processing: |
373 | */ |
	__preempt_count_sub(cnt - 1);
375 | |
376 | if (unlikely(!in_interrupt() && local_softirq_pending())) { |
377 | /* |
378 | * Run softirq if any pending. And do it in its own stack |
379 | * as we may be calling this deep in a task call stack already. |
380 | */ |
381 | do_softirq(); |
382 | } |
383 | |
384 | preempt_count_dec(); |
385 | #ifdef CONFIG_TRACE_IRQFLAGS |
386 | local_irq_enable(); |
387 | #endif |
388 | preempt_check_resched(); |
389 | } |
390 | EXPORT_SYMBOL(__local_bh_enable_ip); |
391 | |
392 | static inline void softirq_handle_begin(void) |
393 | { |
394 | __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
395 | } |
396 | |
397 | static inline void softirq_handle_end(void) |
398 | { |
399 | __local_bh_enable(SOFTIRQ_OFFSET); |
400 | WARN_ON_ONCE(in_interrupt()); |
401 | } |
402 | |
403 | static inline void ksoftirqd_run_begin(void) |
404 | { |
405 | local_irq_disable(); |
406 | } |
407 | |
408 | static inline void ksoftirqd_run_end(void) |
409 | { |
410 | local_irq_enable(); |
411 | } |
412 | |
413 | static inline bool should_wake_ksoftirqd(void) |
414 | { |
415 | return true; |
416 | } |
417 | |
418 | static inline void invoke_softirq(void) |
419 | { |
420 | if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) { |
421 | #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
422 | /* |
423 | * We can safely execute softirq on the current stack if |
424 | * it is the irq stack, because it should be near empty |
425 | * at this stage. |
426 | */ |
427 | __do_softirq(); |
428 | #else |
429 | /* |
430 | * Otherwise, irq_exit() is called on the task stack that can |
431 | * be potentially deep already. So call softirq in its own stack |
432 | * to prevent from any overrun. |
433 | */ |
434 | do_softirq_own_stack(); |
435 | #endif |
436 | } else { |
437 | wakeup_softirqd(); |
438 | } |
439 | } |
440 | |
441 | asmlinkage __visible void do_softirq(void) |
442 | { |
443 | __u32 pending; |
444 | unsigned long flags; |
445 | |
446 | if (in_interrupt()) |
447 | return; |
448 | |
449 | local_irq_save(flags); |
450 | |
451 | pending = local_softirq_pending(); |
452 | |
453 | if (pending) |
454 | do_softirq_own_stack(); |
455 | |
456 | local_irq_restore(flags); |
457 | } |
458 | |
459 | #endif /* !CONFIG_PREEMPT_RT */ |
460 | |
461 | /* |
462 | * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times, |
463 | * but break the loop if need_resched() is set or after 2 ms. |
464 | * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in |
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
468 | * |
469 | * These limits have been established via experimentation. |
 * The two things to balance are latency and fairness:
471 | * we want to handle softirqs as soon as possible, but they |
472 | * should not be able to lock up the box. |
473 | */ |
474 | #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) |
475 | #define MAX_SOFTIRQ_RESTART 10 |
476 | |
477 | #ifdef CONFIG_TRACE_IRQFLAGS |
478 | /* |
479 | * When we run softirqs from irq_exit() and thus on the hardirq stack we need |
480 | * to keep the lockdep irq context tracking as tight as possible in order to |
 * not mis-qualify lock contexts and miss possible deadlocks.
482 | */ |
483 | |
484 | static inline bool lockdep_softirq_start(void) |
485 | { |
486 | bool in_hardirq = false; |
487 | |
488 | if (lockdep_hardirq_context()) { |
489 | in_hardirq = true; |
490 | lockdep_hardirq_exit(); |
491 | } |
492 | |
493 | lockdep_softirq_enter(); |
494 | |
495 | return in_hardirq; |
496 | } |
497 | |
498 | static inline void lockdep_softirq_end(bool in_hardirq) |
499 | { |
500 | lockdep_softirq_exit(); |
501 | |
502 | if (in_hardirq) |
503 | lockdep_hardirq_enter(); |
504 | } |
505 | #else |
506 | static inline bool lockdep_softirq_start(void) { return false; } |
507 | static inline void lockdep_softirq_end(bool in_hardirq) { } |
508 | #endif |
509 | |
510 | asmlinkage __visible void __softirq_entry __do_softirq(void) |
511 | { |
512 | unsigned long end = jiffies + MAX_SOFTIRQ_TIME; |
513 | unsigned long old_flags = current->flags; |
514 | int max_restart = MAX_SOFTIRQ_RESTART; |
515 | struct softirq_action *h; |
516 | bool in_hardirq; |
517 | __u32 pending; |
518 | int softirq_bit; |
519 | |
520 | /* |
521 | * Mask out PF_MEMALLOC as the current task context is borrowed for the |
 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
523 | * again if the socket is related to swapping. |
524 | */ |
525 | current->flags &= ~PF_MEMALLOC; |
526 | |
527 | pending = local_softirq_pending(); |
528 | |
529 | softirq_handle_begin(); |
530 | in_hardirq = lockdep_softirq_start(); |
531 | account_softirq_enter(current); |
532 | |
533 | restart: |
534 | /* Reset the pending bitmask before enabling irqs */ |
535 | set_softirq_pending(0); |
536 | |
537 | local_irq_enable(); |
538 | |
539 | h = softirq_vec; |
540 | |
541 | while ((softirq_bit = ffs(pending))) { |
542 | unsigned int vec_nr; |
543 | int prev_count; |
544 | |
545 | h += softirq_bit - 1; |
546 | |
547 | vec_nr = h - softirq_vec; |
548 | prev_count = preempt_count(); |
549 | |
		kstat_incr_softirqs_this_cpu(vec_nr);
551 | |
552 | trace_softirq_entry(vec_nr); |
553 | h->action(h); |
554 | trace_softirq_exit(vec_nr); |
555 | if (unlikely(prev_count != preempt_count())) { |
556 | pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n" , |
557 | vec_nr, softirq_to_name[vec_nr], h->action, |
558 | prev_count, preempt_count()); |
559 | preempt_count_set(pc: prev_count); |
560 | } |
561 | h++; |
562 | pending >>= softirq_bit; |
563 | } |
564 | |
565 | if (!IS_ENABLED(CONFIG_PREEMPT_RT) && |
566 | __this_cpu_read(ksoftirqd) == current) |
567 | rcu_softirq_qs(); |
568 | |
569 | local_irq_disable(); |
570 | |
571 | pending = local_softirq_pending(); |
572 | if (pending) { |
573 | if (time_before(jiffies, end) && !need_resched() && |
574 | --max_restart) |
575 | goto restart; |
576 | |
577 | wakeup_softirqd(); |
578 | } |
579 | |
580 | account_softirq_exit(current); |
581 | lockdep_softirq_end(in_hardirq); |
582 | softirq_handle_end(); |
	current_restore_flags(old_flags, PF_MEMALLOC);
584 | } |
585 | |
586 | /** |
587 | * irq_enter_rcu - Enter an interrupt context with RCU watching |
588 | */ |
589 | void irq_enter_rcu(void) |
590 | { |
591 | __irq_enter_raw(); |
592 | |
593 | if (tick_nohz_full_cpu(smp_processor_id()) || |
594 | (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))) |
595 | tick_irq_enter(); |
596 | |
597 | account_hardirq_enter(current); |
598 | } |
599 | |
600 | /** |
601 | * irq_enter - Enter an interrupt context including RCU update |
602 | */ |
603 | void irq_enter(void) |
604 | { |
605 | ct_irq_enter(); |
606 | irq_enter_rcu(); |
607 | } |
608 | |
609 | static inline void tick_irq_exit(void) |
610 | { |
611 | #ifdef CONFIG_NO_HZ_COMMON |
612 | int cpu = smp_processor_id(); |
613 | |
614 | /* Make sure that timer wheel updates are propagated */ |
615 | if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) { |
616 | if (!in_hardirq()) |
617 | tick_nohz_irq_exit(); |
618 | } |
619 | #endif |
620 | } |
621 | |
622 | static inline void __irq_exit_rcu(void) |
623 | { |
624 | #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED |
625 | local_irq_disable(); |
626 | #else |
627 | lockdep_assert_irqs_disabled(); |
628 | #endif |
629 | account_hardirq_exit(current); |
630 | preempt_count_sub(HARDIRQ_OFFSET); |
631 | if (!in_interrupt() && local_softirq_pending()) |
632 | invoke_softirq(); |
633 | |
634 | tick_irq_exit(); |
635 | } |
636 | |
637 | /** |
638 | * irq_exit_rcu() - Exit an interrupt context without updating RCU |
639 | * |
640 | * Also processes softirqs if needed and possible. |
641 | */ |
642 | void irq_exit_rcu(void) |
643 | { |
644 | __irq_exit_rcu(); |
645 | /* must be last! */ |
646 | lockdep_hardirq_exit(); |
647 | } |
648 | |
649 | /** |
650 | * irq_exit - Exit an interrupt context, update RCU and lockdep |
651 | * |
652 | * Also processes softirqs if needed and possible. |
653 | */ |
654 | void irq_exit(void) |
655 | { |
656 | __irq_exit_rcu(); |
657 | ct_irq_exit(); |
658 | /* must be last! */ |
659 | lockdep_hardirq_exit(); |
660 | } |
661 | |
662 | /* |
663 | * This function must run with irqs disabled! |
664 | */ |
665 | inline void raise_softirq_irqoff(unsigned int nr) |
666 | { |
667 | __raise_softirq_irqoff(nr); |
668 | |
669 | /* |
670 | * If we're in an interrupt or softirq, we're done |
671 | * (this also catches softirq-disabled code). We will |
672 | * actually run the softirq once we return from |
673 | * the irq or softirq. |
674 | * |
675 | * Otherwise we wake up ksoftirqd to make sure we |
676 | * schedule the softirq soon. |
677 | */ |
678 | if (!in_interrupt() && should_wake_ksoftirqd()) |
679 | wakeup_softirqd(); |
680 | } |
681 | |
682 | void raise_softirq(unsigned int nr) |
683 | { |
684 | unsigned long flags; |
685 | |
686 | local_irq_save(flags); |
687 | raise_softirq_irqoff(nr); |
688 | local_irq_restore(flags); |
689 | } |
690 | |
691 | void __raise_softirq_irqoff(unsigned int nr) |
692 | { |
693 | lockdep_assert_irqs_disabled(); |
	trace_softirq_raise(nr);
695 | or_softirq_pending(1UL << nr); |
696 | } |
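
/*
 * Illustrative use of the raise_softirq() family (sketch, not part of this
 * file): raise_softirq() is for callers that may run with interrupts
 * enabled, raise_softirq_irqoff() for callers that already run with
 * interrupts disabled, and __raise_softirq_irqoff() only marks the vector
 * pending without ever waking ksoftirqd. For example, NAPI's
 * ____napi_schedule() uses the latter from hardirq context:
 *
 *	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 */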
697 | |
698 | void open_softirq(int nr, void (*action)(struct softirq_action *)) |
699 | { |
700 | softirq_vec[nr].action = action; |
701 | } |
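
/*
 * Softirq vectors are a fixed, compile-time set (see the enum in
 * <linux/interrupt.h>); open_softirq() merely installs the handler for one
 * of them during boot. A representative registration, sketched after
 * net/core/dev.c (exact call sites may differ between kernel versions):
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 * softirq_init() below does the same for HI_SOFTIRQ and TASKLET_SOFTIRQ.
 */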
702 | |
703 | /* |
704 | * Tasklets |
705 | */ |
706 | struct tasklet_head { |
707 | struct tasklet_struct *head; |
708 | struct tasklet_struct **tail; |
709 | }; |
710 | |
711 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); |
712 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); |
713 | |
714 | static void __tasklet_schedule_common(struct tasklet_struct *t, |
715 | struct tasklet_head __percpu *headp, |
716 | unsigned int softirq_nr) |
717 | { |
718 | struct tasklet_head *head; |
719 | unsigned long flags; |
720 | |
721 | local_irq_save(flags); |
722 | head = this_cpu_ptr(headp); |
723 | t->next = NULL; |
724 | *head->tail = t; |
725 | head->tail = &(t->next); |
	raise_softirq_irqoff(softirq_nr);
727 | local_irq_restore(flags); |
728 | } |
729 | |
730 | void __tasklet_schedule(struct tasklet_struct *t) |
731 | { |
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
734 | } |
735 | EXPORT_SYMBOL(__tasklet_schedule); |
736 | |
737 | void __tasklet_hi_schedule(struct tasklet_struct *t) |
738 | { |
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
741 | } |
742 | EXPORT_SYMBOL(__tasklet_hi_schedule); |
743 | |
744 | static bool tasklet_clear_sched(struct tasklet_struct *t) |
745 | { |
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
748 | return true; |
749 | } |
750 | |
	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);
754 | |
755 | return false; |
756 | } |
757 | |
758 | static void tasklet_action_common(struct softirq_action *a, |
759 | struct tasklet_head *tl_head, |
760 | unsigned int softirq_nr) |
761 | { |
762 | struct tasklet_struct *list; |
763 | |
764 | local_irq_disable(); |
765 | list = tl_head->head; |
766 | tl_head->head = NULL; |
767 | tl_head->tail = &tl_head->head; |
768 | local_irq_enable(); |
769 | |
770 | while (list) { |
771 | struct tasklet_struct *t = list; |
772 | |
773 | list = list->next; |
774 | |
775 | if (tasklet_trylock(t)) { |
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
786 | } |
787 | } |
788 | tasklet_unlock(t); |
789 | continue; |
790 | } |
791 | tasklet_unlock(t); |
792 | } |
793 | |
794 | local_irq_disable(); |
795 | t->next = NULL; |
796 | *tl_head->tail = t; |
797 | tl_head->tail = &t->next; |
		__raise_softirq_irqoff(softirq_nr);
799 | local_irq_enable(); |
800 | } |
801 | } |
802 | |
803 | static __latent_entropy void tasklet_action(struct softirq_action *a) |
804 | { |
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
806 | } |
807 | |
808 | static __latent_entropy void tasklet_hi_action(struct softirq_action *a) |
809 | { |
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
811 | } |
812 | |
813 | void tasklet_setup(struct tasklet_struct *t, |
814 | void (*callback)(struct tasklet_struct *)) |
815 | { |
816 | t->next = NULL; |
817 | t->state = 0; |
	atomic_set(&t->count, 0);
819 | t->callback = callback; |
820 | t->use_callback = true; |
821 | t->data = 0; |
822 | } |
823 | EXPORT_SYMBOL(tasklet_setup); |
824 | |
825 | void tasklet_init(struct tasklet_struct *t, |
826 | void (*func)(unsigned long), unsigned long data) |
827 | { |
828 | t->next = NULL; |
829 | t->state = 0; |
	atomic_set(&t->count, 0);
831 | t->func = func; |
832 | t->use_callback = false; |
833 | t->data = data; |
834 | } |
835 | EXPORT_SYMBOL(tasklet_init); |
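
/*
 * Illustrative tasklet usage (hypothetical driver code, not part of this
 * file). With the tasklet_setup()/callback API the handler receives the
 * tasklet itself and can recover its containing structure via from_tasklet():
 *
 *	static void my_dev_bh(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, bh_task);
 *
 *		// deferred work for dev, runs in softirq context
 *	}
 *
 *	tasklet_setup(&dev->bh_task, my_dev_bh);	// e.g. in probe()
 *	tasklet_schedule(&dev->bh_task);		// e.g. from the irq handler
 */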
836 | |
837 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
838 | /* |
839 | * Do not use in new code. Waiting for tasklets from atomic contexts is |
840 | * error prone and should be avoided. |
841 | */ |
842 | void tasklet_unlock_spin_wait(struct tasklet_struct *t) |
843 | { |
844 | while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { |
845 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) { |
846 | /* |
			 * Prevent a live lock when the current task preempted
			 * soft interrupt processing or prevents ksoftirqd from
849 | * running. If the tasklet runs on a different CPU |
850 | * then this has no effect other than doing the BH |
851 | * disable/enable dance for nothing. |
852 | */ |
853 | local_bh_disable(); |
854 | local_bh_enable(); |
855 | } else { |
856 | cpu_relax(); |
857 | } |
858 | } |
859 | } |
860 | EXPORT_SYMBOL(tasklet_unlock_spin_wait); |
861 | #endif |
862 | |
863 | void tasklet_kill(struct tasklet_struct *t) |
864 | { |
865 | if (in_interrupt()) |
866 | pr_notice("Attempt to kill tasklet from interrupt\n" ); |
867 | |
	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
869 | wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state)); |
870 | |
871 | tasklet_unlock_wait(t); |
872 | tasklet_clear_sched(t); |
873 | } |
874 | EXPORT_SYMBOL(tasklet_kill); |
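
/*
 * Typical teardown ordering for the sketch above (hypothetical driver, not
 * part of this file): first make sure nothing can schedule the tasklet
 * again, then wait for a scheduled or running instance to finish:
 *
 *	disable_irq(dev->irq);		// stop the source of new schedules
 *	tasklet_kill(&dev->bh_task);	// waits until SCHED and RUN are clear
 */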
875 | |
876 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
877 | void tasklet_unlock(struct tasklet_struct *t) |
878 | { |
879 | smp_mb__before_atomic(); |
	clear_bit(TASKLET_STATE_RUN, &t->state);
881 | smp_mb__after_atomic(); |
	wake_up_var(&t->state);
883 | } |
884 | EXPORT_SYMBOL_GPL(tasklet_unlock); |
885 | |
886 | void tasklet_unlock_wait(struct tasklet_struct *t) |
887 | { |
888 | wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state)); |
889 | } |
890 | EXPORT_SYMBOL_GPL(tasklet_unlock_wait); |
891 | #endif |
892 | |
893 | void __init softirq_init(void) |
894 | { |
895 | int cpu; |
896 | |
897 | for_each_possible_cpu(cpu) { |
898 | per_cpu(tasklet_vec, cpu).tail = |
899 | &per_cpu(tasklet_vec, cpu).head; |
900 | per_cpu(tasklet_hi_vec, cpu).tail = |
901 | &per_cpu(tasklet_hi_vec, cpu).head; |
902 | } |
903 | |
	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
906 | } |
907 | |
908 | static int ksoftirqd_should_run(unsigned int cpu) |
909 | { |
910 | return local_softirq_pending(); |
911 | } |
912 | |
913 | static void run_ksoftirqd(unsigned int cpu) |
914 | { |
915 | ksoftirqd_run_begin(); |
916 | if (local_softirq_pending()) { |
917 | /* |
918 | * We can safely run softirq on inline stack, as we are not deep |
919 | * in the task stack here. |
920 | */ |
921 | __do_softirq(); |
922 | ksoftirqd_run_end(); |
923 | cond_resched(); |
924 | return; |
925 | } |
926 | ksoftirqd_run_end(); |
927 | } |
928 | |
929 | #ifdef CONFIG_HOTPLUG_CPU |
930 | static int takeover_tasklets(unsigned int cpu) |
931 | { |
932 | /* CPU is dead, so no lock needed. */ |
933 | local_irq_disable(); |
934 | |
935 | /* Find end, append list for that CPU. */ |
936 | if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { |
937 | *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; |
938 | __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); |
939 | per_cpu(tasklet_vec, cpu).head = NULL; |
940 | per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; |
941 | } |
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
943 | |
944 | if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { |
945 | *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; |
946 | __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail); |
947 | per_cpu(tasklet_hi_vec, cpu).head = NULL; |
948 | per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; |
949 | } |
	raise_softirq_irqoff(HI_SOFTIRQ);
951 | |
952 | local_irq_enable(); |
953 | return 0; |
954 | } |
955 | #else |
956 | #define takeover_tasklets NULL |
957 | #endif /* CONFIG_HOTPLUG_CPU */ |
958 | |
959 | static struct smp_hotplug_thread softirq_threads = { |
960 | .store = &ksoftirqd, |
961 | .thread_should_run = ksoftirqd_should_run, |
962 | .thread_fn = run_ksoftirqd, |
	.thread_comm		= "ksoftirqd/%u",
964 | }; |
965 | |
966 | static __init int spawn_ksoftirqd(void) |
967 | { |
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
970 | BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); |
971 | |
972 | return 0; |
973 | } |
974 | early_initcall(spawn_ksoftirqd); |
975 | |
976 | /* |
977 | * [ These __weak aliases are kept in a separate compilation unit, so that |
978 | * GCC does not inline them incorrectly. ] |
979 | */ |
980 | |
981 | int __init __weak early_irq_init(void) |
982 | { |
983 | return 0; |
984 | } |
985 | |
986 | int __init __weak arch_probe_nr_irqs(void) |
987 | { |
988 | return NR_IRQS_LEGACY; |
989 | } |
990 | |
991 | int __init __weak arch_early_irq_init(void) |
992 | { |
993 | return 0; |
994 | } |
995 | |
996 | unsigned int __weak arch_dynirq_lower_bound(unsigned int from) |
997 | { |
998 | return from; |
999 | } |
1000 | |