1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> |
4 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar |
5 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner |
6 | * |
7 | * NOHZ implementation for low and high resolution timers |
8 | * |
9 | * Started by: Thomas Gleixner and Ingo Molnar |
10 | */ |
11 | #include <linux/compiler.h> |
12 | #include <linux/cpu.h> |
13 | #include <linux/err.h> |
14 | #include <linux/hrtimer.h> |
15 | #include <linux/interrupt.h> |
16 | #include <linux/kernel_stat.h> |
17 | #include <linux/percpu.h> |
18 | #include <linux/nmi.h> |
19 | #include <linux/profile.h> |
20 | #include <linux/sched/signal.h> |
21 | #include <linux/sched/clock.h> |
22 | #include <linux/sched/stat.h> |
23 | #include <linux/sched/nohz.h> |
24 | #include <linux/sched/loadavg.h> |
25 | #include <linux/module.h> |
26 | #include <linux/irq_work.h> |
27 | #include <linux/posix-timers.h> |
28 | #include <linux/context_tracking.h> |
29 | #include <linux/mm.h> |
30 | |
31 | #include <asm/irq_regs.h> |
32 | |
33 | #include "tick-internal.h" |
34 | |
35 | #include <trace/events/timer.h> |
36 | |
37 | /* |
38 | * Per-CPU nohz control structure |
39 | */ |
40 | static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); |
41 | |
42 | struct tick_sched *tick_get_tick_sched(int cpu) |
43 | { |
44 | return &per_cpu(tick_cpu_sched, cpu); |
45 | } |
46 | |
47 | /* |
48 | * The time when the last jiffy update happened. Write access must hold |
49 | * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a |
50 | * consistent view of jiffies and last_jiffies_update. |
51 | */ |
52 | static ktime_t last_jiffies_update; |
53 | |
54 | /* |
55 | * Must be called with interrupts disabled ! |
56 | */ |
57 | static void tick_do_update_jiffies64(ktime_t now) |
58 | { |
59 | unsigned long ticks = 1; |
60 | ktime_t delta, nextp; |
61 | |
62 | /* |
63 | * 64-bit can do a quick check without holding the jiffies lock and |
64 | * without looking at the sequence count. The smp_load_acquire() |
65 | * pairs with the update done later in this function. |
66 | * |
67 | * 32-bit cannot do that because the store of 'tick_next_period' |
68 | * consists of two 32-bit stores, and the first store could be |
69 | * moved by the CPU to a random point in the future. |
70 | */ |
71 | if (IS_ENABLED(CONFIG_64BIT)) { |
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
73 | return; |
74 | } else { |
75 | unsigned int seq; |
76 | |
77 | /* |
78 | * Avoid contention on 'jiffies_lock' and protect the quick |
79 | * check with the sequence count. |
80 | */ |
81 | do { |
82 | seq = read_seqcount_begin(&jiffies_seq); |
83 | nextp = tick_next_period; |
84 | } while (read_seqcount_retry(&jiffies_seq, seq)); |
85 | |
		if (ktime_before(now, nextp))
87 | return; |
88 | } |
89 | |
90 | /* Quick check failed, i.e. update is required. */ |
91 | raw_spin_lock(&jiffies_lock); |
92 | /* |
93 | * Re-evaluate with the lock held. Another CPU might have done the |
94 | * update already. |
95 | */ |
	if (ktime_before(now, tick_next_period)) {
97 | raw_spin_unlock(&jiffies_lock); |
98 | return; |
99 | } |
100 | |
101 | write_seqcount_begin(&jiffies_seq); |
102 | |
103 | delta = ktime_sub(now, tick_next_period); |
104 | if (unlikely(delta >= TICK_NSEC)) { |
105 | /* Slow path for long idle sleep times */ |
106 | s64 incr = TICK_NSEC; |
107 | |
		ticks += ktime_divns(delta, incr);
109 | |
110 | last_jiffies_update = ktime_add_ns(last_jiffies_update, |
111 | incr * ticks); |
112 | } else { |
113 | last_jiffies_update = ktime_add_ns(last_jiffies_update, |
114 | TICK_NSEC); |
115 | } |
116 | |
117 | /* Advance jiffies to complete the 'jiffies_seq' protected job */ |
118 | jiffies_64 += ticks; |
119 | |
120 | /* Keep the tick_next_period variable up to date */ |
121 | nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC); |
122 | |
123 | if (IS_ENABLED(CONFIG_64BIT)) { |
124 | /* |
125 | * Pairs with smp_load_acquire() in the lockless quick |
126 | * check above, and ensures that the update to 'jiffies_64' is |
127 | * not reordered vs. the store to 'tick_next_period', neither |
128 | * by the compiler nor by the CPU. |
129 | */ |
130 | smp_store_release(&tick_next_period, nextp); |
131 | } else { |
132 | /* |
133 | * A plain store is good enough on 32-bit, as the quick check |
134 | * above is protected by the sequence count. |
135 | */ |
136 | tick_next_period = nextp; |
137 | } |
138 | |
139 | /* |
140 | * Release the sequence count. calc_global_load() below is not |
141 | * protected by it, but 'jiffies_lock' needs to be held to prevent |
142 | * concurrent invocations. |
143 | */ |
144 | write_seqcount_end(&jiffies_seq); |
145 | |
146 | calc_global_load(); |
147 | |
148 | raw_spin_unlock(&jiffies_lock); |
149 | update_wall_time(); |
150 | } |
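/*
 * Illustrative ordering sketch (not additional code) for the 64-bit lockless
 * quick check used by tick_do_update_jiffies64() above:
 *
 *	Updater (above, jiffies_lock held)	Reader (quick check)
 *	jiffies_64 += ticks;
 *	smp_store_release(&tick_next_period, nextp);
 *						x = smp_load_acquire(&tick_next_period);
 *						if (ktime_before(now, x))
 *							return;
 *
 * A reader that observes the new 'tick_next_period' is therefore also
 * guaranteed to observe the already updated 'jiffies_64'.
 */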
151 | |
152 | /* |
 * Initialize and return the jiffies update.
154 | */ |
155 | static ktime_t tick_init_jiffy_update(void) |
156 | { |
157 | ktime_t period; |
158 | |
159 | raw_spin_lock(&jiffies_lock); |
160 | write_seqcount_begin(&jiffies_seq); |
161 | |
162 | /* Have we started the jiffies update yet ? */ |
163 | if (last_jiffies_update == 0) { |
164 | u32 rem; |
165 | |
166 | /* |
167 | * Ensure that the tick is aligned to a multiple of |
168 | * TICK_NSEC. |
169 | */ |
		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
171 | if (rem) |
172 | tick_next_period += TICK_NSEC - rem; |
173 | |
174 | last_jiffies_update = tick_next_period; |
175 | } |
176 | period = last_jiffies_update; |
177 | |
178 | write_seqcount_end(&jiffies_seq); |
179 | raw_spin_unlock(&jiffies_lock); |
180 | |
181 | return period; |
182 | } |
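/*
 * Worked example (illustrative) of the TICK_NSEC alignment done in
 * tick_init_jiffy_update() above, assuming HZ=1000 so TICK_NSEC == 1,000,000 ns:
 * a raw 'tick_next_period' of 2,345,678 ns leaves rem == 345,678, so it is
 * rounded up by TICK_NSEC - rem to 3,000,000 ns, the next tick boundary.
 */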
183 | |
184 | static inline int tick_sched_flag_test(struct tick_sched *ts, |
185 | unsigned long flag) |
186 | { |
187 | return !!(ts->flags & flag); |
188 | } |
189 | |
190 | static inline void tick_sched_flag_set(struct tick_sched *ts, |
191 | unsigned long flag) |
192 | { |
193 | lockdep_assert_irqs_disabled(); |
194 | ts->flags |= flag; |
195 | } |
196 | |
197 | static inline void tick_sched_flag_clear(struct tick_sched *ts, |
198 | unsigned long flag) |
199 | { |
200 | lockdep_assert_irqs_disabled(); |
201 | ts->flags &= ~flag; |
202 | } |
203 | |
204 | #define MAX_STALLED_JIFFIES 5 |
205 | |
206 | static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) |
207 | { |
208 | int tick_cpu, cpu = smp_processor_id(); |
209 | |
210 | /* |
211 | * Check if the do_timer duty was dropped. We don't care about |
212 | * concurrency: This happens only when the CPU in charge went |
213 | * into a long sleep. If two CPUs happen to assign themselves to |
214 | * this duty, then the jiffies update is still serialized by |
215 | * 'jiffies_lock'. |
216 | * |
	 * If nohz_full is enabled, this should not happen because the
	 * 'tick_do_timer_cpu' CPU never relinquishes the duty.
219 | */ |
220 | tick_cpu = READ_ONCE(tick_do_timer_cpu); |
221 | |
222 | if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) { |
223 | #ifdef CONFIG_NO_HZ_FULL |
224 | WARN_ON_ONCE(tick_nohz_full_running); |
225 | #endif |
226 | WRITE_ONCE(tick_do_timer_cpu, cpu); |
227 | tick_cpu = cpu; |
228 | } |
229 | |
230 | /* Check if jiffies need an update */ |
231 | if (tick_cpu == cpu) |
232 | tick_do_update_jiffies64(now); |
233 | |
234 | /* |
235 | * If the jiffies update stalled for too long (timekeeper in stop_machine() |
236 | * or VMEXIT'ed for several msecs), force an update. |
237 | */ |
238 | if (ts->last_tick_jiffies != jiffies) { |
239 | ts->stalled_jiffies = 0; |
240 | ts->last_tick_jiffies = READ_ONCE(jiffies); |
241 | } else { |
242 | if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { |
243 | tick_do_update_jiffies64(now); |
244 | ts->stalled_jiffies = 0; |
245 | ts->last_tick_jiffies = READ_ONCE(jiffies); |
246 | } |
247 | } |
248 | |
249 | if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) |
250 | ts->got_idle_tick = 1; |
251 | } |
252 | |
253 | static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) |
254 | { |
255 | /* |
256 | * When we are idle and the tick is stopped, we have to touch |
257 | * the watchdog as we might not schedule for a really long |
258 | * time. This happens on completely idle SMP systems while |
259 | * waiting on the login prompt. We also increment the "start of |
260 | * idle" jiffy stamp so the idle accounting adjustment we do |
261 | * when we go busy again does not account too many ticks. |
262 | */ |
263 | if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && |
264 | tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { |
265 | touch_softlockup_watchdog_sched(); |
266 | if (is_idle_task(current)) |
267 | ts->idle_jiffies++; |
268 | /* |
269 | * In case the current tick fired too early past its expected |
270 | * expiration, make sure we don't bypass the next clock reprogramming |
271 | * to the same deadline. |
272 | */ |
273 | ts->next_tick = 0; |
274 | } |
275 | |
	update_process_times(user_mode(regs));
277 | profile_tick(CPU_PROFILING); |
278 | } |
279 | |
280 | /* |
281 | * We rearm the timer until we get disabled by the idle code. |
282 | * Called with interrupts disabled. |
283 | */ |
284 | static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) |
285 | { |
286 | struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); |
287 | struct pt_regs *regs = get_irq_regs(); |
288 | ktime_t now = ktime_get(); |
289 | |
290 | tick_sched_do_timer(ts, now); |
291 | |
292 | /* |
293 | * Do not call when we are not in IRQ context and have |
294 | * no valid 'regs' pointer |
295 | */ |
296 | if (regs) |
297 | tick_sched_handle(ts, regs); |
298 | else |
299 | ts->next_tick = 0; |
300 | |
301 | /* |
302 | * In dynticks mode, tick reprogram is deferred: |
303 | * - to the idle task if in dynticks-idle |
304 | * - to IRQ exit if in full-dynticks. |
305 | */ |
306 | if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED))) |
307 | return HRTIMER_NORESTART; |
308 | |
309 | hrtimer_forward(timer, now, TICK_NSEC); |
310 | |
311 | return HRTIMER_RESTART; |
312 | } |
313 | |
314 | static void tick_sched_timer_cancel(struct tick_sched *ts) |
315 | { |
316 | if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) |
		hrtimer_cancel(&ts->sched_timer);
	else if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
		tick_program_event(KTIME_MAX, 1);
320 | } |
321 | |
322 | #ifdef CONFIG_NO_HZ_FULL |
323 | cpumask_var_t tick_nohz_full_mask; |
324 | EXPORT_SYMBOL_GPL(tick_nohz_full_mask); |
325 | bool tick_nohz_full_running; |
326 | EXPORT_SYMBOL_GPL(tick_nohz_full_running); |
327 | static atomic_t tick_dep_mask; |
328 | |
329 | static bool check_tick_dependency(atomic_t *dep) |
330 | { |
331 | int val = atomic_read(dep); |
332 | |
333 | if (val & TICK_DEP_MASK_POSIX_TIMER) { |
334 | trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); |
335 | return true; |
336 | } |
337 | |
338 | if (val & TICK_DEP_MASK_PERF_EVENTS) { |
339 | trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); |
340 | return true; |
341 | } |
342 | |
343 | if (val & TICK_DEP_MASK_SCHED) { |
344 | trace_tick_stop(0, TICK_DEP_MASK_SCHED); |
345 | return true; |
346 | } |
347 | |
348 | if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) { |
349 | trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); |
350 | return true; |
351 | } |
352 | |
353 | if (val & TICK_DEP_MASK_RCU) { |
354 | trace_tick_stop(0, TICK_DEP_MASK_RCU); |
355 | return true; |
356 | } |
357 | |
358 | if (val & TICK_DEP_MASK_RCU_EXP) { |
359 | trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP); |
360 | return true; |
361 | } |
362 | |
363 | return false; |
364 | } |
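/*
 * Note on the naming used above: the TICK_DEP_MASK_* constants are expected
 * to be the single-bit masks built from the corresponding TICK_DEP_BIT_*
 * enumerators in <linux/tick.h>, e.g.:
 *
 *	TICK_DEP_MASK_POSIX_TIMER == BIT(TICK_DEP_BIT_POSIX_TIMER)
 *
 * which is why the setters below publish a dependency with
 * atomic_fetch_or(BIT(bit), dep) and this function can test the very same
 * dependency with the *_MASK_* constants.
 */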
365 | |
366 | static bool can_stop_full_tick(int cpu, struct tick_sched *ts) |
367 | { |
368 | lockdep_assert_irqs_disabled(); |
369 | |
370 | if (unlikely(!cpu_online(cpu))) |
371 | return false; |
372 | |
373 | if (check_tick_dependency(&tick_dep_mask)) |
374 | return false; |
375 | |
376 | if (check_tick_dependency(&ts->tick_dep_mask)) |
377 | return false; |
378 | |
379 | if (check_tick_dependency(¤t->tick_dep_mask)) |
380 | return false; |
381 | |
382 | if (check_tick_dependency(¤t->signal->tick_dep_mask)) |
383 | return false; |
384 | |
385 | return true; |
386 | } |
387 | |
388 | static void nohz_full_kick_func(struct irq_work *work) |
389 | { |
390 | /* Empty, the tick restart happens on tick_nohz_irq_exit() */ |
391 | } |
392 | |
393 | static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = |
394 | IRQ_WORK_INIT_HARD(nohz_full_kick_func); |
395 | |
396 | /* |
397 | * Kick this CPU if it's full dynticks in order to force it to |
398 | * re-evaluate its dependency on the tick and restart it if necessary. |
399 | * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(), |
400 | * is NMI safe. |
401 | */ |
402 | static void tick_nohz_full_kick(void) |
403 | { |
404 | if (!tick_nohz_full_cpu(smp_processor_id())) |
405 | return; |
406 | |
407 | irq_work_queue(this_cpu_ptr(&nohz_full_kick_work)); |
408 | } |
409 | |
410 | /* |
411 | * Kick the CPU if it's full dynticks in order to force it to |
412 | * re-evaluate its dependency on the tick and restart it if necessary. |
413 | */ |
414 | void tick_nohz_full_kick_cpu(int cpu) |
415 | { |
416 | if (!tick_nohz_full_cpu(cpu)) |
417 | return; |
418 | |
419 | irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); |
420 | } |
421 | |
422 | static void tick_nohz_kick_task(struct task_struct *tsk) |
423 | { |
424 | int cpu; |
425 | |
426 | /* |
427 | * If the task is not running, run_posix_cpu_timers() |
428 | * has nothing to elapse, and an IPI can then be optimized out. |
429 | * |
430 | * activate_task() STORE p->tick_dep_mask |
431 | * STORE p->on_rq |
432 | * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or()) |
433 | * LOCK rq->lock LOAD p->on_rq |
434 | * smp_mb__after_spin_lock() |
435 | * tick_nohz_task_switch() |
436 | * LOAD p->tick_dep_mask |
437 | */ |
438 | if (!sched_task_on_rq(tsk)) |
439 | return; |
440 | |
441 | /* |
442 | * If the task concurrently migrates to another CPU, |
443 | * we guarantee it sees the new tick dependency upon |
444 | * schedule. |
445 | * |
446 | * set_task_cpu(p, cpu); |
447 | * STORE p->cpu = @cpu |
448 | * __schedule() (switch to task 'p') |
449 | * LOCK rq->lock |
450 | * smp_mb__after_spin_lock() STORE p->tick_dep_mask |
451 | * tick_nohz_task_switch() smp_mb() (atomic_fetch_or()) |
452 | * LOAD p->tick_dep_mask LOAD p->cpu |
453 | */ |
454 | cpu = task_cpu(tsk); |
455 | |
456 | preempt_disable(); |
457 | if (cpu_online(cpu)) |
458 | tick_nohz_full_kick_cpu(cpu); |
459 | preempt_enable(); |
460 | } |
461 | |
462 | /* |
463 | * Kick all full dynticks CPUs in order to force these to re-evaluate |
464 | * their dependency on the tick and restart it if necessary. |
465 | */ |
466 | static void tick_nohz_full_kick_all(void) |
467 | { |
468 | int cpu; |
469 | |
470 | if (!tick_nohz_full_running) |
471 | return; |
472 | |
473 | preempt_disable(); |
474 | for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask) |
475 | tick_nohz_full_kick_cpu(cpu); |
476 | preempt_enable(); |
477 | } |
478 | |
479 | static void tick_nohz_dep_set_all(atomic_t *dep, |
480 | enum tick_dep_bits bit) |
481 | { |
482 | int prev; |
483 | |
484 | prev = atomic_fetch_or(BIT(bit), dep); |
485 | if (!prev) |
486 | tick_nohz_full_kick_all(); |
487 | } |
488 | |
489 | /* |
490 | * Set a global tick dependency. Used by perf events that rely on freq and |
491 | * unstable clocks. |
492 | */ |
493 | void tick_nohz_dep_set(enum tick_dep_bits bit) |
494 | { |
495 | tick_nohz_dep_set_all(&tick_dep_mask, bit); |
496 | } |
497 | |
498 | void tick_nohz_dep_clear(enum tick_dep_bits bit) |
499 | { |
500 | atomic_andnot(BIT(bit), &tick_dep_mask); |
501 | } |
502 | |
503 | /* |
504 | * Set per-CPU tick dependency. Used by scheduler and perf events in order to |
505 | * manage event-throttling. |
506 | */ |
507 | void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) |
508 | { |
509 | int prev; |
510 | struct tick_sched *ts; |
511 | |
512 | ts = per_cpu_ptr(&tick_cpu_sched, cpu); |
513 | |
514 | prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); |
515 | if (!prev) { |
516 | preempt_disable(); |
517 | /* Perf needs local kick that is NMI safe */ |
518 | if (cpu == smp_processor_id()) { |
519 | tick_nohz_full_kick(); |
520 | } else { |
521 | /* Remote IRQ work not NMI-safe */ |
522 | if (!WARN_ON_ONCE(in_nmi())) |
523 | tick_nohz_full_kick_cpu(cpu); |
524 | } |
525 | preempt_enable(); |
526 | } |
527 | } |
528 | EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu); |
529 | |
530 | void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) |
531 | { |
532 | struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); |
533 | |
534 | atomic_andnot(BIT(bit), &ts->tick_dep_mask); |
535 | } |
536 | EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu); |
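/*
 * Illustrative usage sketch of the per-CPU dependency API above (the
 * TICK_DEP_BIT_PERF_EVENTS bit is just an example of a possible caller):
 *
 *	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	... the tick keeps running on @cpu while the dependency is set ...
 *	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 */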
537 | |
538 | /* |
539 | * Set a per-task tick dependency. RCU needs this. Also posix CPU timers |
540 | * in order to elapse per task timers. |
541 | */ |
542 | void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) |
543 | { |
544 | if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) |
545 | tick_nohz_kick_task(tsk); |
546 | } |
547 | EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task); |
548 | |
549 | void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) |
550 | { |
551 | atomic_andnot(BIT(bit), &tsk->tick_dep_mask); |
552 | } |
553 | EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task); |
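/*
 * Illustrative usage sketch of the per-task dependency API above, roughly how
 * a subsystem such as posix CPU timers would keep the tick alive while task
 * 'tsk' has armed timers (the bit choice is only an example):
 *
 *	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 *	... per-task timers can expire on every tick ...
 *	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 */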
554 | |
555 | /* |
556 | * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse |
557 | * per process timers. |
558 | */ |
559 | void tick_nohz_dep_set_signal(struct task_struct *tsk, |
560 | enum tick_dep_bits bit) |
561 | { |
562 | int prev; |
563 | struct signal_struct *sig = tsk->signal; |
564 | |
565 | prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); |
566 | if (!prev) { |
567 | struct task_struct *t; |
568 | |
569 | lockdep_assert_held(&tsk->sighand->siglock); |
570 | __for_each_thread(sig, t) |
571 | tick_nohz_kick_task(t); |
572 | } |
573 | } |
574 | |
575 | void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) |
576 | { |
577 | atomic_andnot(BIT(bit), &sig->tick_dep_mask); |
578 | } |
579 | |
580 | /* |
581 | * Re-evaluate the need for the tick as we switch the current task. |
582 | * It might need the tick due to per task/process properties: |
583 | * perf events, posix CPU timers, ... |
584 | */ |
585 | void __tick_nohz_task_switch(void) |
586 | { |
587 | struct tick_sched *ts; |
588 | |
589 | if (!tick_nohz_full_cpu(smp_processor_id())) |
590 | return; |
591 | |
592 | ts = this_cpu_ptr(&tick_cpu_sched); |
593 | |
594 | if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { |
595 | if (atomic_read(¤t->tick_dep_mask) || |
596 | atomic_read(¤t->signal->tick_dep_mask)) |
597 | tick_nohz_full_kick(); |
598 | } |
599 | } |
600 | |
601 | /* Get the boot-time nohz CPU list from the kernel parameters. */ |
602 | void __init tick_nohz_full_setup(cpumask_var_t cpumask) |
603 | { |
604 | alloc_bootmem_cpumask_var(&tick_nohz_full_mask); |
605 | cpumask_copy(tick_nohz_full_mask, cpumask); |
606 | tick_nohz_full_running = true; |
607 | } |
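/*
 * Example (illustrative): booting with "nohz_full=1-7" on an 8 CPU system
 * fills 'tick_nohz_full_mask' with CPUs 1-7, leaving CPU 0 as a housekeeping
 * CPU which keeps the tick and the timekeeping duty on behalf of the others.
 */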
608 | |
609 | bool tick_nohz_cpu_hotpluggable(unsigned int cpu) |
610 | { |
611 | /* |
612 | * The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound |
613 | * timers, workqueues, timekeeping, ...) on behalf of full dynticks |
614 | * CPUs. It must remain online when nohz full is enabled. |
615 | */ |
616 | if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu) |
617 | return false; |
618 | return true; |
619 | } |
620 | |
621 | static int tick_nohz_cpu_down(unsigned int cpu) |
622 | { |
623 | return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY; |
624 | } |
625 | |
626 | void __init tick_nohz_init(void) |
627 | { |
628 | int cpu, ret; |
629 | |
630 | if (!tick_nohz_full_running) |
631 | return; |
632 | |
633 | /* |
634 | * Full dynticks uses IRQ work to drive the tick rescheduling on safe |
635 | * locking contexts. But then we need IRQ work to raise its own |
636 | * interrupts to avoid circular dependency on the tick. |
637 | */ |
638 | if (!arch_irq_work_has_interrupt()) { |
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
640 | cpumask_clear(tick_nohz_full_mask); |
641 | tick_nohz_full_running = false; |
642 | return; |
643 | } |
644 | |
645 | if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) && |
646 | !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) { |
647 | cpu = smp_processor_id(); |
648 | |
649 | if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { |
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
652 | cpumask_clear_cpu(cpu, tick_nohz_full_mask); |
653 | } |
654 | } |
655 | |
656 | for_each_cpu(cpu, tick_nohz_full_mask) |
657 | ct_cpu_track_user(cpu); |
658 | |
659 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
					"kernel/nohz:predown", NULL,
661 | tick_nohz_cpu_down); |
662 | WARN_ON(ret < 0); |
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
664 | cpumask_pr_args(tick_nohz_full_mask)); |
665 | } |
666 | #endif /* #ifdef CONFIG_NO_HZ_FULL */ |
667 | |
668 | /* |
669 | * NOHZ - aka dynamic tick functionality |
670 | */ |
671 | #ifdef CONFIG_NO_HZ_COMMON |
672 | /* |
673 | * NO HZ enabled ? |
674 | */ |
675 | bool tick_nohz_enabled __read_mostly = true; |
676 | unsigned long tick_nohz_active __read_mostly; |
677 | /* |
678 | * Enable / Disable tickless mode |
679 | */ |
680 | static int __init setup_tick_nohz(char *str) |
681 | { |
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
683 | } |
684 | |
__setup("nohz=", setup_tick_nohz);
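/*
 * Example (illustrative): "nohz=off" on the kernel command line makes
 * kstrtobool() above clear 'tick_nohz_enabled', so the system keeps the
 * periodic tick; "nohz=on" (the default) allows switching to NOHZ mode.
 */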
686 | |
687 | bool tick_nohz_tick_stopped(void) |
688 | { |
689 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
690 | |
691 | return tick_sched_flag_test(ts, TS_FLAG_STOPPED); |
692 | } |
693 | |
694 | bool tick_nohz_tick_stopped_cpu(int cpu) |
695 | { |
696 | struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); |
697 | |
698 | return tick_sched_flag_test(ts, TS_FLAG_STOPPED); |
699 | } |
700 | |
701 | /** |
702 | * tick_nohz_update_jiffies - update jiffies when idle was interrupted |
703 | * @now: current ktime_t |
704 | * |
705 | * Called from interrupt entry when the CPU was idle |
706 | * |
707 | * In case the sched_tick was stopped on this CPU, we have to check if jiffies |
708 | * must be updated. Otherwise an interrupt handler could use a stale jiffy |
709 | * value. We do this unconditionally on any CPU, as we don't know whether the |
710 | * CPU, which has the update task assigned, is in a long sleep. |
711 | */ |
712 | static void tick_nohz_update_jiffies(ktime_t now) |
713 | { |
714 | unsigned long flags; |
715 | |
716 | __this_cpu_write(tick_cpu_sched.idle_waketime, now); |
717 | |
718 | local_irq_save(flags); |
719 | tick_do_update_jiffies64(now); |
720 | local_irq_restore(flags); |
721 | |
722 | touch_softlockup_watchdog_sched(); |
723 | } |
724 | |
725 | static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) |
726 | { |
727 | ktime_t delta; |
728 | |
729 | if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))) |
730 | return; |
731 | |
732 | delta = ktime_sub(now, ts->idle_entrytime); |
733 | |
734 | write_seqcount_begin(&ts->idle_sleeptime_seq); |
735 | if (nr_iowait_cpu(smp_processor_id()) > 0) |
736 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); |
737 | else |
738 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); |
739 | |
740 | ts->idle_entrytime = now; |
741 | tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE); |
742 | write_seqcount_end(&ts->idle_sleeptime_seq); |
743 | |
744 | sched_clock_idle_wakeup_event(); |
745 | } |
746 | |
747 | static void tick_nohz_start_idle(struct tick_sched *ts) |
748 | { |
749 | write_seqcount_begin(&ts->idle_sleeptime_seq); |
750 | ts->idle_entrytime = ktime_get(); |
751 | tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE); |
752 | write_seqcount_end(&ts->idle_sleeptime_seq); |
753 | |
754 | sched_clock_idle_sleep_event(); |
755 | } |
756 | |
757 | static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, |
758 | bool compute_delta, u64 *last_update_time) |
759 | { |
760 | ktime_t now, idle; |
761 | unsigned int seq; |
762 | |
763 | if (!tick_nohz_active) |
764 | return -1; |
765 | |
766 | now = ktime_get(); |
767 | if (last_update_time) |
		*last_update_time = ktime_to_us(now);
769 | |
770 | do { |
771 | seq = read_seqcount_begin(&ts->idle_sleeptime_seq); |
772 | |
773 | if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) { |
774 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); |
775 | |
776 | idle = ktime_add(*sleeptime, delta); |
777 | } else { |
778 | idle = *sleeptime; |
779 | } |
780 | } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); |
781 | |
	return ktime_to_us(idle);
783 | |
784 | } |
785 | |
786 | /** |
787 | * get_cpu_idle_time_us - get the total idle time of a CPU |
788 | * @cpu: CPU number to query |
789 | * @last_update_time: variable to store update time in. Do not update |
790 | * counters if NULL. |
791 | * |
792 | * Return the cumulative idle time (since boot) for a given |
793 | * CPU, in microseconds. Note that this is partially broken due to |
794 | * the counter of iowait tasks that can be remotely updated without |
795 | * any synchronization. Therefore it is possible to observe backward |
796 | * values within two consecutive reads. |
797 | * |
798 | * This time is measured via accounting rather than sampling, |
799 | * and is as accurate as ktime_get() is. |
800 | * |
801 | * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu |
802 | */ |
803 | u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) |
804 | { |
805 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
806 | |
	return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
				     !nr_iowait_cpu(cpu), last_update_time);
809 | } |
810 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); |
811 | |
812 | /** |
813 | * get_cpu_iowait_time_us - get the total iowait time of a CPU |
814 | * @cpu: CPU number to query |
815 | * @last_update_time: variable to store update time in. Do not update |
816 | * counters if NULL. |
817 | * |
818 | * Return the cumulative iowait time (since boot) for a given |
819 | * CPU, in microseconds. Note this is partially broken due to |
820 | * the counter of iowait tasks that can be remotely updated without |
821 | * any synchronization. Therefore it is possible to observe backward |
822 | * values within two consecutive reads. |
823 | * |
824 | * This time is measured via accounting rather than sampling, |
825 | * and is as accurate as ktime_get() is. |
826 | * |
827 | * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu |
828 | */ |
829 | u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) |
830 | { |
831 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
832 | |
	return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
				     nr_iowait_cpu(cpu), last_update_time);
835 | } |
836 | EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); |
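/*
 * Illustrative sketch of how a consumer (e.g. a cpufreq governor) might use
 * the two accessors above; the -1 check follows from their documented return
 * value when NOHZ is not active:
 *
 *	u64 last_update, idle_us, iowait_us;
 *
 *	idle_us = get_cpu_idle_time_us(cpu, &last_update);
 *	iowait_us = get_cpu_iowait_time_us(cpu, NULL);
 *	if (idle_us == (u64)-1)
 *		... NOHZ inactive, fall back to tick based accounting ...
 */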
837 | |
838 | static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) |
839 | { |
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}
852 | |
853 | /* |
854 | * Reset to make sure the next tick stop doesn't get fooled by past |
855 | * cached clock deadline. |
856 | */ |
857 | ts->next_tick = 0; |
858 | } |
859 | |
860 | static inline bool local_timer_softirq_pending(void) |
861 | { |
862 | return local_softirq_pending() & BIT(TIMER_SOFTIRQ); |
863 | } |
864 | |
865 | /* |
866 | * Read jiffies and the time when jiffies were updated last |
867 | */ |
868 | u64 get_jiffies_update(unsigned long *basej) |
869 | { |
870 | unsigned long basejiff; |
871 | unsigned int seq; |
872 | u64 basemono; |
873 | |
874 | do { |
875 | seq = read_seqcount_begin(&jiffies_seq); |
876 | basemono = last_jiffies_update; |
877 | basejiff = jiffies; |
878 | } while (read_seqcount_retry(&jiffies_seq, seq)); |
879 | *basej = basejiff; |
880 | return basemono; |
881 | } |
882 | |
883 | /** |
884 | * tick_nohz_next_event() - return the clock monotonic based next event |
885 | * @ts: pointer to tick_sched struct |
886 | * @cpu: CPU number |
887 | * |
888 | * Return: |
889 | * *%0 - When the next event is a maximum of TICK_NSEC in the future |
890 | * and the tick is not stopped yet |
891 | * *%next_event - Next event based on clock monotonic |
892 | */ |
893 | static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) |
894 | { |
895 | u64 basemono, next_tick, delta, expires; |
896 | unsigned long basejiff; |
897 | int tick_cpu; |
898 | |
	basemono = get_jiffies_update(&basejiff);
900 | ts->last_jiffies = basejiff; |
901 | ts->timer_expires_base = basemono; |
902 | |
903 | /* |
904 | * Keep the periodic tick, when RCU, architecture or irq_work |
905 | * requests it. |
	 * Aside from that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
908 | * because there is an already expired timer, so it will request |
909 | * immediate expiry, which rearms the hardware timer with a |
910 | * minimal delta, which brings us back to this place |
911 | * immediately. Lather, rinse and repeat... |
912 | */ |
913 | if (rcu_needs_cpu() || arch_needs_cpu() || |
914 | irq_work_needs_cpu() || local_timer_softirq_pending()) { |
915 | next_tick = basemono + TICK_NSEC; |
916 | } else { |
917 | /* |
918 | * Get the next pending timer. If high resolution |
919 | * timers are enabled this only takes the timer wheel |
920 | * timers into account. If high resolution timers are |
921 | * disabled this also looks at the next expiring |
922 | * hrtimer. |
923 | */ |
		next_tick = get_next_timer_interrupt(basejiff, basemono);
925 | ts->next_timer = next_tick; |
926 | } |
927 | |
928 | /* Make sure next_tick is never before basemono! */ |
929 | if (WARN_ON_ONCE(basemono > next_tick)) |
930 | next_tick = basemono; |
931 | |
932 | /* |
933 | * If the tick is due in the next period, keep it ticking or |
934 | * force prod the timer. |
935 | */ |
936 | delta = next_tick - basemono; |
937 | if (delta <= (u64)TICK_NSEC) { |
938 | /* |
939 | * We've not stopped the tick yet, and there's a timer in the |
940 | * next period, so no point in stopping it either, bail. |
941 | */ |
942 | if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { |
943 | ts->timer_expires = 0; |
944 | goto out; |
945 | } |
946 | } |
947 | |
948 | /* |
949 | * If this CPU is the one which had the do_timer() duty last, we limit |
950 | * the sleep time to the timekeeping 'max_deferment' value. |
951 | * Otherwise we can sleep as long as we want. |
952 | */ |
953 | delta = timekeeping_max_deferment(); |
954 | tick_cpu = READ_ONCE(tick_do_timer_cpu); |
955 | if (tick_cpu != cpu && |
956 | (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) |
957 | delta = KTIME_MAX; |
958 | |
959 | /* Calculate the next expiry time */ |
960 | if (delta < (KTIME_MAX - basemono)) |
961 | expires = basemono + delta; |
962 | else |
963 | expires = KTIME_MAX; |
964 | |
965 | ts->timer_expires = min_t(u64, expires, next_tick); |
966 | |
967 | out: |
968 | return ts->timer_expires; |
969 | } |
970 | |
971 | static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) |
972 | { |
973 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
974 | unsigned long basejiff = ts->last_jiffies; |
975 | u64 basemono = ts->timer_expires_base; |
976 | bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); |
977 | int tick_cpu; |
978 | u64 expires; |
979 | |
980 | /* Make sure we won't be trying to stop it twice in a row. */ |
981 | ts->timer_expires_base = 0; |
982 | |
983 | /* |
984 | * Now the tick should be stopped definitely - so the timer base needs |
985 | * to be marked idle as well to not miss a newly queued timer. |
986 | */ |
	expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle);
988 | if (expires > ts->timer_expires) { |
989 | /* |
990 | * This path could only happen when the first timer was removed |
991 | * between calculating the possible sleep length and now (when |
992 | * high resolution mode is not active, timer could also be a |
993 | * hrtimer). |
994 | * |
995 | * We have to stick to the original calculated expiry value to |
996 | * not stop the tick for too long with a shallow C-state (which |
997 | * was programmed by cpuidle because of an early next expiration |
998 | * value). |
999 | */ |
1000 | expires = ts->timer_expires; |
1001 | } |
1002 | |
1003 | /* If the timer base is not idle, retain the not yet stopped tick. */ |
1004 | if (!timer_idle) |
1005 | return; |
1006 | |
1007 | /* |
1008 | * If this CPU is the one which updates jiffies, then give up |
1009 | * the assignment and let it be taken by the CPU which runs |
1010 | * the tick timer next, which might be this CPU as well. If we |
1011 | * don't drop this here, the jiffies might be stale and |
1012 | * do_timer() never gets invoked. Keep track of the fact that it |
1013 | * was the one which had the do_timer() duty last. |
1014 | */ |
1015 | tick_cpu = READ_ONCE(tick_do_timer_cpu); |
1016 | if (tick_cpu == cpu) { |
1017 | WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE); |
1018 | tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); |
1019 | } else if (tick_cpu != TICK_DO_TIMER_NONE) { |
1020 | tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); |
1021 | } |
1022 | |
1023 | /* Skip reprogram of event if it's not changed */ |
1024 | if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { |
1025 | /* Sanity check: make sure clockevent is actually programmed */ |
		if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
1027 | return; |
1028 | |
1029 | WARN_ON_ONCE(1); |
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
1031 | basemono, ts->next_tick, dev->next_event, |
1032 | hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer)); |
1033 | } |
1034 | |
1035 | /* |
1036 | * tick_nohz_stop_tick() can be called several times before |
1037 | * tick_nohz_restart_sched_tick() is called. This happens when |
1038 | * interrupts arrive which do not cause a reschedule. In the first |
1039 | * call we save the current tick time, so we can restart the |
1040 | * scheduler tick in tick_nohz_restart_sched_tick(). |
1041 | */ |
1042 | if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { |
1043 | calc_load_nohz_start(); |
1044 | quiet_vmstat(); |
1045 | |
		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		tick_sched_flag_set(ts, TS_FLAG_STOPPED);
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
1049 | } |
1050 | |
1051 | ts->next_tick = expires; |
1052 | |
1053 | /* |
1054 | * If the expiration time == KTIME_MAX, then we simply stop |
1055 | * the tick timer. |
1056 | */ |
1057 | if (unlikely(expires == KTIME_MAX)) { |
1058 | tick_sched_timer_cancel(ts); |
1059 | return; |
1060 | } |
1061 | |
1062 | if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { |
		hrtimer_start(&ts->sched_timer, expires,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, expires);
		tick_program_event(expires, 1);
1068 | } |
1069 | } |
1070 | |
1071 | static void tick_nohz_retain_tick(struct tick_sched *ts) |
1072 | { |
1073 | ts->timer_expires_base = 0; |
1074 | } |
1075 | |
1076 | #ifdef CONFIG_NO_HZ_FULL |
1077 | static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu) |
1078 | { |
1079 | if (tick_nohz_next_event(ts, cpu)) |
1080 | tick_nohz_stop_tick(ts, cpu); |
1081 | else |
1082 | tick_nohz_retain_tick(ts); |
1083 | } |
1084 | #endif /* CONFIG_NO_HZ_FULL */ |
1085 | |
1086 | static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) |
1087 | { |
1088 | /* Update jiffies first */ |
1089 | tick_do_update_jiffies64(now); |
1090 | |
1091 | /* |
1092 | * Clear the timer idle flag, so we avoid IPIs on remote queueing and |
1093 | * the clock forward checks in the enqueue path: |
1094 | */ |
1095 | timer_clear_idle(); |
1096 | |
1097 | calc_load_nohz_stop(); |
1098 | touch_softlockup_watchdog_sched(); |
1099 | |
1100 | /* Cancel the scheduled timer and restore the tick: */ |
1101 | tick_sched_flag_clear(ts, TS_FLAG_STOPPED); |
1102 | tick_nohz_restart(ts, now); |
1103 | } |
1104 | |
1105 | static void __tick_nohz_full_update_tick(struct tick_sched *ts, |
1106 | ktime_t now) |
1107 | { |
1108 | #ifdef CONFIG_NO_HZ_FULL |
1109 | int cpu = smp_processor_id(); |
1110 | |
1111 | if (can_stop_full_tick(cpu, ts)) |
1112 | tick_nohz_full_stop_tick(ts, cpu); |
1113 | else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) |
1114 | tick_nohz_restart_sched_tick(ts, now); |
1115 | #endif |
1116 | } |
1117 | |
1118 | static void tick_nohz_full_update_tick(struct tick_sched *ts) |
1119 | { |
1120 | if (!tick_nohz_full_cpu(smp_processor_id())) |
1121 | return; |
1122 | |
1123 | if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) |
1124 | return; |
1125 | |
	__tick_nohz_full_update_tick(ts, ktime_get());
1127 | } |
1128 | |
1129 | /* |
1130 | * A pending softirq outside an IRQ (or softirq disabled section) context |
1131 | * should be waiting for ksoftirqd to handle it. Therefore we shouldn't |
1132 | * reach this code due to the need_resched() early check in can_stop_idle_tick(). |
1133 | * |
 * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU on the
 * cpu_down() process, softirqs can still be raised while ksoftirqd is parked,
 * triggering the code below, since wakeup_softirqd() is ignored.
1137 | * |
1138 | */ |
1139 | static bool report_idle_softirq(void) |
1140 | { |
1141 | static int ratelimit; |
1142 | unsigned int pending = local_softirq_pending(); |
1143 | |
1144 | if (likely(!pending)) |
1145 | return false; |
1146 | |
1147 | /* Some softirqs claim to be safe against hotplug and ksoftirqd parking */ |
1148 | if (!cpu_active(smp_processor_id())) { |
1149 | pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK; |
1150 | if (!pending) |
1151 | return false; |
1152 | } |
1153 | |
1154 | if (ratelimit >= 10) |
1155 | return false; |
1156 | |
1157 | /* On RT, softirq handling may be waiting on some lock */ |
1158 | if (local_bh_blocked()) |
1159 | return false; |
1160 | |
	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
1162 | pending); |
1163 | ratelimit++; |
1164 | |
1165 | return true; |
1166 | } |
1167 | |
1168 | static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) |
1169 | { |
1170 | WARN_ON_ONCE(cpu_is_offline(cpu)); |
1171 | |
1172 | if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) |
1173 | return false; |
1174 | |
1175 | if (need_resched()) |
1176 | return false; |
1177 | |
1178 | if (unlikely(report_idle_softirq())) |
1179 | return false; |
1180 | |
1181 | if (tick_nohz_full_enabled()) { |
1182 | int tick_cpu = READ_ONCE(tick_do_timer_cpu); |
1183 | |
1184 | /* |
1185 | * Keep the tick alive to guarantee timekeeping progression |
1186 | * if there are full dynticks CPUs around |
1187 | */ |
1188 | if (tick_cpu == cpu) |
1189 | return false; |
1190 | |
1191 | /* Should not happen for nohz-full */ |
1192 | if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE)) |
1193 | return false; |
1194 | } |
1195 | |
1196 | return true; |
1197 | } |
1198 | |
1199 | /** |
1200 | * tick_nohz_idle_stop_tick - stop the idle tick from the idle task |
1201 | * |
1202 | * When the next event is more than a tick into the future, stop the idle tick |
1203 | */ |
1204 | void tick_nohz_idle_stop_tick(void) |
1205 | { |
1206 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1207 | int cpu = smp_processor_id(); |
1208 | ktime_t expires; |
1209 | |
1210 | /* |
1211 | * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the |
1212 | * tick timer expiration time is known already. |
1213 | */ |
1214 | if (ts->timer_expires_base) |
1215 | expires = ts->timer_expires; |
1216 | else if (can_stop_idle_tick(cpu, ts)) |
1217 | expires = tick_nohz_next_event(ts, cpu); |
1218 | else |
1219 | return; |
1220 | |
1221 | ts->idle_calls++; |
1222 | |
1223 | if (expires > 0LL) { |
1224 | int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); |
1225 | |
1226 | tick_nohz_stop_tick(ts, cpu); |
1227 | |
1228 | ts->idle_sleeps++; |
1229 | ts->idle_expires = expires; |
1230 | |
1231 | if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { |
1232 | ts->idle_jiffies = ts->last_jiffies; |
1233 | nohz_balance_enter_idle(cpu); |
1234 | } |
1235 | } else { |
1236 | tick_nohz_retain_tick(ts); |
1237 | } |
1238 | } |
1239 | |
1240 | void tick_nohz_idle_retain_tick(void) |
1241 | { |
1242 | tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched)); |
1243 | } |
1244 | |
1245 | /** |
1246 | * tick_nohz_idle_enter - prepare for entering idle on the current CPU |
1247 | * |
1248 | * Called when we start the idle loop. |
1249 | */ |
1250 | void tick_nohz_idle_enter(void) |
1251 | { |
1252 | struct tick_sched *ts; |
1253 | |
1254 | lockdep_assert_irqs_enabled(); |
1255 | |
1256 | local_irq_disable(); |
1257 | |
1258 | ts = this_cpu_ptr(&tick_cpu_sched); |
1259 | |
1260 | WARN_ON_ONCE(ts->timer_expires_base); |
1261 | |
1262 | tick_sched_flag_set(ts, TS_FLAG_INIDLE); |
1263 | tick_nohz_start_idle(ts); |
1264 | |
1265 | local_irq_enable(); |
1266 | } |
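/*
 * Simplified, illustrative sketch of the call sequence expected from the idle
 * loop (the real flow lives in kernel/sched/idle.c and cpuidle):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		if (<tick can be stopped, per the idle governor>)
 *			tick_nohz_idle_stop_tick();
 *		else
 *			tick_nohz_idle_retain_tick();
 *		... sleep in a cpuidle state, wake up on an interrupt ...
 *	}
 *	tick_nohz_idle_exit();
 */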
1267 | |
1268 | /** |
1269 | * tick_nohz_irq_exit - Notify the tick about IRQ exit |
1270 | * |
1271 | * A timer may have been added/modified/deleted either by the current IRQ, |
1272 | * or by another place using this IRQ as a notification. This IRQ may have |
1273 | * also updated the RCU callback list. These events may require a |
1274 | * re-evaluation of the next tick. Depending on the context: |
1275 | * |
1276 | * 1) If the CPU is idle and no resched is pending, just proceed with idle |
1277 | * time accounting. The next tick will be re-evaluated on the next idle |
1278 | * loop iteration. |
1279 | * |
1280 | * 2) If the CPU is nohz_full: |
1281 | * |
1282 | * 2.1) If there is any tick dependency, restart the tick if stopped. |
1283 | * |
1284 | * 2.2) If there is no tick dependency, (re-)evaluate the next tick and |
1285 | * stop/update it accordingly. |
1286 | */ |
1287 | void tick_nohz_irq_exit(void) |
1288 | { |
1289 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1290 | |
1291 | if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) |
1292 | tick_nohz_start_idle(ts); |
1293 | else |
1294 | tick_nohz_full_update_tick(ts); |
1295 | } |
1296 | |
1297 | /** |
1298 | * tick_nohz_idle_got_tick - Check whether or not the tick handler has run |
1299 | * |
1300 | * Return: %true if the tick handler has run, otherwise %false |
1301 | */ |
1302 | bool tick_nohz_idle_got_tick(void) |
1303 | { |
1304 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1305 | |
1306 | if (ts->got_idle_tick) { |
1307 | ts->got_idle_tick = 0; |
1308 | return true; |
1309 | } |
1310 | return false; |
1311 | } |
1312 | |
1313 | /** |
1314 | * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer |
1315 | * or the tick, whichever expires first. Note that, if the tick has been |
1316 | * stopped, it returns the next hrtimer. |
1317 | * |
1318 | * Called from power state control code with interrupts disabled |
1319 | * |
1320 | * Return: the next expiration time |
1321 | */ |
1322 | ktime_t tick_nohz_get_next_hrtimer(void) |
1323 | { |
1324 | return __this_cpu_read(tick_cpu_device.evtdev)->next_event; |
1325 | } |
1326 | |
1327 | /** |
1328 | * tick_nohz_get_sleep_length - return the expected length of the current sleep |
1329 | * @delta_next: duration until the next event if the tick cannot be stopped |
1330 | * |
1331 | * Called from power state control code with interrupts disabled. |
1332 | * |
1333 | * The return value of this function and/or the value returned by it through the |
1334 | * @delta_next pointer can be negative which must be taken into account by its |
1335 | * callers. |
1336 | * |
1337 | * Return: the expected length of the current sleep |
1338 | */ |
1339 | ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) |
1340 | { |
1341 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
1342 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1343 | int cpu = smp_processor_id(); |
1344 | /* |
1345 | * The idle entry time is expected to be a sufficient approximation of |
1346 | * the current time at this point. |
1347 | */ |
1348 | ktime_t now = ts->idle_entrytime; |
1349 | ktime_t next_event; |
1350 | |
1351 | WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); |
1352 | |
1353 | *delta_next = ktime_sub(dev->next_event, now); |
1354 | |
1355 | if (!can_stop_idle_tick(cpu, ts)) |
1356 | return *delta_next; |
1357 | |
1358 | next_event = tick_nohz_next_event(ts, cpu); |
1359 | if (!next_event) |
1360 | return *delta_next; |
1361 | |
1362 | /* |
1363 | * If the next highres timer to expire is earlier than 'next_event', the |
1364 | * idle governor needs to know that. |
1365 | */ |
1366 | next_event = min_t(u64, next_event, |
1367 | hrtimer_next_event_without(&ts->sched_timer)); |
1368 | |
1369 | return ktime_sub(next_event, now); |
1370 | } |
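/*
 * Illustrative sketch of how a cpuidle governor might use the function above:
 * 'sleep_len' is the expected sleep if the tick gets stopped, 'delta_next'
 * the duration until the next event if it does not:
 *
 *	ktime_t delta_next;
 *	ktime_t sleep_len = tick_nohz_get_sleep_length(&delta_next);
 *
 *	... pick an idle state whose target residency fits the predicted
 *	    sleep, and decide based on that whether stopping the tick is
 *	    worth it ...
 */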
1371 | |
1372 | /** |
1373 | * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value |
1374 | * for a particular CPU. |
1375 | * @cpu: target CPU number |
1376 | * |
1377 | * Called from the schedutil frequency scaling governor in scheduler context. |
1378 | * |
1379 | * Return: the current idle calls counter value for @cpu |
1380 | */ |
1381 | unsigned long tick_nohz_get_idle_calls_cpu(int cpu) |
1382 | { |
1383 | struct tick_sched *ts = tick_get_tick_sched(cpu); |
1384 | |
1385 | return ts->idle_calls; |
1386 | } |
1387 | |
1388 | /** |
1389 | * tick_nohz_get_idle_calls - return the current idle calls counter value |
1390 | * |
1391 | * Called from the schedutil frequency scaling governor in scheduler context. |
1392 | * |
1393 | * Return: the current idle calls counter value for the current CPU |
1394 | */ |
1395 | unsigned long tick_nohz_get_idle_calls(void) |
1396 | { |
1397 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1398 | |
1399 | return ts->idle_calls; |
1400 | } |
1401 | |
1402 | static void tick_nohz_account_idle_time(struct tick_sched *ts, |
1403 | ktime_t now) |
1404 | { |
1405 | unsigned long ticks; |
1406 | |
1407 | ts->idle_exittime = now; |
1408 | |
1409 | if (vtime_accounting_enabled_this_cpu()) |
1410 | return; |
1411 | /* |
1412 | * We stopped the tick in idle. update_process_times() would miss the |
1413 | * time we slept, as it does only a 1 tick accounting. |
1414 | * Enforce that this is accounted to idle ! |
1415 | */ |
1416 | ticks = jiffies - ts->idle_jiffies; |
1417 | /* |
1418 | * We might be one off. Do not randomly account a huge number of ticks! |
1419 | */ |
1420 | if (ticks && ticks < LONG_MAX) |
1421 | account_idle_ticks(ticks); |
1422 | } |
1423 | |
1424 | void tick_nohz_idle_restart_tick(void) |
1425 | { |
1426 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1427 | |
1428 | if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { |
1429 | ktime_t now = ktime_get(); |
1430 | tick_nohz_restart_sched_tick(ts, now); |
1431 | tick_nohz_account_idle_time(ts, now); |
1432 | } |
1433 | } |
1434 | |
1435 | static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now) |
1436 | { |
1437 | if (tick_nohz_full_cpu(smp_processor_id())) |
1438 | __tick_nohz_full_update_tick(ts, now); |
1439 | else |
1440 | tick_nohz_restart_sched_tick(ts, now); |
1441 | |
1442 | tick_nohz_account_idle_time(ts, now); |
1443 | } |
1444 | |
1445 | /** |
1446 | * tick_nohz_idle_exit - Update the tick upon idle task exit |
1447 | * |
1448 | * When the idle task exits, update the tick depending on the |
1449 | * following situations: |
1450 | * |
1451 | * 1) If the CPU is not in nohz_full mode (most cases), then |
1452 | * restart the tick. |
1453 | * |
1454 | * 2) If the CPU is in nohz_full mode (corner case): |
1455 | * 2.1) If the tick can be kept stopped (no tick dependencies) |
1456 | * then re-evaluate the next tick and try to keep it stopped |
1457 | * as long as possible. |
1458 | * 2.2) If the tick has dependencies, restart the tick. |
1459 | * |
1460 | */ |
1461 | void tick_nohz_idle_exit(void) |
1462 | { |
1463 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1464 | bool idle_active, tick_stopped; |
1465 | ktime_t now; |
1466 | |
1467 | local_irq_disable(); |
1468 | |
1469 | WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); |
1470 | WARN_ON_ONCE(ts->timer_expires_base); |
1471 | |
1472 | tick_sched_flag_clear(ts, TS_FLAG_INIDLE); |
1473 | idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE); |
1474 | tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); |
1475 | |
1476 | if (idle_active || tick_stopped) |
1477 | now = ktime_get(); |
1478 | |
1479 | if (idle_active) |
1480 | tick_nohz_stop_idle(ts, now); |
1481 | |
1482 | if (tick_stopped) |
1483 | tick_nohz_idle_update_tick(ts, now); |
1484 | |
1485 | local_irq_enable(); |
1486 | } |
1487 | |
1488 | /* |
1489 | * In low-resolution mode, the tick handler must be implemented directly |
1490 | * at the clockevent level. hrtimer can't be used instead, because its |
1491 | * infrastructure actually relies on the tick itself as a backend in |
1492 | * low-resolution mode (see hrtimer_run_queues()). |
1493 | */ |
1494 | static void tick_nohz_lowres_handler(struct clock_event_device *dev) |
1495 | { |
1496 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1497 | |
1498 | dev->next_event = KTIME_MAX; |
1499 | |
1500 | if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) |
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1502 | } |
1503 | |
1504 | static inline void tick_nohz_activate(struct tick_sched *ts) |
1505 | { |
1506 | if (!tick_nohz_enabled) |
1507 | return; |
1508 | tick_sched_flag_set(ts, TS_FLAG_NOHZ); |
1509 | /* One update is enough */ |
	if (!test_and_set_bit(0, &tick_nohz_active))
1511 | timers_update_nohz(); |
1512 | } |
1513 | |
1514 | /** |
1515 | * tick_nohz_switch_to_nohz - switch to NOHZ mode |
1516 | */ |
1517 | static void tick_nohz_switch_to_nohz(void) |
1518 | { |
1519 | if (!tick_nohz_enabled) |
1520 | return; |
1521 | |
	if (tick_switch_to_oneshot(tick_nohz_lowres_handler))
1523 | return; |
1524 | |
1525 | /* |
1526 | * Recycle the hrtimer in 'ts', so we can share the |
1527 | * highres code. |
1528 | */ |
	tick_setup_sched_timer(false);
1530 | } |
1531 | |
1532 | static inline void tick_nohz_irq_enter(void) |
1533 | { |
1534 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1535 | ktime_t now; |
1536 | |
1537 | if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE)) |
1538 | return; |
1539 | now = ktime_get(); |
1540 | if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) |
1541 | tick_nohz_stop_idle(ts, now); |
1542 | /* |
1543 | * If all CPUs are idle we may need to update a stale jiffies value. |
1544 | * Note nohz_full is a special case: a timekeeper is guaranteed to stay |
1545 | * alive but it might be busy looping with interrupts disabled in some |
1546 | * rare case (typically stop machine). So we must make sure we have a |
1547 | * last resort. |
1548 | */ |
1549 | if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) |
1550 | tick_nohz_update_jiffies(now); |
1551 | } |
1552 | |
1553 | #else |
1554 | |
1555 | static inline void tick_nohz_switch_to_nohz(void) { } |
1556 | static inline void tick_nohz_irq_enter(void) { } |
1557 | static inline void tick_nohz_activate(struct tick_sched *ts) { } |
1558 | |
1559 | #endif /* CONFIG_NO_HZ_COMMON */ |
1560 | |
1561 | /* |
1562 | * Called from irq_enter() to notify about the possible interruption of idle() |
1563 | */ |
1564 | void tick_irq_enter(void) |
1565 | { |
1566 | tick_check_oneshot_broadcast_this_cpu(); |
1567 | tick_nohz_irq_enter(); |
1568 | } |
1569 | |
1570 | static int sched_skew_tick; |
1571 | |
1572 | static int __init skew_tick(char *str) |
1573 | { |
	get_option(&str, &sched_skew_tick);
1575 | |
1576 | return 0; |
1577 | } |
early_param("skew_tick", skew_tick);
1579 | |
1580 | /** |
1581 | * tick_setup_sched_timer - setup the tick emulation timer |
1582 | * @hrtimer: whether to use the hrtimer or not |
1583 | */ |
1584 | void tick_setup_sched_timer(bool hrtimer) |
1585 | { |
1586 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1587 | |
1588 | /* Emulate tick processing via per-CPU hrtimers: */ |
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1590 | |
1591 | if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) { |
1592 | tick_sched_flag_set(ts, TS_FLAG_HIGHRES); |
1593 | ts->sched_timer.function = tick_nohz_handler; |
1594 | } |
1595 | |
1596 | /* Get the next period (per-CPU) */ |
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1598 | |
1599 | /* Offset the tick to avert 'jiffies_lock' contention. */ |
1600 | if (sched_skew_tick) { |
1601 | u64 offset = TICK_NSEC >> 1; |
1602 | do_div(offset, num_possible_cpus()); |
1603 | offset *= smp_processor_id(); |
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
1605 | } |
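	/*
	 * Worked example (illustrative), assuming "skew_tick=1" on the command
	 * line, HZ=250 (TICK_NSEC == 4,000,000 ns) and 8 possible CPUs:
	 * the per-CPU step is (4,000,000 / 2) / 8 = 250,000 ns, so CPU N gets
	 * its tick shifted by N * 250 us, spreading the ticks across half a
	 * tick period instead of firing them all at the same instant.
	 */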
1606 | |
	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
	if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer)
		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	else
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1612 | tick_nohz_activate(ts); |
1613 | } |
1614 | |
1615 | /* |
1616 | * Shut down the tick and make sure the CPU won't try to retake the timekeeping |
1617 | * duty before disabling IRQs in idle for the last time. |
1618 | */ |
1619 | void tick_sched_timer_dying(int cpu) |
1620 | { |
1621 | struct tick_device *td = &per_cpu(tick_cpu_device, cpu); |
1622 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
1623 | struct clock_event_device *dev = td->evtdev; |
1624 | ktime_t idle_sleeptime, iowait_sleeptime; |
1625 | unsigned long idle_calls, idle_sleeps; |
1626 | |
1627 | /* This must happen before hrtimers are migrated! */ |
1628 | tick_sched_timer_cancel(ts); |
1629 | |
	/*
	 * If the clockevent device doesn't support CLOCK_EVT_STATE_ONESHOT_STOPPED,
	 * make sure not to call the low-res tick handler.
	 */
1634 | if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) |
1635 | dev->event_handler = clockevents_handle_noop; |
1636 | |
1637 | idle_sleeptime = ts->idle_sleeptime; |
1638 | iowait_sleeptime = ts->iowait_sleeptime; |
1639 | idle_calls = ts->idle_calls; |
1640 | idle_sleeps = ts->idle_sleeps; |
1641 | memset(ts, 0, sizeof(*ts)); |
1642 | ts->idle_sleeptime = idle_sleeptime; |
1643 | ts->iowait_sleeptime = iowait_sleeptime; |
1644 | ts->idle_calls = idle_calls; |
1645 | ts->idle_sleeps = idle_sleeps; |
1646 | } |
1647 | |
1648 | /* |
1649 | * Async notification about clocksource changes |
1650 | */ |
1651 | void tick_clock_notify(void) |
1652 | { |
1653 | int cpu; |
1654 | |
1655 | for_each_possible_cpu(cpu) |
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
1657 | } |
1658 | |
1659 | /* |
1660 | * Async notification about clock event changes |
1661 | */ |
1662 | void tick_oneshot_notify(void) |
1663 | { |
1664 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1665 | |
	set_bit(0, &ts->check_clocks);
1667 | } |
1668 | |
1669 | /* |
1670 | * Check if a change happened, which makes oneshot possible. |
1671 | * |
1672 | * Called cyclically from the hrtimer softirq (driven by the timer |
1673 | * softirq). 'allow_nohz' signals that we can switch into low-res NOHZ |
1674 | * mode, because high resolution timers are disabled (either compile |
1675 | * or runtime). Called with interrupts disabled. |
1676 | */ |
1677 | int tick_check_oneshot_change(int allow_nohz) |
1678 | { |
1679 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1680 | |
	if (!test_and_clear_bit(0, &ts->check_clocks))
1682 | return 0; |
1683 | |
1684 | if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) |
1685 | return 0; |
1686 | |
1687 | if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) |
1688 | return 0; |
1689 | |
1690 | if (!allow_nohz) |
1691 | return 1; |
1692 | |
1693 | tick_nohz_switch_to_nohz(); |
1694 | return 0; |
1695 | } |
1696 | |