// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
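
/*
 * Illustrative sketch (not part of the original file): cpu_idle_poll_ctrl()
 * maintains a reference count, so callers are expected to pair the enable
 * and disable calls. A driver that has to keep CPUs out of deep idle around
 * a latency-critical window might do something like:
 *
 *	cpu_idle_poll_ctrl(true);
 *	run_latency_critical_section();		(hypothetical helper)
 *	cpu_idle_poll_ctrl(false);
 */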

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
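
/*
 * Usage note (an assumption drawn from the __setup() hooks above, not text
 * from the original file): with CONFIG_GENERIC_IDLE_POLL_SETUP enabled,
 * booting with "nohlt" on the kernel command line forces the polling idle
 * loop, while "hlt" restores the default idle behaviour.
 */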

static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	local_irq_enable();
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}
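
/*
 * Illustrative sketch (an assumption, not part of the original file): an
 * architecture normally overrides the weak arch_cpu_idle() above with a
 * real low-power wait, roughly along the lines of:
 *
 *	void arch_cpu_idle(void)
 *	{
 *		wait_for_interrupt();	(hypothetical, e.g. a hlt/wfi style op)
 *	}
 *
 * The weak default instead forces the polling loop, so a CPU without an
 * arch idle implementation still makes forward progress.
 */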

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
	local_irq_enable();
	instrumentation_end();
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be rescheduled, so it is pointless to go idle;
	 * record zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_queue();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}
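
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * diagnostic path such as a remote backtrace handler can use cpu_in_idle()
 * to skip CPUs that are merely sitting in the idle loop, e.g.:
 *
 *	if (cpu_in_idle(instruction_pointer(regs)))
 *		return;			skip uninteresting idle CPUs
 *	dump_cpu_backtrace(regs);	(hypothetical dump helper)
 */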

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
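
/*
 * Usage sketch (an assumption, not part of the original file): an idle
 * injection kthread, affined to a single CPU and running as SCHED_FIFO to
 * satisfy the WARN_ON_ONCE() checks above, would typically call this in a
 * loop:
 *
 *	while (injection_active())			(hypothetical condition)
 *		play_idle_precise(duration_ns, U64_MAX);
 *
 * where U64_MAX means "no additional exit-latency constraint".
 */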

void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.wakeup_preempt		= wakeup_preempt_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};

/* Source: linux/kernel/sched/idle.c */