1 | /* CPU control. |
2 | * (C) 2001, 2002, 2003, 2004 Rusty Russell |
3 | * |
 * This code is licensed under the GPL.
5 | */ |
6 | #include <linux/sched/mm.h> |
7 | #include <linux/proc_fs.h> |
8 | #include <linux/smp.h> |
9 | #include <linux/init.h> |
10 | #include <linux/notifier.h> |
11 | #include <linux/sched/signal.h> |
12 | #include <linux/sched/hotplug.h> |
13 | #include <linux/sched/isolation.h> |
14 | #include <linux/sched/task.h> |
15 | #include <linux/sched/smt.h> |
16 | #include <linux/unistd.h> |
17 | #include <linux/cpu.h> |
18 | #include <linux/oom.h> |
19 | #include <linux/rcupdate.h> |
20 | #include <linux/delay.h> |
21 | #include <linux/export.h> |
22 | #include <linux/bug.h> |
23 | #include <linux/kthread.h> |
24 | #include <linux/stop_machine.h> |
25 | #include <linux/mutex.h> |
26 | #include <linux/gfp.h> |
27 | #include <linux/suspend.h> |
28 | #include <linux/lockdep.h> |
29 | #include <linux/tick.h> |
30 | #include <linux/irq.h> |
31 | #include <linux/nmi.h> |
32 | #include <linux/smpboot.h> |
33 | #include <linux/relay.h> |
34 | #include <linux/slab.h> |
35 | #include <linux/scs.h> |
36 | #include <linux/percpu-rwsem.h> |
37 | #include <linux/cpuset.h> |
38 | #include <linux/random.h> |
39 | #include <linux/cc_platform.h> |
40 | |
41 | #include <trace/events/power.h> |
42 | #define CREATE_TRACE_POINTS |
43 | #include <trace/events/cpuhp.h> |
44 | |
45 | #include "smpboot.h" |
46 | |
47 | /** |
48 | * struct cpuhp_cpu_state - Per cpu hotplug state storage |
49 | * @state: The current cpu state |
50 | * @target: The target state |
51 | * @fail: Current CPU hotplug callback state |
52 | * @thread: Pointer to the hotplug thread |
53 | * @should_run: Thread should execute |
54 | * @rollback: Perform a rollback |
55 | * @single: Single callback invocation |
56 | * @bringup: Single callback bringup or teardown selector |
57 | * @cpu: CPU number |
58 | * @node: Remote CPU node; for multi-instance, do a |
59 | * single entry callback for install/remove |
60 | * @last: For multi-instance rollback, remember how far we got |
61 | * @cb_state: The state for a single callback (install/uninstall) |
62 | * @result: Result of the operation |
63 | * @ap_sync_state: State for AP synchronization |
64 | * @done_up: Signal completion to the issuer of the task for cpu-up |
65 | * @done_down: Signal completion to the issuer of the task for cpu-down |
66 | */ |
67 | struct cpuhp_cpu_state { |
68 | enum cpuhp_state state; |
69 | enum cpuhp_state target; |
70 | enum cpuhp_state fail; |
71 | #ifdef CONFIG_SMP |
72 | struct task_struct *thread; |
73 | bool should_run; |
74 | bool rollback; |
75 | bool single; |
76 | bool bringup; |
77 | struct hlist_node *node; |
78 | struct hlist_node *last; |
79 | enum cpuhp_state cb_state; |
80 | int result; |
81 | atomic_t ap_sync_state; |
82 | struct completion done_up; |
83 | struct completion done_down; |
84 | #endif |
85 | }; |
86 | |
87 | static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { |
88 | .fail = CPUHP_INVALID, |
89 | }; |
90 | |
91 | #ifdef CONFIG_SMP |
92 | cpumask_t cpus_booted_once_mask; |
93 | #endif |
94 | |
95 | #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) |
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
100 | |
101 | |
102 | static inline void cpuhp_lock_acquire(bool bringup) |
103 | { |
104 | lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); |
105 | } |
106 | |
107 | static inline void cpuhp_lock_release(bool bringup) |
108 | { |
109 | lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); |
110 | } |
111 | #else |
112 | |
113 | static inline void cpuhp_lock_acquire(bool bringup) { } |
114 | static inline void cpuhp_lock_release(bool bringup) { } |
115 | |
116 | #endif |
117 | |
118 | /** |
119 | * struct cpuhp_step - Hotplug state machine step |
120 | * @name: Name of the step |
121 | * @startup: Startup function of the step |
122 | * @teardown: Teardown function of the step |
123 | * @cant_stop: Bringup/teardown can't be stopped at this step |
124 | * @multi_instance: State has multiple instances which get added afterwards |
125 | */ |
126 | struct cpuhp_step { |
127 | const char *name; |
128 | union { |
129 | int (*single)(unsigned int cpu); |
130 | int (*multi)(unsigned int cpu, |
131 | struct hlist_node *node); |
132 | } startup; |
133 | union { |
134 | int (*single)(unsigned int cpu); |
135 | int (*multi)(unsigned int cpu, |
136 | struct hlist_node *node); |
137 | } teardown; |
138 | /* private: */ |
139 | struct hlist_head list; |
140 | /* public: */ |
141 | bool cant_stop; |
142 | bool multi_instance; |
143 | }; |
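
/*
 * Illustrative sketch (not part of this file's logic): subsystems normally
 * populate a step through the registration API rather than filling in this
 * struct directly. The state name and callbacks below are placeholders:
 *
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *			  foo_cpu_online, foo_cpu_offline);
 */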
144 | |
145 | static DEFINE_MUTEX(cpuhp_state_mutex); |
146 | static struct cpuhp_step cpuhp_hp_states[]; |
147 | |
148 | static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state) |
149 | { |
150 | return cpuhp_hp_states + state; |
151 | } |
152 | |
153 | static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step) |
154 | { |
155 | return bringup ? !step->startup.single : !step->teardown.single; |
156 | } |
157 | |
158 | /** |
159 | * cpuhp_invoke_callback - Invoke the callbacks for a given state |
160 | * @cpu: The cpu for which the callback should be invoked |
161 | * @state: The state to do callbacks for |
162 | * @bringup: True if the bringup callback should be invoked |
163 | * @node: For multi-instance, do a single entry callback for install/remove |
164 | * @lastp: For multi-instance rollback, remember how far we got |
165 | * |
166 | * Called from cpu hotplug and from the state register machinery. |
167 | * |
168 | * Return: %0 on success or a negative errno code |
169 | */ |
170 | static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, |
171 | bool bringup, struct hlist_node *node, |
172 | struct hlist_node **lastp) |
173 | { |
174 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
175 | struct cpuhp_step *step = cpuhp_get_step(state); |
176 | int (*cbm)(unsigned int cpu, struct hlist_node *node); |
177 | int (*cb)(unsigned int cpu); |
178 | int ret, cnt; |
179 | |
180 | if (st->fail == state) { |
181 | st->fail = CPUHP_INVALID; |
182 | return -EAGAIN; |
183 | } |
184 | |
185 | if (cpuhp_step_empty(bringup, step)) { |
186 | WARN_ON_ONCE(1); |
187 | return 0; |
188 | } |
189 | |
190 | if (!step->multi_instance) { |
191 | WARN_ON_ONCE(lastp && *lastp); |
192 | cb = bringup ? step->startup.single : step->teardown.single; |
193 | |
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
197 | return ret; |
198 | } |
199 | cbm = bringup ? step->startup.multi : step->teardown.multi; |
200 | |
201 | /* Single invocation for instance add/remove */ |
202 | if (node) { |
203 | WARN_ON_ONCE(lastp && *lastp); |
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
207 | return ret; |
208 | } |
209 | |
210 | /* State transition. Invoke on all instances */ |
211 | cnt = 0; |
212 | hlist_for_each(node, &step->list) { |
213 | if (lastp && node == *lastp) |
214 | break; |
215 | |
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
219 | if (ret) { |
220 | if (!lastp) |
221 | goto err; |
222 | |
223 | *lastp = node; |
224 | return ret; |
225 | } |
226 | cnt++; |
227 | } |
228 | if (lastp) |
229 | *lastp = NULL; |
230 | return 0; |
231 | err: |
232 | /* Rollback the instances if one failed */ |
233 | cbm = !bringup ? step->startup.multi : step->teardown.multi; |
234 | if (!cbm) |
235 | return ret; |
236 | |
237 | hlist_for_each(node, &step->list) { |
238 | if (!cnt--) |
239 | break; |
240 | |
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
244 | /* |
245 | * Rollback must not fail, |
246 | */ |
247 | WARN_ON_ONCE(ret); |
248 | } |
249 | return ret; |
250 | } |
251 | |
252 | #ifdef CONFIG_SMP |
253 | static bool cpuhp_is_ap_state(enum cpuhp_state state) |
254 | { |
255 | /* |
256 | * The extra check for CPUHP_TEARDOWN_CPU is only for documentation |
257 | * purposes as that state is handled explicitly in cpu_down. |
258 | */ |
259 | return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; |
260 | } |
261 | |
262 | static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup) |
263 | { |
264 | struct completion *done = bringup ? &st->done_up : &st->done_down; |
265 | wait_for_completion(done); |
266 | } |
267 | |
268 | static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup) |
269 | { |
270 | struct completion *done = bringup ? &st->done_up : &st->done_down; |
271 | complete(done); |
272 | } |
273 | |
274 | /* |
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
276 | */ |
277 | static bool cpuhp_is_atomic_state(enum cpuhp_state state) |
278 | { |
279 | return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; |
280 | } |
281 | |
282 | /* Synchronization state management */ |
283 | enum cpuhp_sync_state { |
284 | SYNC_STATE_DEAD, |
285 | SYNC_STATE_KICKED, |
286 | SYNC_STATE_SHOULD_DIE, |
287 | SYNC_STATE_ALIVE, |
288 | SYNC_STATE_SHOULD_ONLINE, |
289 | SYNC_STATE_ONLINE, |
290 | }; |
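
/*
 * Typical progression, as used by the sync helpers below: on bringup the
 * control CPU kicks the AP (KICKED), the AP reports ALIVE, the control CPU
 * releases it via SHOULD_ONLINE and the AP finally reports ONLINE from the
 * idle loop. On teardown the control CPU requests SHOULD_DIE and waits for
 * the AP to report DEAD.
 */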
291 | |
292 | #ifdef CONFIG_HOTPLUG_CORE_SYNC |
293 | /** |
294 | * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown |
295 | * @state: The synchronization state to set |
296 | * |
297 | * No synchronization point. Just update of the synchronization state, but implies |
298 | * a full barrier so that the AP changes are visible before the control CPU proceeds. |
299 | */ |
300 | static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) |
301 | { |
302 | atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state); |
303 | |
	(void)atomic_xchg(st, state);
305 | } |
306 | |
307 | void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); } |
308 | |
309 | static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state, |
310 | enum cpuhp_sync_state next_state) |
311 | { |
312 | atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); |
313 | ktime_t now, end, start = ktime_get(); |
314 | int sync; |
315 | |
316 | end = start + 10ULL * NSEC_PER_SEC; |
317 | |
	sync = atomic_read(st);
319 | while (1) { |
320 | if (sync == state) { |
			if (!atomic_try_cmpxchg(st, &sync, next_state))
322 | continue; |
323 | return true; |
324 | } |
325 | |
326 | now = ktime_get(); |
327 | if (now > end) { |
328 | /* Timeout. Leave the state unchanged */ |
329 | return false; |
330 | } else if (now - start < NSEC_PER_MSEC) { |
331 | /* Poll for one millisecond */ |
332 | arch_cpuhp_sync_state_poll(); |
333 | } else { |
			usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
		}
		sync = atomic_read(st);
337 | } |
338 | return true; |
339 | } |
340 | #else /* CONFIG_HOTPLUG_CORE_SYNC */ |
341 | static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { } |
342 | #endif /* !CONFIG_HOTPLUG_CORE_SYNC */ |
343 | |
344 | #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD |
345 | /** |
346 | * cpuhp_ap_report_dead - Update synchronization state to DEAD |
347 | * |
348 | * No synchronization point. Just update of the synchronization state. |
349 | */ |
350 | void cpuhp_ap_report_dead(void) |
351 | { |
	cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
353 | } |
354 | |
355 | void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { } |
356 | |
357 | /* |
358 | * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down |
359 | * because the AP cannot issue complete() at this stage. |
360 | */ |
361 | static void cpuhp_bp_sync_dead(unsigned int cpu) |
362 | { |
363 | atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); |
	int sync = atomic_read(st);
365 | |
366 | do { |
367 | /* CPU can have reported dead already. Don't overwrite that! */ |
368 | if (sync == SYNC_STATE_DEAD) |
369 | break; |
	} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));

	if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
373 | /* CPU reached dead state. Invoke the cleanup function */ |
374 | arch_cpuhp_cleanup_dead_cpu(cpu); |
375 | return; |
376 | } |
377 | |
378 | /* No further action possible. Emit message and give up. */ |
379 | pr_err("CPU%u failed to report dead state\n" , cpu); |
380 | } |
381 | #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */ |
382 | static inline void cpuhp_bp_sync_dead(unsigned int cpu) { } |
383 | #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */ |
384 | |
385 | #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL |
386 | /** |
387 | * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive |
388 | * |
389 | * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits |
390 | * for the BP to release it. |
391 | */ |
392 | void cpuhp_ap_sync_alive(void) |
393 | { |
394 | atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state); |
395 | |
	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);

	/* Wait for the control CPU to release it. */
	while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
400 | cpu_relax(); |
401 | } |
402 | |
403 | static bool cpuhp_can_boot_ap(unsigned int cpu) |
404 | { |
405 | atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); |
	int sync = atomic_read(st);
407 | |
408 | again: |
409 | switch (sync) { |
410 | case SYNC_STATE_DEAD: |
411 | /* CPU is properly dead */ |
412 | break; |
413 | case SYNC_STATE_KICKED: |
414 | /* CPU did not come up in previous attempt */ |
415 | break; |
416 | case SYNC_STATE_ALIVE: |
		/* CPU is stuck in cpuhp_ap_sync_alive(). */
418 | break; |
419 | default: |
420 | /* CPU failed to report online or dead and is in limbo state. */ |
421 | return false; |
422 | } |
423 | |
424 | /* Prepare for booting */ |
	if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
426 | goto again; |
427 | |
428 | return true; |
429 | } |
430 | |
431 | void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { } |
432 | |
433 | /* |
434 | * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up |
435 | * because the AP cannot issue complete() so early in the bringup. |
436 | */ |
437 | static int cpuhp_bp_sync_alive(unsigned int cpu) |
438 | { |
439 | int ret = 0; |
440 | |
441 | if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL)) |
442 | return 0; |
443 | |
	if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
		pr_err("CPU%u failed to report alive state\n", cpu);
446 | ret = -EIO; |
447 | } |
448 | |
449 | /* Let the architecture cleanup the kick alive mechanics. */ |
450 | arch_cpuhp_cleanup_kick_cpu(cpu); |
451 | return ret; |
452 | } |
453 | #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */ |
454 | static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; } |
455 | static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; } |
456 | #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */ |
457 | |
458 | /* Serializes the updates to cpu_online_mask, cpu_present_mask */ |
459 | static DEFINE_MUTEX(cpu_add_remove_lock); |
460 | bool cpuhp_tasks_frozen; |
461 | EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen); |
462 | |
463 | /* |
464 | * The following two APIs (cpu_maps_update_begin/done) must be used when |
465 | * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. |
466 | */ |
467 | void cpu_maps_update_begin(void) |
468 | { |
469 | mutex_lock(&cpu_add_remove_lock); |
470 | } |
471 | |
472 | void cpu_maps_update_done(void) |
473 | { |
	mutex_unlock(&cpu_add_remove_lock);
475 | } |
476 | |
477 | /* |
478 | * If set, cpu_up and cpu_down will return -EBUSY and do nothing. |
479 | * Should always be manipulated under cpu_add_remove_lock |
480 | */ |
481 | static int cpu_hotplug_disabled; |
482 | |
483 | #ifdef CONFIG_HOTPLUG_CPU |
484 | |
485 | DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); |
486 | |
487 | void cpus_read_lock(void) |
488 | { |
	percpu_down_read(&cpu_hotplug_lock);
490 | } |
491 | EXPORT_SYMBOL_GPL(cpus_read_lock); |
492 | |
493 | int cpus_read_trylock(void) |
494 | { |
	return percpu_down_read_trylock(&cpu_hotplug_lock);
496 | } |
497 | EXPORT_SYMBOL_GPL(cpus_read_trylock); |
498 | |
499 | void cpus_read_unlock(void) |
500 | { |
	percpu_up_read(&cpu_hotplug_lock);
502 | } |
503 | EXPORT_SYMBOL_GPL(cpus_read_unlock); |
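
/*
 * Usage sketch (illustrative only; the per-CPU work below is a placeholder):
 * readers pin the hotplug state while walking the online mask:
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	cpus_read_unlock();
 */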
504 | |
505 | void cpus_write_lock(void) |
506 | { |
507 | percpu_down_write(&cpu_hotplug_lock); |
508 | } |
509 | |
510 | void cpus_write_unlock(void) |
511 | { |
512 | percpu_up_write(&cpu_hotplug_lock); |
513 | } |
514 | |
515 | void lockdep_assert_cpus_held(void) |
516 | { |
517 | /* |
518 | * We can't have hotplug operations before userspace starts running, |
519 | * and some init codepaths will knowingly not take the hotplug lock. |
520 | * This is all valid, so mute lockdep until it makes sense to report |
521 | * unheld locks. |
522 | */ |
523 | if (system_state < SYSTEM_RUNNING) |
524 | return; |
525 | |
526 | percpu_rwsem_assert_held(&cpu_hotplug_lock); |
527 | } |
528 | |
529 | #ifdef CONFIG_LOCKDEP |
530 | int lockdep_is_cpus_held(void) |
531 | { |
532 | return percpu_rwsem_is_held(&cpu_hotplug_lock); |
533 | } |
534 | #endif |
535 | |
536 | static void lockdep_acquire_cpus_lock(void) |
537 | { |
538 | rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_); |
539 | } |
540 | |
541 | static void lockdep_release_cpus_lock(void) |
542 | { |
543 | rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_); |
544 | } |
545 | |
546 | /* |
547 | * Wait for currently running CPU hotplug operations to complete (if any) and |
548 | * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects |
549 | * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the |
550 | * hotplug path before performing hotplug operations. So acquiring that lock |
551 | * guarantees mutual exclusion from any currently running hotplug operations. |
552 | */ |
553 | void cpu_hotplug_disable(void) |
554 | { |
555 | cpu_maps_update_begin(); |
556 | cpu_hotplug_disabled++; |
557 | cpu_maps_update_done(); |
558 | } |
559 | EXPORT_SYMBOL_GPL(cpu_hotplug_disable); |
560 | |
561 | static void __cpu_hotplug_enable(void) |
562 | { |
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
564 | return; |
565 | cpu_hotplug_disabled--; |
566 | } |
567 | |
568 | void cpu_hotplug_enable(void) |
569 | { |
570 | cpu_maps_update_begin(); |
571 | __cpu_hotplug_enable(); |
572 | cpu_maps_update_done(); |
573 | } |
574 | EXPORT_SYMBOL_GPL(cpu_hotplug_enable); |
575 | |
576 | #else |
577 | |
578 | static void lockdep_acquire_cpus_lock(void) |
579 | { |
580 | } |
581 | |
582 | static void lockdep_release_cpus_lock(void) |
583 | { |
584 | } |
585 | |
586 | #endif /* CONFIG_HOTPLUG_CPU */ |
587 | |
588 | /* |
589 | * Architectures that need SMT-specific errata handling during SMT hotplug |
590 | * should override this. |
591 | */ |
592 | void __weak arch_smt_update(void) { } |
593 | |
594 | #ifdef CONFIG_HOTPLUG_SMT |
595 | |
596 | enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; |
597 | static unsigned int cpu_smt_max_threads __ro_after_init; |
598 | unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX; |
599 | |
600 | void __init cpu_smt_disable(bool force) |
601 | { |
602 | if (!cpu_smt_possible()) |
603 | return; |
604 | |
605 | if (force) { |
606 | pr_info("SMT: Force disabled\n" ); |
607 | cpu_smt_control = CPU_SMT_FORCE_DISABLED; |
608 | } else { |
609 | pr_info("SMT: disabled\n" ); |
610 | cpu_smt_control = CPU_SMT_DISABLED; |
611 | } |
612 | cpu_smt_num_threads = 1; |
613 | } |
614 | |
615 | /* |
616 | * The decision whether SMT is supported can only be done after the full |
617 | * CPU identification. Called from architecture code. |
618 | */ |
619 | void __init cpu_smt_set_num_threads(unsigned int num_threads, |
620 | unsigned int max_threads) |
621 | { |
622 | WARN_ON(!num_threads || (num_threads > max_threads)); |
623 | |
624 | if (max_threads == 1) |
625 | cpu_smt_control = CPU_SMT_NOT_SUPPORTED; |
626 | |
627 | cpu_smt_max_threads = max_threads; |
628 | |
629 | /* |
630 | * If SMT has been disabled via the kernel command line or SMT is |
631 | * not supported, set cpu_smt_num_threads to 1 for consistency. |
632 | * If enabled, take the architecture requested number of threads |
633 | * to bring up into account. |
634 | */ |
635 | if (cpu_smt_control != CPU_SMT_ENABLED) |
636 | cpu_smt_num_threads = 1; |
637 | else if (num_threads < cpu_smt_num_threads) |
638 | cpu_smt_num_threads = num_threads; |
639 | } |
640 | |
641 | static int __init smt_cmdline_disable(char *str) |
642 | { |
	cpu_smt_disable(str && !strcmp(str, "force"));
644 | return 0; |
645 | } |
early_param("nosmt", smt_cmdline_disable);
647 | |
648 | /* |
 * For architectures supporting partial SMT states, check if the thread is allowed.
650 | * Otherwise this has already been checked through cpu_smt_max_threads when |
651 | * setting the SMT level. |
652 | */ |
653 | static inline bool cpu_smt_thread_allowed(unsigned int cpu) |
654 | { |
655 | #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC |
656 | return topology_smt_thread_allowed(cpu); |
657 | #else |
658 | return true; |
659 | #endif |
660 | } |
661 | |
662 | static inline bool cpu_bootable(unsigned int cpu) |
663 | { |
664 | if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) |
665 | return true; |
666 | |
667 | /* All CPUs are bootable if controls are not configured */ |
668 | if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED) |
669 | return true; |
670 | |
671 | /* All CPUs are bootable if CPU is not SMT capable */ |
672 | if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) |
673 | return true; |
674 | |
675 | if (topology_is_primary_thread(cpu)) |
676 | return true; |
677 | |
678 | /* |
679 | * On x86 it's required to boot all logical CPUs at least once so |
680 | * that the init code can get a chance to set CR4.MCE on each |
681 | * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any |
 * core will shut down the machine.
683 | */ |
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
685 | } |
686 | |
687 | /* Returns true if SMT is supported and not forcefully (irreversibly) disabled */ |
688 | bool cpu_smt_possible(void) |
689 | { |
690 | return cpu_smt_control != CPU_SMT_FORCE_DISABLED && |
691 | cpu_smt_control != CPU_SMT_NOT_SUPPORTED; |
692 | } |
693 | EXPORT_SYMBOL_GPL(cpu_smt_possible); |
694 | |
695 | #else |
696 | static inline bool cpu_bootable(unsigned int cpu) { return true; } |
697 | #endif |
698 | |
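/*
 * Record the requested @target and the direction of the operation in @st and
 * update the cpu_dying mask accordingly. Returns the previous state so the
 * caller can roll back via cpuhp_reset_state() on failure.
 */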
699 | static inline enum cpuhp_state |
700 | cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) |
701 | { |
702 | enum cpuhp_state prev_state = st->state; |
703 | bool bringup = st->state < target; |
704 | |
705 | st->rollback = false; |
706 | st->last = NULL; |
707 | |
708 | st->target = target; |
709 | st->single = false; |
710 | st->bringup = bringup; |
711 | if (cpu_dying(cpu) != !bringup) |
		set_cpu_dying(cpu, !bringup);
713 | |
714 | return prev_state; |
715 | } |
716 | |
717 | static inline void |
718 | cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st, |
719 | enum cpuhp_state prev_state) |
720 | { |
721 | bool bringup = !st->bringup; |
722 | |
723 | st->target = prev_state; |
724 | |
725 | /* |
	 * Already rolling back. No need to invert the bringup value or to change
727 | * the current state. |
728 | */ |
729 | if (st->rollback) |
730 | return; |
731 | |
732 | st->rollback = true; |
733 | |
734 | /* |
735 | * If we have st->last we need to undo partial multi_instance of this |
736 | * state first. Otherwise start undo at the previous state. |
737 | */ |
738 | if (!st->last) { |
739 | if (st->bringup) |
740 | st->state--; |
741 | else |
742 | st->state++; |
743 | } |
744 | |
745 | st->bringup = bringup; |
746 | if (cpu_dying(cpu) != !bringup) |
		set_cpu_dying(cpu, !bringup);
748 | } |
749 | |
750 | /* Regular hotplug invocation of the AP hotplug thread */ |
751 | static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st) |
752 | { |
753 | if (!st->single && st->state == st->target) |
754 | return; |
755 | |
756 | st->result = 0; |
757 | /* |
758 | * Make sure the above stores are visible before should_run becomes |
	 * true. Paired with the smp_mb() at the top of cpuhp_thread_fun().
760 | */ |
761 | smp_mb(); |
762 | st->should_run = true; |
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
765 | } |
766 | |
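/*
 * Set the new target state, kick the AP hotplug thread and wait for it to
 * finish. On failure the state is reset to the previous one and the thread
 * is kicked again to perform the rollback.
 */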
767 | static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st, |
768 | enum cpuhp_state target) |
769 | { |
770 | enum cpuhp_state prev_state; |
771 | int ret; |
772 | |
773 | prev_state = cpuhp_set_state(cpu, st, target); |
774 | __cpuhp_kick_ap(st); |
775 | if ((ret = st->result)) { |
776 | cpuhp_reset_state(cpu, st, prev_state); |
777 | __cpuhp_kick_ap(st); |
778 | } |
779 | |
780 | return ret; |
781 | } |
782 | |
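/*
 * Wait for the freshly booted AP to reach CPUHP_AP_ONLINE_IDLE, then unpark
 * its hotplug thread. Fails if the CPU did not come online or is not allowed
 * to boot (SMT controls).
 */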
783 | static int bringup_wait_for_ap_online(unsigned int cpu) |
784 | { |
785 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
786 | |
787 | /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ |
	wait_for_ap_thread(st, true);
789 | if (WARN_ON_ONCE((!cpu_online(cpu)))) |
790 | return -ECANCELED; |
791 | |
792 | /* Unpark the hotplug thread of the target cpu */ |
	kthread_unpark(st->thread);
794 | |
795 | /* |
796 | * SMT soft disabling on X86 requires to bring the CPU out of the |
797 | * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The |
798 | * CPU marked itself as booted_once in notify_cpu_starting() so the |
799 | * cpu_bootable() check will now return false if this is not the |
800 | * primary sibling. |
801 | */ |
802 | if (!cpu_bootable(cpu)) |
803 | return -ECANCELED; |
804 | return 0; |
805 | } |
806 | |
807 | #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP |
808 | static int cpuhp_kick_ap_alive(unsigned int cpu) |
809 | { |
810 | if (!cpuhp_can_boot_ap(cpu)) |
811 | return -EAGAIN; |
812 | |
	return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
814 | } |
815 | |
816 | static int cpuhp_bringup_ap(unsigned int cpu) |
817 | { |
818 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
819 | int ret; |
820 | |
821 | /* |
822 | * Some architectures have to walk the irq descriptors to |
823 | * setup the vector space for the cpu which comes online. |
824 | * Prevent irq alloc/free across the bringup. |
825 | */ |
826 | irq_lock_sparse(); |
827 | |
828 | ret = cpuhp_bp_sync_alive(cpu); |
829 | if (ret) |
830 | goto out_unlock; |
831 | |
832 | ret = bringup_wait_for_ap_online(cpu); |
833 | if (ret) |
834 | goto out_unlock; |
835 | |
836 | irq_unlock_sparse(); |
837 | |
838 | if (st->target <= CPUHP_AP_ONLINE_IDLE) |
839 | return 0; |
840 | |
	return cpuhp_kick_ap(cpu, st, st->target);
842 | |
843 | out_unlock: |
844 | irq_unlock_sparse(); |
845 | return ret; |
846 | } |
847 | #else |
848 | static int bringup_cpu(unsigned int cpu) |
849 | { |
850 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
851 | struct task_struct *idle = idle_thread_get(cpu); |
852 | int ret; |
853 | |
854 | if (!cpuhp_can_boot_ap(cpu)) |
855 | return -EAGAIN; |
856 | |
857 | /* |
858 | * Some architectures have to walk the irq descriptors to |
859 | * setup the vector space for the cpu which comes online. |
860 | * |
861 | * Prevent irq alloc/free across the bringup by acquiring the |
862 | * sparse irq lock. Hold it until the upcoming CPU completes the |
863 | * startup in cpuhp_online_idle() which allows to avoid |
864 | * intermediate synchronization points in the architecture code. |
865 | */ |
866 | irq_lock_sparse(); |
867 | |
868 | ret = __cpu_up(cpu, idle); |
869 | if (ret) |
870 | goto out_unlock; |
871 | |
872 | ret = cpuhp_bp_sync_alive(cpu); |
873 | if (ret) |
874 | goto out_unlock; |
875 | |
876 | ret = bringup_wait_for_ap_online(cpu); |
877 | if (ret) |
878 | goto out_unlock; |
879 | |
880 | irq_unlock_sparse(); |
881 | |
882 | if (st->target <= CPUHP_AP_ONLINE_IDLE) |
883 | return 0; |
884 | |
885 | return cpuhp_kick_ap(cpu, st, st->target); |
886 | |
887 | out_unlock: |
888 | irq_unlock_sparse(); |
889 | return ret; |
890 | } |
891 | #endif |
892 | |
893 | static int finish_cpu(unsigned int cpu) |
894 | { |
895 | struct task_struct *idle = idle_thread_get(cpu); |
896 | struct mm_struct *mm = idle->active_mm; |
897 | |
898 | /* |
899 | * idle_task_exit() will have switched to &init_mm, now |
900 | * clean up any remaining active_mm state. |
901 | */ |
902 | if (mm != &init_mm) |
903 | idle->active_mm = &init_mm; |
904 | mmdrop_lazy_tlb(mm); |
905 | return 0; |
906 | } |
907 | |
908 | /* |
909 | * Hotplug state machine related functions |
910 | */ |
911 | |
912 | /* |
913 | * Get the next state to run. Empty ones will be skipped. Returns true if a |
914 | * state must be run. |
915 | * |
916 | * st->state will be modified ahead of time, to match state_to_run, as if it |
 * had already run.
918 | */ |
919 | static bool cpuhp_next_state(bool bringup, |
920 | enum cpuhp_state *state_to_run, |
921 | struct cpuhp_cpu_state *st, |
922 | enum cpuhp_state target) |
923 | { |
924 | do { |
925 | if (bringup) { |
926 | if (st->state >= target) |
927 | return false; |
928 | |
929 | *state_to_run = ++st->state; |
930 | } else { |
931 | if (st->state <= target) |
932 | return false; |
933 | |
934 | *state_to_run = st->state--; |
935 | } |
936 | |
		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
938 | break; |
939 | } while (true); |
940 | |
941 | return true; |
942 | } |
943 | |
944 | static int __cpuhp_invoke_callback_range(bool bringup, |
945 | unsigned int cpu, |
946 | struct cpuhp_cpu_state *st, |
947 | enum cpuhp_state target, |
948 | bool nofail) |
949 | { |
950 | enum cpuhp_state state; |
951 | int ret = 0; |
952 | |
	while (cpuhp_next_state(bringup, &state, st, target)) {
954 | int err; |
955 | |
956 | err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL); |
957 | if (!err) |
958 | continue; |
959 | |
960 | if (nofail) { |
			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
				cpu, bringup ? "UP" : "DOWN",
				cpuhp_get_step(st->state)->name,
				st->state, err);
965 | ret = -1; |
966 | } else { |
967 | ret = err; |
968 | break; |
969 | } |
970 | } |
971 | |
972 | return ret; |
973 | } |
974 | |
975 | static inline int cpuhp_invoke_callback_range(bool bringup, |
976 | unsigned int cpu, |
977 | struct cpuhp_cpu_state *st, |
978 | enum cpuhp_state target) |
979 | { |
	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
981 | } |
982 | |
983 | static inline void cpuhp_invoke_callback_range_nofail(bool bringup, |
984 | unsigned int cpu, |
985 | struct cpuhp_cpu_state *st, |
986 | enum cpuhp_state target) |
987 | { |
	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
989 | } |
990 | |
991 | static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st) |
992 | { |
993 | if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) |
994 | return true; |
995 | /* |
996 | * When CPU hotplug is disabled, then taking the CPU down is not |
997 | * possible because takedown_cpu() and the architecture and |
998 | * subsystem specific mechanisms are not available. So the CPU |
999 | * which would be completely unplugged again needs to stay around |
1000 | * in the current state. |
1001 | */ |
1002 | return st->state <= CPUHP_BRINGUP_CPU; |
1003 | } |
1004 | |
1005 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
1006 | enum cpuhp_state target) |
1007 | { |
1008 | enum cpuhp_state prev_state = st->state; |
1009 | int ret = 0; |
1010 | |
	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
	if (ret) {
		pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
			 ret, cpu, cpuhp_get_step(st->state)->name,
			 st->state);
1016 | |
1017 | cpuhp_reset_state(cpu, st, prev_state); |
1018 | if (can_rollback_cpu(st)) |
1019 | WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, |
1020 | prev_state)); |
1021 | } |
1022 | return ret; |
1023 | } |
1024 | |
1025 | /* |
1026 | * The cpu hotplug threads manage the bringup and teardown of the cpus |
1027 | */ |
1028 | static int cpuhp_should_run(unsigned int cpu) |
1029 | { |
1030 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
1031 | |
1032 | return st->should_run; |
1033 | } |
1034 | |
1035 | /* |
1036 | * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke |
1037 | * callbacks when a state gets [un]installed at runtime. |
1038 | * |
1039 | * Each invocation of this function by the smpboot thread does a single AP |
1040 | * state callback. |
1041 | * |
1042 | * It has 3 modes of operation: |
1043 | * - single: runs st->cb_state |
1044 | * - up: runs ++st->state, while st->state < st->target |
1045 | * - down: runs st->state--, while st->state > st->target |
1046 | * |
1047 | * When complete or on error, should_run is cleared and the completion is fired. |
1048 | */ |
1049 | static void cpuhp_thread_fun(unsigned int cpu) |
1050 | { |
1051 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
1052 | bool bringup = st->bringup; |
1053 | enum cpuhp_state state; |
1054 | |
1055 | if (WARN_ON_ONCE(!st->should_run)) |
1056 | return; |
1057 | |
1058 | /* |
1059 | * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures |
1060 | * that if we see ->should_run we also see the rest of the state. |
1061 | */ |
1062 | smp_mb(); |
1063 | |
1064 | /* |
1065 | * The BP holds the hotplug lock, but we're now running on the AP, |
1066 | * ensure that anybody asserting the lock is held, will actually find |
1067 | * it so. |
1068 | */ |
1069 | lockdep_acquire_cpus_lock(); |
1070 | cpuhp_lock_acquire(bringup); |
1071 | |
1072 | if (st->single) { |
1073 | state = st->cb_state; |
1074 | st->should_run = false; |
1075 | } else { |
		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1077 | if (!st->should_run) |
1078 | goto end; |
1079 | } |
1080 | |
1081 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); |
1082 | |
1083 | if (cpuhp_is_atomic_state(state)) { |
1084 | local_irq_disable(); |
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1086 | local_irq_enable(); |
1087 | |
1088 | /* |
1089 | * STARTING/DYING must not fail! |
1090 | */ |
1091 | WARN_ON_ONCE(st->result); |
1092 | } else { |
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1094 | } |
1095 | |
1096 | if (st->result) { |
1097 | /* |
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
1100 | * playing. |
1101 | */ |
1102 | WARN_ON_ONCE(st->rollback); |
1103 | st->should_run = false; |
1104 | } |
1105 | |
1106 | end: |
1107 | cpuhp_lock_release(bringup); |
1108 | lockdep_release_cpus_lock(); |
1109 | |
1110 | if (!st->should_run) |
1111 | complete_ap_thread(st, bringup); |
1112 | } |
1113 | |
1114 | /* Invoke a single callback on a remote cpu */ |
1115 | static int |
1116 | cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, |
1117 | struct hlist_node *node) |
1118 | { |
1119 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1120 | int ret; |
1121 | |
1122 | if (!cpu_online(cpu)) |
1123 | return 0; |
1124 | |
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);
1130 | |
1131 | /* |
1132 | * If we are up and running, use the hotplug thread. For early calls |
1133 | * we invoke the thread function directly. |
1134 | */ |
1135 | if (!st->thread) |
1136 | return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
1137 | |
1138 | st->rollback = false; |
1139 | st->last = NULL; |
1140 | |
1141 | st->node = node; |
1142 | st->bringup = bringup; |
1143 | st->cb_state = state; |
1144 | st->single = true; |
1145 | |
1146 | __cpuhp_kick_ap(st); |
1147 | |
1148 | /* |
1149 | * If we failed and did a partial, do a rollback. |
1150 | */ |
1151 | if ((ret = st->result) && st->last) { |
1152 | st->rollback = true; |
1153 | st->bringup = !bringup; |
1154 | |
1155 | __cpuhp_kick_ap(st); |
1156 | } |
1157 | |
1158 | /* |
	 * Clean up the leftovers so the next hotplug operation won't use stale
1160 | * data. |
1161 | */ |
1162 | st->node = st->last = NULL; |
1163 | return ret; |
1164 | } |
1165 | |
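/*
 * Kick the AP hotplug thread to run the callbacks up or down to st->target.
 * The cpuhp_lock_acquire()/release() pairs only annotate the lock dependency
 * on the AP-side work for lockdep.
 */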
1166 | static int cpuhp_kick_ap_work(unsigned int cpu) |
1167 | { |
1168 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1169 | enum cpuhp_state prev_state = st->state; |
1170 | int ret; |
1171 | |
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(cpu, st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
1181 | |
1182 | return ret; |
1183 | } |
1184 | |
1185 | static struct smp_hotplug_thread cpuhp_threads = { |
1186 | .store = &cpuhp_state.thread, |
1187 | .thread_should_run = cpuhp_should_run, |
1188 | .thread_fn = cpuhp_thread_fun, |
	.thread_comm = "cpuhp/%u",
1190 | .selfparking = true, |
1191 | }; |
1192 | |
1193 | static __init void cpuhp_init_state(void) |
1194 | { |
1195 | struct cpuhp_cpu_state *st; |
1196 | int cpu; |
1197 | |
1198 | for_each_possible_cpu(cpu) { |
1199 | st = per_cpu_ptr(&cpuhp_state, cpu); |
		init_completion(&st->done_up);
		init_completion(&st->done_down);
1202 | } |
1203 | } |
1204 | |
1205 | void __init cpuhp_threads_init(void) |
1206 | { |
1207 | cpuhp_init_state(); |
1208 | BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads)); |
1209 | kthread_unpark(this_cpu_read(cpuhp_state.thread)); |
1210 | } |
1211 | |
1212 | /* |
1213 | * |
1214 | * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock |
1215 | * protected region. |
1216 | * |
1217 | * The operation is still serialized against concurrent CPU hotplug via |
1218 | * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_ |
1219 | * serialized against other hotplug related activity like adding or |
1220 | * removing of state callbacks and state instances, which invoke either the |
1221 | * startup or the teardown callback of the affected state. |
1222 | * |
1223 | * This is required for subsystems which are unfixable vs. CPU hotplug and |
1224 | * evade lock inversion problems by scheduling work which has to be |
1225 | * completed _before_ cpu_up()/_cpu_down() returns. |
1226 | * |
1227 | * Don't even think about adding anything to this for any new code or even |
 * drivers. Its only purpose is to keep existing lock order trainwrecks
1229 | * working. |
1230 | * |
1231 | * For cpu_down() there might be valid reasons to finish cleanups which are |
1232 | * not required to be done under cpu_hotplug_lock, but that's a different |
 * story and would not be invoked via this.
1234 | */ |
1235 | static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen) |
1236 | { |
1237 | /* |
1238 | * cpusets delegate hotplug operations to a worker to "solve" the |
1239 | * lock order problems. Wait for the worker, but only if tasks are |
1240 | * _not_ frozen (suspend, hibernate) as that would wait forever. |
1241 | * |
1242 | * The wait is required because otherwise the hotplug operation |
1243 | * returns with inconsistent state, which could even be observed in |
1244 | * user space when a new CPU is brought up. The CPU plug uevent |
1245 | * would be delivered and user space reacting on it would fail to |
1246 | * move tasks to the newly plugged CPU up to the point where the |
1247 | * work has finished because up to that point the newly plugged CPU |
1248 | * is not assignable in cpusets/cgroups. On unplug that's not |
1249 | * necessarily a visible issue, but it is still inconsistent state, |
1250 | * which is the real problem which needs to be "fixed". This can't |
1251 | * prevent the transient state between scheduling the work and |
1252 | * returning from waiting for it. |
1253 | */ |
1254 | if (!tasks_frozen) |
1255 | cpuset_wait_for_hotplug(); |
1256 | } |
1257 | |
1258 | #ifdef CONFIG_HOTPLUG_CPU |
1259 | #ifndef arch_clear_mm_cpumask_cpu |
1260 | #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) |
1261 | #endif |
1262 | |
1263 | /** |
1264 | * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU |
1265 | * @cpu: a CPU id |
1266 | * |
1267 | * This function walks all processes, finds a valid mm struct for each one and |
1268 | * then clears a corresponding bit in mm's cpumask. While this all sounds |
1269 | * trivial, there are various non-obvious corner cases, which this function |
1270 | * tries to solve in a safe manner. |
1271 | * |
1272 | * Also note that the function uses a somewhat relaxed locking scheme, so it may |
1273 | * be called only for an already offlined CPU. |
1274 | */ |
1275 | void clear_tasks_mm_cpumask(int cpu) |
1276 | { |
1277 | struct task_struct *p; |
1278 | |
1279 | /* |
1280 | * This function is called after the cpu is taken down and marked |
	 * offline, so it's not like new tasks will ever get this cpu set in
1282 | * their mm mask. -- Peter Zijlstra |
1283 | * Thus, we may use rcu_read_lock() here, instead of grabbing |
1284 | * full-fledged tasklist_lock. |
1285 | */ |
1286 | WARN_ON(cpu_online(cpu)); |
1287 | rcu_read_lock(); |
1288 | for_each_process(p) { |
1289 | struct task_struct *t; |
1290 | |
1291 | /* |
1292 | * Main thread might exit, but other threads may still have |
1293 | * a valid mm. Find one. |
1294 | */ |
1295 | t = find_lock_task_mm(p); |
1296 | if (!t) |
1297 | continue; |
1298 | arch_clear_mm_cpumask_cpu(cpu, t->mm); |
		task_unlock(t);
1300 | } |
1301 | rcu_read_unlock(); |
1302 | } |
1303 | |
1304 | /* Take this CPU down. */ |
1305 | static int take_cpu_down(void *_param) |
1306 | { |
1307 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
1308 | enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); |
1309 | int err, cpu = smp_processor_id(); |
1310 | |
1311 | /* Ensure this CPU doesn't handle any more interrupts. */ |
1312 | err = __cpu_disable(); |
1313 | if (err < 0) |
1314 | return err; |
1315 | |
1316 | /* |
1317 | * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going |
1318 | * down, that the current state is CPUHP_TEARDOWN_CPU - 1. |
1319 | */ |
1320 | WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); |
1321 | |
1322 | /* |
1323 | * Invoke the former CPU_DYING callbacks. DYING must not fail! |
1324 | */ |
	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
1326 | |
1327 | /* Give up timekeeping duties */ |
1328 | tick_handover_do_timer(); |
1329 | /* Remove CPU from timer broadcasting */ |
1330 | tick_offline_cpu(cpu); |
1331 | /* Park the stopper thread */ |
1332 | stop_machine_park(cpu); |
1333 | return 0; |
1334 | } |
1335 | |
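/*
 * Control-CPU side of the teardown: park the hotplug thread, run
 * take_cpu_down() via stop_machine(), wait for the dying CPU to reach
 * CPUHP_AP_IDLE_DEAD and then let the architecture reclaim it.
 */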
1336 | static int takedown_cpu(unsigned int cpu) |
1337 | { |
1338 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1339 | int err; |
1340 | |
1341 | /* Park the smpboot threads */ |
	kthread_park(st->thread);
1343 | |
1344 | /* |
1345 | * Prevent irq alloc/free while the dying cpu reorganizes the |
1346 | * interrupt affinities. |
1347 | */ |
1348 | irq_lock_sparse(); |
1349 | |
1350 | /* |
1351 | * So now all preempt/rcu users must observe !cpu_active(). |
1352 | */ |
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1354 | if (err) { |
1355 | /* CPU refused to die */ |
1356 | irq_unlock_sparse(); |
1357 | /* Unpark the hotplug thread so we can rollback there */ |
		kthread_unpark(st->thread);
1359 | return err; |
1360 | } |
1361 | BUG_ON(cpu_online(cpu)); |
1362 | |
1363 | /* |
1364 | * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed |
1365 | * all runnable tasks from the CPU, there's only the idle task left now |
1366 | * that the migration thread is done doing the stop_machine thing. |
1367 | * |
1368 | * Wait for the stop thread to go away. |
1369 | */ |
	wait_for_ap_thread(st, false);
1371 | BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); |
1372 | |
1373 | /* Interrupts are moved away from the dying cpu, reenable alloc/free */ |
1374 | irq_unlock_sparse(); |
1375 | |
	hotplug_cpu__broadcast_tick_pull(cpu);
1377 | /* This actually kills the CPU. */ |
1378 | __cpu_die(cpu); |
1379 | |
1380 | cpuhp_bp_sync_dead(cpu); |
1381 | |
1382 | tick_cleanup_dead_cpu(cpu); |
1383 | |
1384 | /* |
1385 | * Callbacks must be re-integrated right away to the RCU state machine. |
1386 | * Otherwise an RCU callback could block a further teardown function |
1387 | * waiting for its completion. |
1388 | */ |
1389 | rcutree_migrate_callbacks(cpu); |
1390 | |
1391 | return 0; |
1392 | } |
1393 | |
1394 | static void cpuhp_complete_idle_dead(void *arg) |
1395 | { |
1396 | struct cpuhp_cpu_state *st = arg; |
1397 | |
	complete_ap_thread(st, false);
1399 | } |
1400 | |
1401 | void cpuhp_report_idle_dead(void) |
1402 | { |
1403 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
1404 | |
1405 | BUG_ON(st->state != CPUHP_AP_OFFLINE); |
1406 | rcutree_report_cpu_dead(); |
1407 | st->state = CPUHP_AP_IDLE_DEAD; |
1408 | /* |
1409 | * We cannot call complete after rcutree_report_cpu_dead() so we delegate it |
1410 | * to an online cpu. |
1411 | */ |
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
1414 | } |
1415 | |
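/*
 * Run the teardown callbacks from the current state down to @target. On
 * failure the already completed steps are rolled back by bringing the CPU
 * back up to the previous state.
 */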
1416 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
1417 | enum cpuhp_state target) |
1418 | { |
1419 | enum cpuhp_state prev_state = st->state; |
1420 | int ret = 0; |
1421 | |
	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
	if (ret) {
		pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
			 ret, cpu, cpuhp_get_step(st->state)->name,
			 st->state);
1427 | |
1428 | cpuhp_reset_state(cpu, st, prev_state); |
1429 | |
1430 | if (st->state < prev_state) |
1431 | WARN_ON(cpuhp_invoke_callback_range(true, cpu, st, |
1432 | prev_state)); |
1433 | } |
1434 | |
1435 | return ret; |
1436 | } |
1437 | |
1438 | /* Requires cpu_add_remove_lock to be held */ |
1439 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, |
1440 | enum cpuhp_state target) |
1441 | { |
1442 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1443 | int prev_state, ret = 0; |
1444 | |
1445 | if (num_online_cpus() == 1) |
1446 | return -EBUSY; |
1447 | |
1448 | if (!cpu_present(cpu)) |
1449 | return -EINVAL; |
1450 | |
1451 | cpus_write_lock(); |
1452 | |
1453 | cpuhp_tasks_frozen = tasks_frozen; |
1454 | |
1455 | prev_state = cpuhp_set_state(cpu, st, target); |
1456 | /* |
1457 | * If the current CPU state is in the range of the AP hotplug thread, |
1458 | * then we need to kick the thread. |
1459 | */ |
1460 | if (st->state > CPUHP_TEARDOWN_CPU) { |
1461 | st->target = max((int)target, CPUHP_TEARDOWN_CPU); |
1462 | ret = cpuhp_kick_ap_work(cpu); |
1463 | /* |
1464 | * The AP side has done the error rollback already. Just |
1465 | * return the error code.. |
1466 | */ |
1467 | if (ret) |
1468 | goto out; |
1469 | |
1470 | /* |
1471 | * We might have stopped still in the range of the AP hotplug |
1472 | * thread. Nothing to do anymore. |
1473 | */ |
1474 | if (st->state > CPUHP_TEARDOWN_CPU) |
1475 | goto out; |
1476 | |
1477 | st->target = target; |
1478 | } |
1479 | /* |
1480 | * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need |
1481 | * to do the further cleanups. |
1482 | */ |
1483 | ret = cpuhp_down_callbacks(cpu, st, target); |
1484 | if (ret && st->state < prev_state) { |
1485 | if (st->state == CPUHP_TEARDOWN_CPU) { |
1486 | cpuhp_reset_state(cpu, st, prev_state); |
1487 | __cpuhp_kick_ap(st); |
1488 | } else { |
			WARN(1, "DEAD callback error for CPU%d", cpu);
1490 | } |
1491 | } |
1492 | |
1493 | out: |
1494 | cpus_write_unlock(); |
1495 | /* |
1496 | * Do post unplug cleanup. This is still protected against |
1497 | * concurrent CPU hotplug via cpu_add_remove_lock. |
1498 | */ |
1499 | lockup_detector_cleanup(); |
1500 | arch_smt_update(); |
1501 | cpu_up_down_serialize_trainwrecks(tasks_frozen); |
1502 | return ret; |
1503 | } |
1504 | |
1505 | struct cpu_down_work { |
1506 | unsigned int cpu; |
1507 | enum cpuhp_state target; |
1508 | }; |
1509 | |
1510 | static long __cpu_down_maps_locked(void *arg) |
1511 | { |
1512 | struct cpu_down_work *work = arg; |
1513 | |
	return _cpu_down(work->cpu, 0, work->target);
1515 | } |
1516 | |
1517 | static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) |
1518 | { |
1519 | struct cpu_down_work work = { .cpu = cpu, .target = target, }; |
1520 | |
1521 | /* |
1522 | * If the platform does not support hotplug, report it explicitly to |
1523 | * differentiate it from a transient offlining failure. |
1524 | */ |
	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
1526 | return -EOPNOTSUPP; |
1527 | if (cpu_hotplug_disabled) |
1528 | return -EBUSY; |
1529 | |
1530 | /* |
1531 | * Ensure that the control task does not run on the to be offlined |
1532 | * CPU to prevent a deadlock against cfs_b->period_timer. |
	 * Also keep at least one housekeeping cpu online to avoid generating
1534 | * an empty sched_domain span. |
1535 | */ |
1536 | for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) { |
1537 | if (cpu != work.cpu) |
1538 | return work_on_cpu(cpu, __cpu_down_maps_locked, &work); |
1539 | } |
1540 | return -EBUSY; |
1541 | } |
1542 | |
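/* Take @cpu down to @target, serialized against other CPU map updates. */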
1543 | static int cpu_down(unsigned int cpu, enum cpuhp_state target) |
1544 | { |
1545 | int err; |
1546 | |
1547 | cpu_maps_update_begin(); |
1548 | err = cpu_down_maps_locked(cpu, target); |
1549 | cpu_maps_update_done(); |
1550 | return err; |
1551 | } |
1552 | |
1553 | /** |
1554 | * cpu_device_down - Bring down a cpu device |
1555 | * @dev: Pointer to the cpu device to offline |
1556 | * |
1557 | * This function is meant to be used by device core cpu subsystem only. |
1558 | * |
1559 | * Other subsystems should use remove_cpu() instead. |
1560 | * |
1561 | * Return: %0 on success or a negative errno code |
1562 | */ |
1563 | int cpu_device_down(struct device *dev) |
1564 | { |
	return cpu_down(dev->id, CPUHP_OFFLINE);
1566 | } |
1567 | |
1568 | int remove_cpu(unsigned int cpu) |
1569 | { |
1570 | int ret; |
1571 | |
1572 | lock_device_hotplug(); |
	ret = device_offline(get_cpu_device(cpu));
1574 | unlock_device_hotplug(); |
1575 | |
1576 | return ret; |
1577 | } |
1578 | EXPORT_SYMBOL_GPL(remove_cpu); |
1579 | |
1580 | void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) |
1581 | { |
1582 | unsigned int cpu; |
1583 | int error; |
1584 | |
1585 | cpu_maps_update_begin(); |
1586 | |
1587 | /* |
1588 | * Make certain the cpu I'm about to reboot on is online. |
1589 | * |
	 * This is in line with what migrate_to_reboot_cpu() already does.
1591 | */ |
	if (!cpu_online(primary_cpu))
1593 | primary_cpu = cpumask_first(cpu_online_mask); |
1594 | |
1595 | for_each_online_cpu(cpu) { |
1596 | if (cpu == primary_cpu) |
1597 | continue; |
1598 | |
		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d",
			       cpu, error);
1603 | break; |
1604 | } |
1605 | } |
1606 | |
1607 | /* |
1608 | * Ensure all but the reboot CPU are offline. |
1609 | */ |
1610 | BUG_ON(num_online_cpus() > 1); |
1611 | |
1612 | /* |
1613 | * Make sure the CPUs won't be enabled by someone else after this |
1614 | * point. Kexec will reboot to a new kernel shortly resetting |
1615 | * everything along the way. |
1616 | */ |
1617 | cpu_hotplug_disabled++; |
1618 | |
1619 | cpu_maps_update_done(); |
1620 | } |
1621 | |
1622 | #else |
1623 | #define takedown_cpu NULL |
1624 | #endif /*CONFIG_HOTPLUG_CPU*/ |
1625 | |
1626 | /** |
1627 | * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU |
1628 | * @cpu: cpu that just started |
1629 | * |
1630 | * It must be called by the arch code on the new cpu, before the new cpu |
1631 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). |
1632 | */ |
1633 | void notify_cpu_starting(unsigned int cpu) |
1634 | { |
1635 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1636 | enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); |
1637 | |
1638 | rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ |
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1640 | |
1641 | /* |
1642 | * STARTING must not fail! |
1643 | */ |
	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
1645 | } |
1646 | |
1647 | /* |
1648 | * Called from the idle task. Wake up the controlling task which brings the |
1649 | * hotplug thread of the upcoming CPU up and then delegates the rest of the |
1650 | * online bringup to the hotplug thread. |
1651 | */ |
1652 | void cpuhp_online_idle(enum cpuhp_state state) |
1653 | { |
1654 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
1655 | |
1656 | /* Happens for the boot cpu */ |
1657 | if (state != CPUHP_AP_ONLINE_IDLE) |
1658 | return; |
1659 | |
	cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
1661 | |
1662 | /* |
1663 | * Unpark the stopper thread before we start the idle loop (and start |
1664 | * scheduling); this ensures the stopper task is always available. |
1665 | */ |
1666 | stop_machine_unpark(smp_processor_id()); |
1667 | |
1668 | st->state = CPUHP_AP_ONLINE_IDLE; |
	complete_ap_thread(st, true);
1670 | } |
1671 | |
1672 | /* Requires cpu_add_remove_lock to be held */ |
1673 | static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) |
1674 | { |
1675 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1676 | struct task_struct *idle; |
1677 | int ret = 0; |
1678 | |
1679 | cpus_write_lock(); |
1680 | |
1681 | if (!cpu_present(cpu)) { |
1682 | ret = -EINVAL; |
1683 | goto out; |
1684 | } |
1685 | |
1686 | /* |
1687 | * The caller of cpu_up() might have raced with another |
1688 | * caller. Nothing to do. |
1689 | */ |
1690 | if (st->state >= target) |
1691 | goto out; |
1692 | |
1693 | if (st->state == CPUHP_OFFLINE) { |
1694 | /* Let it fail before we try to bring the cpu up */ |
1695 | idle = idle_thread_get(cpu); |
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
1698 | goto out; |
1699 | } |
1700 | |
1701 | /* |
1702 | * Reset stale stack state from the last time this CPU was online. |
1703 | */ |
		scs_task_reset(idle);
		kasan_unpoison_task_stack(idle);
1706 | } |
1707 | |
1708 | cpuhp_tasks_frozen = tasks_frozen; |
1709 | |
1710 | cpuhp_set_state(cpu, st, target); |
1711 | /* |
1712 | * If the current CPU state is in the range of the AP hotplug thread, |
1713 | * then we need to kick the thread once more. |
1714 | */ |
1715 | if (st->state > CPUHP_BRINGUP_CPU) { |
1716 | ret = cpuhp_kick_ap_work(cpu); |
1717 | /* |
1718 | * The AP side has done the error rollback already. Just |
1719 | * return the error code.. |
1720 | */ |
1721 | if (ret) |
1722 | goto out; |
1723 | } |
1724 | |
1725 | /* |
1726 | * Try to reach the target state. We max out on the BP at |
1727 | * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is |
1728 | * responsible for bringing it up to the target state. |
1729 | */ |
1730 | target = min((int)target, CPUHP_BRINGUP_CPU); |
1731 | ret = cpuhp_up_callbacks(cpu, st, target); |
1732 | out: |
1733 | cpus_write_unlock(); |
1734 | arch_smt_update(); |
1735 | cpu_up_down_serialize_trainwrecks(tasks_frozen); |
1736 | return ret; |
1737 | } |
1738 | |
1739 | static int cpu_up(unsigned int cpu, enum cpuhp_state target) |
1740 | { |
1741 | int err = 0; |
1742 | |
1743 | if (!cpu_possible(cpu)) { |
1744 | pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n" , |
1745 | cpu); |
1746 | return -EINVAL; |
1747 | } |
1748 | |
1749 | err = try_online_node(cpu_to_node(cpu)); |
1750 | if (err) |
1751 | return err; |
1752 | |
1753 | cpu_maps_update_begin(); |
1754 | |
1755 | if (cpu_hotplug_disabled) { |
1756 | err = -EBUSY; |
1757 | goto out; |
1758 | } |
1759 | if (!cpu_bootable(cpu)) { |
1760 | err = -EPERM; |
1761 | goto out; |
1762 | } |
1763 | |
1764 | 	err = _cpu_up(cpu, 0, target); |
1765 | out: |
1766 | cpu_maps_update_done(); |
1767 | return err; |
1768 | } |
1769 | |
1770 | /** |
1771 | * cpu_device_up - Bring up a cpu device |
1772 | * @dev: Pointer to the cpu device to online |
1773 | * |
1774 |  * This function is meant to be used by the device core CPU subsystem only. |
1775 | * |
1776 | * Other subsystems should use add_cpu() instead. |
1777 | * |
1778 | * Return: %0 on success or a negative errno code |
1779 | */ |
1780 | int cpu_device_up(struct device *dev) |
1781 | { |
1782 | 	return cpu_up(dev->id, CPUHP_ONLINE); |
1783 | } |
1784 | |
1785 | int add_cpu(unsigned int cpu) |
1786 | { |
1787 | int ret; |
1788 | |
1789 | lock_device_hotplug(); |
1790 | 	ret = device_online(get_cpu_device(cpu)); |
1791 | unlock_device_hotplug(); |
1792 | |
1793 | return ret; |
1794 | } |
1795 | EXPORT_SYMBOL_GPL(add_cpu); |
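/*
 * Illustrative sketch (the helper name below is hypothetical): a subsystem
 * that needs to online and offline a CPU can use the exported
 * add_cpu()/remove_cpu() pair, which serialize against concurrent hotplug
 * via the device hotplug lock:
 *
 *	static int example_cycle_cpu(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		ret = remove_cpu(cpu);	// take the CPU offline
 *		if (ret)
 *			return ret;
 *		return add_cpu(cpu);	// and bring it back online
 *	}
 */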
1796 | |
1797 | /** |
1798 | * bringup_hibernate_cpu - Bring up the CPU that we hibernated on |
1799 | * @sleep_cpu: The cpu we hibernated on and should be brought up. |
1800 | * |
1801 | * On some architectures like arm64, we can hibernate on any CPU, but on |
1802 |  * wakeup the CPU we hibernated on might be offline as a side effect of |
1803 |  * using maxcpus=, for example. |
1804 | * |
1805 | * Return: %0 on success or a negative errno code |
1806 | */ |
1807 | int bringup_hibernate_cpu(unsigned int sleep_cpu) |
1808 | { |
1809 | int ret; |
1810 | |
1811 | 	if (!cpu_online(sleep_cpu)) { |
1812 | 		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n"); |
1813 | 		ret = cpu_up(sleep_cpu, CPUHP_ONLINE); |
1814 | if (ret) { |
1815 | pr_err("Failed to bring hibernate-CPU up!\n" ); |
1816 | return ret; |
1817 | } |
1818 | } |
1819 | return 0; |
1820 | } |
1821 | |
1822 | static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus, |
1823 | enum cpuhp_state target) |
1824 | { |
1825 | unsigned int cpu; |
1826 | |
1827 | for_each_cpu(cpu, mask) { |
1828 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
1829 | |
1830 | if (cpu_up(cpu, target) && can_rollback_cpu(st)) { |
1831 | /* |
1832 | * If this failed then cpu_up() might have only |
1833 | * rolled back to CPUHP_BP_KICK_AP for the final |
1834 | * online. Clean it up. NOOP if already rolled back. |
1835 | */ |
1836 | WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE)); |
1837 | } |
1838 | |
1839 | if (!--ncpus) |
1840 | break; |
1841 | } |
1842 | } |
1843 | |
1844 | #ifdef CONFIG_HOTPLUG_PARALLEL |
1845 | static bool __cpuhp_parallel_bringup __ro_after_init = true; |
1846 | |
1847 | static int __init parallel_bringup_parse_param(char *arg) |
1848 | { |
1849 | 	return kstrtobool(arg, &__cpuhp_parallel_bringup); |
1850 | } |
1851 | early_param("cpuhp.parallel" , parallel_bringup_parse_param); |
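/*
 * For reference, parallel bringup can be disabled on the kernel command
 * line with:
 *
 *	cpuhp.parallel=0
 *
 * in which case bringup_nonboot_cpus() falls back to the fully serialized
 * per-CPU bringup even on architectures that support the parallel path.
 */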
1852 | |
1853 | static inline bool cpuhp_smt_aware(void) |
1854 | { |
1855 | return cpu_smt_max_threads > 1; |
1856 | } |
1857 | |
1858 | static inline const struct cpumask *cpuhp_get_primary_thread_mask(void) |
1859 | { |
1860 | return cpu_primary_thread_mask; |
1861 | } |
1862 | |
1863 | /* |
1864 | * On architectures which have enabled parallel bringup this invokes all BP |
1865 | * prepare states for each of the to be onlined APs first. The last state |
1866 | * sends the startup IPI to the APs. The APs proceed through the low level |
1867 | * bringup code in parallel and then wait for the control CPU to release |
1868 | * them one by one for the final onlining procedure. |
1869 | * |
1870 | * This avoids waiting for each AP to respond to the startup IPI in |
1871 | * CPUHP_BRINGUP_CPU. |
1872 | */ |
1873 | static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus) |
1874 | { |
1875 | const struct cpumask *mask = cpu_present_mask; |
1876 | |
1877 | if (__cpuhp_parallel_bringup) |
1878 | __cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup(); |
1879 | if (!__cpuhp_parallel_bringup) |
1880 | return false; |
1881 | |
1882 | if (cpuhp_smt_aware()) { |
1883 | const struct cpumask *pmask = cpuhp_get_primary_thread_mask(); |
1884 | static struct cpumask tmp_mask __initdata; |
1885 | |
1886 | /* |
1887 | 		 * For various reasons, x86 requires that the SMT siblings are |
1888 | 		 * not brought up while the primary thread performs a microcode |
1889 | 		 * update. Bring the primary threads up first. |
1890 | */ |
1891 | 		cpumask_and(&tmp_mask, mask, pmask); |
1892 | 		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP); |
1893 | 		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE); |
1894 | /* Account for the online CPUs */ |
1895 | ncpus -= num_online_cpus(); |
1896 | if (!ncpus) |
1897 | return true; |
1898 | /* Create the mask for secondary CPUs */ |
1899 | 		cpumask_andnot(&tmp_mask, mask, pmask); |
1900 | mask = &tmp_mask; |
1901 | } |
1902 | |
1903 | /* Bring the not-yet started CPUs up */ |
1904 | 	cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP); |
1905 | 	cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE); |
1906 | return true; |
1907 | } |
1908 | #else |
1909 | static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; } |
1910 | #endif /* CONFIG_HOTPLUG_PARALLEL */ |
1911 | |
1912 | void __init bringup_nonboot_cpus(unsigned int setup_max_cpus) |
1913 | { |
1914 | /* Try parallel bringup optimization if enabled */ |
1915 | 	if (cpuhp_bringup_cpus_parallel(setup_max_cpus)) |
1916 | return; |
1917 | |
1918 | /* Full per CPU serialized bringup */ |
1919 | 	cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE); |
1920 | } |
1921 | |
1922 | #ifdef CONFIG_PM_SLEEP_SMP |
1923 | static cpumask_var_t frozen_cpus; |
1924 | |
1925 | int freeze_secondary_cpus(int primary) |
1926 | { |
1927 | int cpu, error = 0; |
1928 | |
1929 | cpu_maps_update_begin(); |
1930 | if (primary == -1) { |
1931 | primary = cpumask_first(cpu_online_mask); |
1932 | 		if (!housekeeping_cpu(primary, HK_TYPE_TIMER)) |
1933 | 			primary = housekeeping_any_cpu(HK_TYPE_TIMER); |
1934 | } else { |
1935 | 		if (!cpu_online(primary)) |
1936 | primary = cpumask_first(cpu_online_mask); |
1937 | } |
1938 | |
1939 | /* |
1940 | * We take down all of the non-boot CPUs in one shot to avoid races |
1941 | * with the userspace trying to use the CPU hotplug at the same time |
1942 | 	 * with userspace trying to use CPU hotplug at the same time. |
1943 | 	cpumask_clear(frozen_cpus); |
1944 | |
1945 | pr_info("Disabling non-boot CPUs ...\n" ); |
1946 | for_each_online_cpu(cpu) { |
1947 | if (cpu == primary) |
1948 | continue; |
1949 | |
1950 | if (pm_wakeup_pending()) { |
1951 | pr_info("Wakeup pending. Abort CPU freeze\n" ); |
1952 | error = -EBUSY; |
1953 | break; |
1954 | } |
1955 | |
1956 | 		trace_suspend_resume(TPS("CPU_OFF"), cpu, true); |
1957 | 		error = _cpu_down(cpu, 1, CPUHP_OFFLINE); |
1958 | 		trace_suspend_resume(TPS("CPU_OFF"), cpu, false); |
1959 | if (!error) |
1960 | 			cpumask_set_cpu(cpu, frozen_cpus); |
1961 | else { |
1962 | pr_err("Error taking CPU%d down: %d\n" , cpu, error); |
1963 | break; |
1964 | } |
1965 | } |
1966 | |
1967 | if (!error) |
1968 | BUG_ON(num_online_cpus() > 1); |
1969 | else |
1970 | pr_err("Non-boot CPUs are not disabled\n" ); |
1971 | |
1972 | /* |
1973 | * Make sure the CPUs won't be enabled by someone else. We need to do |
1974 | * this even in case of failure as all freeze_secondary_cpus() users are |
1975 | * supposed to do thaw_secondary_cpus() on the failure path. |
1976 | */ |
1977 | cpu_hotplug_disabled++; |
1978 | |
1979 | cpu_maps_update_done(); |
1980 | return error; |
1981 | } |
1982 | |
1983 | void __weak arch_thaw_secondary_cpus_begin(void) |
1984 | { |
1985 | } |
1986 | |
1987 | void __weak arch_thaw_secondary_cpus_end(void) |
1988 | { |
1989 | } |
1990 | |
1991 | void thaw_secondary_cpus(void) |
1992 | { |
1993 | int cpu, error; |
1994 | |
1995 | /* Allow everyone to use the CPU hotplug again */ |
1996 | cpu_maps_update_begin(); |
1997 | __cpu_hotplug_enable(); |
1998 | 	if (cpumask_empty(frozen_cpus)) |
1999 | goto out; |
2000 | |
2001 | pr_info("Enabling non-boot CPUs ...\n" ); |
2002 | |
2003 | arch_thaw_secondary_cpus_begin(); |
2004 | |
2005 | for_each_cpu(cpu, frozen_cpus) { |
2006 | 		trace_suspend_resume(TPS("CPU_ON"), cpu, true); |
2007 | 		error = _cpu_up(cpu, 1, CPUHP_ONLINE); |
2008 | 		trace_suspend_resume(TPS("CPU_ON"), cpu, false); |
2009 | if (!error) { |
2010 | pr_info("CPU%d is up\n" , cpu); |
2011 | continue; |
2012 | } |
2013 | pr_warn("Error taking CPU%d up: %d\n" , cpu, error); |
2014 | } |
2015 | |
2016 | arch_thaw_secondary_cpus_end(); |
2017 | |
2018 | 	cpumask_clear(frozen_cpus); |
2019 | out: |
2020 | cpu_maps_update_done(); |
2021 | } |
2022 | |
2023 | static int __init alloc_frozen_cpus(void) |
2024 | { |
2025 | 	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) |
2026 | return -ENOMEM; |
2027 | return 0; |
2028 | } |
2029 | core_initcall(alloc_frozen_cpus); |
2030 | |
2031 | /* |
2032 | * When callbacks for CPU hotplug notifications are being executed, we must |
2033 | * ensure that the state of the system with respect to the tasks being frozen |
2034 | * or not, as reported by the notification, remains unchanged *throughout the |
2035 | * duration* of the execution of the callbacks. |
2036 | * Hence we need to prevent the freezer from racing with regular CPU hotplug. |
2037 | * |
2038 | * This synchronization is implemented by mutually excluding regular CPU |
2039 | * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ |
2040 | * Hibernate notifications. |
2041 | */ |
2042 | static int |
2043 | cpu_hotplug_pm_callback(struct notifier_block *nb, |
2044 | unsigned long action, void *ptr) |
2045 | { |
2046 | switch (action) { |
2047 | |
2048 | case PM_SUSPEND_PREPARE: |
2049 | case PM_HIBERNATION_PREPARE: |
2050 | cpu_hotplug_disable(); |
2051 | break; |
2052 | |
2053 | case PM_POST_SUSPEND: |
2054 | case PM_POST_HIBERNATION: |
2055 | cpu_hotplug_enable(); |
2056 | break; |
2057 | |
2058 | default: |
2059 | return NOTIFY_DONE; |
2060 | } |
2061 | |
2062 | return NOTIFY_OK; |
2063 | } |
2064 | |
2065 | |
2066 | static int __init cpu_hotplug_pm_sync_init(void) |
2067 | { |
2068 | /* |
2069 | 	 * cpu_hotplug_pm_callback has higher priority than the x86 |
2070 | 	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback |
2071 | 	 * having disabled cpu hotplug to avoid a cpu hotplug race. |
2072 | */ |
2073 | pm_notifier(cpu_hotplug_pm_callback, 0); |
2074 | return 0; |
2075 | } |
2076 | core_initcall(cpu_hotplug_pm_sync_init); |
2077 | |
2078 | #endif /* CONFIG_PM_SLEEP_SMP */ |
2079 | |
2080 | int __boot_cpu_id; |
2081 | |
2082 | #endif /* CONFIG_SMP */ |
2083 | |
2084 | /* Boot processor state steps */ |
2085 | static struct cpuhp_step cpuhp_hp_states[] = { |
2086 | [CPUHP_OFFLINE] = { |
2087 | .name = "offline" , |
2088 | .startup.single = NULL, |
2089 | .teardown.single = NULL, |
2090 | }, |
2091 | #ifdef CONFIG_SMP |
2092 | [CPUHP_CREATE_THREADS]= { |
2093 | .name = "threads:prepare" , |
2094 | .startup.single = smpboot_create_threads, |
2095 | .teardown.single = NULL, |
2096 | .cant_stop = true, |
2097 | }, |
2098 | [CPUHP_PERF_PREPARE] = { |
2099 | .name = "perf:prepare" , |
2100 | .startup.single = perf_event_init_cpu, |
2101 | .teardown.single = perf_event_exit_cpu, |
2102 | }, |
2103 | [CPUHP_RANDOM_PREPARE] = { |
2104 | .name = "random:prepare" , |
2105 | .startup.single = random_prepare_cpu, |
2106 | .teardown.single = NULL, |
2107 | }, |
2108 | [CPUHP_WORKQUEUE_PREP] = { |
2109 | .name = "workqueue:prepare" , |
2110 | .startup.single = workqueue_prepare_cpu, |
2111 | .teardown.single = NULL, |
2112 | }, |
2113 | [CPUHP_HRTIMERS_PREPARE] = { |
2114 | .name = "hrtimers:prepare" , |
2115 | .startup.single = hrtimers_prepare_cpu, |
2116 | .teardown.single = hrtimers_dead_cpu, |
2117 | }, |
2118 | [CPUHP_SMPCFD_PREPARE] = { |
2119 | .name = "smpcfd:prepare" , |
2120 | .startup.single = smpcfd_prepare_cpu, |
2121 | .teardown.single = smpcfd_dead_cpu, |
2122 | }, |
2123 | [CPUHP_RELAY_PREPARE] = { |
2124 | .name = "relay:prepare" , |
2125 | .startup.single = relay_prepare_cpu, |
2126 | .teardown.single = NULL, |
2127 | }, |
2128 | [CPUHP_SLAB_PREPARE] = { |
2129 | .name = "slab:prepare" , |
2130 | .startup.single = slab_prepare_cpu, |
2131 | .teardown.single = slab_dead_cpu, |
2132 | }, |
2133 | [CPUHP_RCUTREE_PREP] = { |
2134 | .name = "RCU/tree:prepare" , |
2135 | .startup.single = rcutree_prepare_cpu, |
2136 | .teardown.single = rcutree_dead_cpu, |
2137 | }, |
2138 | /* |
2139 | * On the tear-down path, timers_dead_cpu() must be invoked |
2140 | * before blk_mq_queue_reinit_notify() from notify_dead(), |
2141 | 	 * otherwise an RCU stall occurs. |
2142 | */ |
2143 | [CPUHP_TIMERS_PREPARE] = { |
2144 | .name = "timers:prepare" , |
2145 | .startup.single = timers_prepare_cpu, |
2146 | .teardown.single = timers_dead_cpu, |
2147 | }, |
2148 | |
2149 | #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP |
2150 | /* |
2151 | * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until |
2152 | 	 * the next step releases it. |
2153 | */ |
2154 | [CPUHP_BP_KICK_AP] = { |
2155 | .name = "cpu:kick_ap" , |
2156 | .startup.single = cpuhp_kick_ap_alive, |
2157 | }, |
2158 | |
2159 | /* |
2160 | * Waits for the AP to reach cpuhp_ap_sync_alive() and then |
2161 | * releases it for the complete bringup. |
2162 | */ |
2163 | [CPUHP_BRINGUP_CPU] = { |
2164 | .name = "cpu:bringup" , |
2165 | .startup.single = cpuhp_bringup_ap, |
2166 | .teardown.single = finish_cpu, |
2167 | .cant_stop = true, |
2168 | }, |
2169 | #else |
2170 | /* |
2171 | * All-in-one CPU bringup state which includes the kick alive. |
2172 | */ |
2173 | [CPUHP_BRINGUP_CPU] = { |
2174 | .name = "cpu:bringup" , |
2175 | .startup.single = bringup_cpu, |
2176 | .teardown.single = finish_cpu, |
2177 | .cant_stop = true, |
2178 | }, |
2179 | #endif |
2180 | /* Final state before CPU kills itself */ |
2181 | [CPUHP_AP_IDLE_DEAD] = { |
2182 | .name = "idle:dead" , |
2183 | }, |
2184 | /* |
2185 | * Last state before CPU enters the idle loop to die. Transient state |
2186 | * for synchronization. |
2187 | */ |
2188 | [CPUHP_AP_OFFLINE] = { |
2189 | .name = "ap:offline" , |
2190 | .cant_stop = true, |
2191 | }, |
2192 | /* First state is scheduler control. Interrupts are disabled */ |
2193 | [CPUHP_AP_SCHED_STARTING] = { |
2194 | .name = "sched:starting" , |
2195 | .startup.single = sched_cpu_starting, |
2196 | .teardown.single = sched_cpu_dying, |
2197 | }, |
2198 | [CPUHP_AP_RCUTREE_DYING] = { |
2199 | .name = "RCU/tree:dying" , |
2200 | .startup.single = NULL, |
2201 | .teardown.single = rcutree_dying_cpu, |
2202 | }, |
2203 | [CPUHP_AP_SMPCFD_DYING] = { |
2204 | .name = "smpcfd:dying" , |
2205 | .startup.single = NULL, |
2206 | .teardown.single = smpcfd_dying_cpu, |
2207 | }, |
2208 | /* Entry state on starting. Interrupts enabled from here on. Transient |
2209 | 	 * state for synchronization */ |
2210 | [CPUHP_AP_ONLINE] = { |
2211 | .name = "ap:online" , |
2212 | }, |
2213 | /* |
2214 | 	 * Handled on the control processor until the plugged processor manages |
2215 | * this itself. |
2216 | */ |
2217 | [CPUHP_TEARDOWN_CPU] = { |
2218 | .name = "cpu:teardown" , |
2219 | .startup.single = NULL, |
2220 | .teardown.single = takedown_cpu, |
2221 | .cant_stop = true, |
2222 | }, |
2223 | |
2224 | [CPUHP_AP_SCHED_WAIT_EMPTY] = { |
2225 | .name = "sched:waitempty" , |
2226 | .startup.single = NULL, |
2227 | .teardown.single = sched_cpu_wait_empty, |
2228 | }, |
2229 | |
2230 | /* Handle smpboot threads park/unpark */ |
2231 | [CPUHP_AP_SMPBOOT_THREADS] = { |
2232 | .name = "smpboot/threads:online" , |
2233 | .startup.single = smpboot_unpark_threads, |
2234 | .teardown.single = smpboot_park_threads, |
2235 | }, |
2236 | [CPUHP_AP_IRQ_AFFINITY_ONLINE] = { |
2237 | .name = "irq/affinity:online" , |
2238 | .startup.single = irq_affinity_online_cpu, |
2239 | .teardown.single = NULL, |
2240 | }, |
2241 | [CPUHP_AP_PERF_ONLINE] = { |
2242 | .name = "perf:online" , |
2243 | .startup.single = perf_event_init_cpu, |
2244 | .teardown.single = perf_event_exit_cpu, |
2245 | }, |
2246 | [CPUHP_AP_WATCHDOG_ONLINE] = { |
2247 | .name = "lockup_detector:online" , |
2248 | .startup.single = lockup_detector_online_cpu, |
2249 | .teardown.single = lockup_detector_offline_cpu, |
2250 | }, |
2251 | [CPUHP_AP_WORKQUEUE_ONLINE] = { |
2252 | .name = "workqueue:online" , |
2253 | .startup.single = workqueue_online_cpu, |
2254 | .teardown.single = workqueue_offline_cpu, |
2255 | }, |
2256 | [CPUHP_AP_RANDOM_ONLINE] = { |
2257 | .name = "random:online" , |
2258 | .startup.single = random_online_cpu, |
2259 | .teardown.single = NULL, |
2260 | }, |
2261 | [CPUHP_AP_RCUTREE_ONLINE] = { |
2262 | .name = "RCU/tree:online" , |
2263 | .startup.single = rcutree_online_cpu, |
2264 | .teardown.single = rcutree_offline_cpu, |
2265 | }, |
2266 | #endif |
2267 | /* |
2268 | * The dynamically registered state space is here |
2269 | */ |
2270 | |
2271 | #ifdef CONFIG_SMP |
2272 | /* Last state is scheduler control setting the cpu active */ |
2273 | [CPUHP_AP_ACTIVE] = { |
2274 | .name = "sched:active" , |
2275 | .startup.single = sched_cpu_activate, |
2276 | .teardown.single = sched_cpu_deactivate, |
2277 | }, |
2278 | #endif |
2279 | |
2280 | /* CPU is fully up and running. */ |
2281 | [CPUHP_ONLINE] = { |
2282 | .name = "online" , |
2283 | .startup.single = NULL, |
2284 | .teardown.single = NULL, |
2285 | }, |
2286 | }; |
2287 | |
2288 | /* Sanity check for callbacks */ |
2289 | static int cpuhp_cb_check(enum cpuhp_state state) |
2290 | { |
2291 | if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE) |
2292 | return -EINVAL; |
2293 | return 0; |
2294 | } |
2295 | |
2296 | /* |
2297 |  * Returns a free slot for dynamic state assignment in the requested range. |
2298 |  * The states are protected by cpuhp_state_mutex and an empty slot is |
2299 |  * identified by having no name assigned. |
2300 | */ |
2301 | static int cpuhp_reserve_state(enum cpuhp_state state) |
2302 | { |
2303 | enum cpuhp_state i, end; |
2304 | struct cpuhp_step *step; |
2305 | |
2306 | switch (state) { |
2307 | case CPUHP_AP_ONLINE_DYN: |
2308 | step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN; |
2309 | end = CPUHP_AP_ONLINE_DYN_END; |
2310 | break; |
2311 | case CPUHP_BP_PREPARE_DYN: |
2312 | step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN; |
2313 | end = CPUHP_BP_PREPARE_DYN_END; |
2314 | break; |
2315 | default: |
2316 | return -EINVAL; |
2317 | } |
2318 | |
2319 | for (i = state; i <= end; i++, step++) { |
2320 | if (!step->name) |
2321 | return i; |
2322 | } |
2323 | WARN(1, "No more dynamic states available for CPU hotplug\n" ); |
2324 | return -ENOSPC; |
2325 | } |
2326 | |
2327 | static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, |
2328 | int (*startup)(unsigned int cpu), |
2329 | int (*teardown)(unsigned int cpu), |
2330 | bool multi_instance) |
2331 | { |
2332 | /* (Un)Install the callbacks for further cpu hotplug operations */ |
2333 | struct cpuhp_step *sp; |
2334 | int ret = 0; |
2335 | |
2336 | /* |
2337 | * If name is NULL, then the state gets removed. |
2338 | * |
2339 | * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on |
2340 | * the first allocation from these dynamic ranges, so the removal |
2341 | * would trigger a new allocation and clear the wrong (already |
2342 | * empty) state, leaving the callbacks of the to be cleared state |
2343 | * dangling, which causes wreckage on the next hotplug operation. |
2344 | */ |
2345 | if (name && (state == CPUHP_AP_ONLINE_DYN || |
2346 | state == CPUHP_BP_PREPARE_DYN)) { |
2347 | ret = cpuhp_reserve_state(state); |
2348 | if (ret < 0) |
2349 | return ret; |
2350 | state = ret; |
2351 | } |
2352 | sp = cpuhp_get_step(state); |
2353 | if (name && sp->name) |
2354 | return -EBUSY; |
2355 | |
2356 | sp->startup.single = startup; |
2357 | sp->teardown.single = teardown; |
2358 | sp->name = name; |
2359 | sp->multi_instance = multi_instance; |
2360 | INIT_HLIST_HEAD(&sp->list); |
2361 | return ret; |
2362 | } |
2363 | |
2364 | static void *cpuhp_get_teardown_cb(enum cpuhp_state state) |
2365 | { |
2366 | return cpuhp_get_step(state)->teardown.single; |
2367 | } |
2368 | |
2369 | /* |
2370 | * Call the startup/teardown function for a step either on the AP or |
2371 | * on the current CPU. |
2372 | */ |
2373 | static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, |
2374 | struct hlist_node *node) |
2375 | { |
2376 | struct cpuhp_step *sp = cpuhp_get_step(state); |
2377 | int ret; |
2378 | |
2379 | /* |
2380 | 	 * If there's nothing to do, we're done. |
2381 | 	 * Relies on the union for multi_instance. |
2382 | 	 */ |
2383 | 	if (cpuhp_step_empty(bringup, sp)) |
2384 | return 0; |
2385 | /* |
2386 | 	 * The non-AP-bound callbacks can fail on bringup. On teardown, |
2387 | 	 * e.g. module removal, we crash for now. |
2388 | */ |
2389 | #ifdef CONFIG_SMP |
2390 | if (cpuhp_is_ap_state(state)) |
2391 | ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); |
2392 | else |
2393 | ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
2394 | #else |
2395 | ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
2396 | #endif |
2397 | BUG_ON(ret && !bringup); |
2398 | return ret; |
2399 | } |
2400 | |
2401 | /* |
2402 | * Called from __cpuhp_setup_state on a recoverable failure. |
2403 | * |
2404 | * Note: The teardown callbacks for rollback are not allowed to fail! |
2405 | */ |
2406 | static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, |
2407 | struct hlist_node *node) |
2408 | { |
2409 | int cpu; |
2410 | |
2411 | /* Roll back the already executed steps on the other cpus */ |
2412 | for_each_present_cpu(cpu) { |
2413 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
2414 | int cpustate = st->state; |
2415 | |
2416 | if (cpu >= failedcpu) |
2417 | break; |
2418 | |
2419 | /* Did we invoke the startup call on that cpu ? */ |
2420 | if (cpustate >= state) |
2421 | 			cpuhp_issue_call(cpu, state, false, node); |
2422 | } |
2423 | } |
2424 | |
2425 | int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, |
2426 | struct hlist_node *node, |
2427 | bool invoke) |
2428 | { |
2429 | struct cpuhp_step *sp; |
2430 | int cpu; |
2431 | int ret; |
2432 | |
2433 | lockdep_assert_cpus_held(); |
2434 | |
2435 | sp = cpuhp_get_step(state); |
2436 | if (sp->multi_instance == false) |
2437 | return -EINVAL; |
2438 | |
2439 | mutex_lock(&cpuhp_state_mutex); |
2440 | |
2441 | if (!invoke || !sp->startup.multi) |
2442 | goto add_node; |
2443 | |
2444 | /* |
2445 | * Try to call the startup callback for each present cpu |
2446 | * depending on the hotplug state of the cpu. |
2447 | */ |
2448 | for_each_present_cpu(cpu) { |
2449 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
2450 | int cpustate = st->state; |
2451 | |
2452 | if (cpustate < state) |
2453 | continue; |
2454 | |
2455 | 		ret = cpuhp_issue_call(cpu, state, true, node); |
2456 | 		if (ret) { |
2457 | 			if (sp->teardown.multi) |
2458 | 				cpuhp_rollback_install(cpu, state, node); |
2459 | goto unlock; |
2460 | } |
2461 | } |
2462 | add_node: |
2463 | ret = 0; |
2464 | 	hlist_add_head(node, &sp->list); |
2465 | unlock: |
2466 | 	mutex_unlock(&cpuhp_state_mutex); |
2467 | return ret; |
2468 | } |
2469 | |
2470 | int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, |
2471 | bool invoke) |
2472 | { |
2473 | int ret; |
2474 | |
2475 | cpus_read_lock(); |
2476 | ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke); |
2477 | cpus_read_unlock(); |
2478 | return ret; |
2479 | } |
2480 | EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance); |
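/*
 * Illustrative multi-instance usage sketch (hypothetical names), assuming
 * the cpuhp_setup_state_multi() and cpuhp_state_add_instance() wrappers
 * from <linux/cpuhotplug.h>: the hlist_node is embedded in the
 * per-instance data, and adding an instance invokes the registered
 * startup callback for that one instance on every CPU that has already
 * reached the state.
 *
 *	struct example_inst {
 *		struct hlist_node node;
 *		// per-instance data
 *	};
 *
 *	static int example_inst_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct example_inst *inst;
 *
 *		inst = hlist_entry(node, struct example_inst, node);
 *		// set up @inst on @cpu
 *		return 0;
 *	}
 *
 *	// Once:	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *	//			"example/inst:online", example_inst_online, NULL);
 *	// Per instance:	cpuhp_state_add_instance(state, &inst->node);
 *	// Removal:		cpuhp_state_remove_instance(state, &inst->node);
 */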
2481 | |
2482 | /** |
2483 |  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state |
2484 | * @state: The state to setup |
2485 | * @name: Name of the step |
2486 | * @invoke: If true, the startup function is invoked for cpus where |
2487 | * cpu state >= @state |
2488 | * @startup: startup callback function |
2489 | * @teardown: teardown callback function |
2490 | * @multi_instance: State is set up for multiple instances which get |
2491 | * added afterwards. |
2492 | * |
2493 | * The caller needs to hold cpus read locked while calling this function. |
2494 | * Return: |
2495 | * On success: |
2496 | * Positive state number if @state is CPUHP_AP_ONLINE_DYN; |
2497 | * 0 for all other states |
2498 | * On failure: proper (negative) error code |
2499 | */ |
2500 | int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, |
2501 | const char *name, bool invoke, |
2502 | int (*startup)(unsigned int cpu), |
2503 | int (*teardown)(unsigned int cpu), |
2504 | bool multi_instance) |
2505 | { |
2506 | int cpu, ret = 0; |
2507 | bool dynstate; |
2508 | |
2509 | lockdep_assert_cpus_held(); |
2510 | |
2511 | if (cpuhp_cb_check(state) || !name) |
2512 | return -EINVAL; |
2513 | |
2514 | mutex_lock(&cpuhp_state_mutex); |
2515 | |
2516 | ret = cpuhp_store_callbacks(state, name, startup, teardown, |
2517 | multi_instance); |
2518 | |
2519 | dynstate = state == CPUHP_AP_ONLINE_DYN; |
2520 | if (ret > 0 && dynstate) { |
2521 | state = ret; |
2522 | ret = 0; |
2523 | } |
2524 | |
2525 | if (ret || !invoke || !startup) |
2526 | goto out; |
2527 | |
2528 | /* |
2529 | * Try to call the startup callback for each present cpu |
2530 | * depending on the hotplug state of the cpu. |
2531 | */ |
2532 | for_each_present_cpu(cpu) { |
2533 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
2534 | int cpustate = st->state; |
2535 | |
2536 | if (cpustate < state) |
2537 | continue; |
2538 | |
2539 | 		ret = cpuhp_issue_call(cpu, state, true, NULL); |
2540 | 		if (ret) { |
2541 | 			if (teardown) |
2542 | 				cpuhp_rollback_install(cpu, state, NULL); |
2543 | 			cpuhp_store_callbacks(state, NULL, NULL, NULL, false); |
2544 | goto out; |
2545 | } |
2546 | } |
2547 | out: |
2548 | 	mutex_unlock(&cpuhp_state_mutex); |
2549 | /* |
2550 | * If the requested state is CPUHP_AP_ONLINE_DYN, return the |
2551 | * dynamically allocated state in case of success. |
2552 | */ |
2553 | if (!ret && dynstate) |
2554 | return state; |
2555 | return ret; |
2556 | } |
2557 | EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked); |
2558 | |
2559 | int __cpuhp_setup_state(enum cpuhp_state state, |
2560 | const char *name, bool invoke, |
2561 | int (*startup)(unsigned int cpu), |
2562 | int (*teardown)(unsigned int cpu), |
2563 | bool multi_instance) |
2564 | { |
2565 | int ret; |
2566 | |
2567 | cpus_read_lock(); |
2568 | ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup, |
2569 | teardown, multi_instance); |
2570 | cpus_read_unlock(); |
2571 | return ret; |
2572 | } |
2573 | EXPORT_SYMBOL(__cpuhp_setup_state); |
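/*
 * Illustrative single-instance usage sketch (callback names hypothetical),
 * using the cpuhp_setup_state() and cpuhp_remove_state() wrappers from
 * <linux/cpuhotplug.h>. Requesting CPUHP_AP_ONLINE_DYN makes
 * cpuhp_reserve_state() pick a free dynamic slot, which is returned on
 * success and has to be remembered for removal:
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		// per-CPU setup
 *		return 0;
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		// per-CPU teardown, must not fail
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *				example_online, example_offline);
 *	if (ret < 0)
 *		return ret;
 *	example_state = ret;	// dynamically allocated state
 *	...
 *	cpuhp_remove_state(example_state);
 */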
2574 | |
2575 | int __cpuhp_state_remove_instance(enum cpuhp_state state, |
2576 | struct hlist_node *node, bool invoke) |
2577 | { |
2578 | struct cpuhp_step *sp = cpuhp_get_step(state); |
2579 | int cpu; |
2580 | |
2581 | BUG_ON(cpuhp_cb_check(state)); |
2582 | |
2583 | if (!sp->multi_instance) |
2584 | return -EINVAL; |
2585 | |
2586 | cpus_read_lock(); |
2587 | mutex_lock(&cpuhp_state_mutex); |
2588 | |
2589 | if (!invoke || !cpuhp_get_teardown_cb(state)) |
2590 | goto remove; |
2591 | /* |
2592 | * Call the teardown callback for each present cpu depending |
2593 | * on the hotplug state of the cpu. This function is not |
2594 | * allowed to fail currently! |
2595 | */ |
2596 | for_each_present_cpu(cpu) { |
2597 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
2598 | int cpustate = st->state; |
2599 | |
2600 | if (cpustate >= state) |
2601 | 			cpuhp_issue_call(cpu, state, false, node); |
2602 | } |
2603 | |
2604 | remove: |
2605 | 	hlist_del(node); |
2606 | 	mutex_unlock(&cpuhp_state_mutex); |
2607 | cpus_read_unlock(); |
2608 | |
2609 | return 0; |
2610 | } |
2611 | EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); |
2612 | |
2613 | /** |
2614 |  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state |
2615 | * @state: The state to remove |
2616 | * @invoke: If true, the teardown function is invoked for cpus where |
2617 | * cpu state >= @state |
2618 | * |
2619 | * The caller needs to hold cpus read locked while calling this function. |
2620 | * The teardown callback is currently not allowed to fail. Think |
2621 | * about module removal! |
2622 | */ |
2623 | void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke) |
2624 | { |
2625 | struct cpuhp_step *sp = cpuhp_get_step(state); |
2626 | int cpu; |
2627 | |
2628 | BUG_ON(cpuhp_cb_check(state)); |
2629 | |
2630 | lockdep_assert_cpus_held(); |
2631 | |
2632 | mutex_lock(&cpuhp_state_mutex); |
2633 | if (sp->multi_instance) { |
2634 | WARN(!hlist_empty(&sp->list), |
2635 | "Error: Removing state %d which has instances left.\n" , |
2636 | state); |
2637 | goto remove; |
2638 | } |
2639 | |
2640 | if (!invoke || !cpuhp_get_teardown_cb(state)) |
2641 | goto remove; |
2642 | |
2643 | /* |
2644 | * Call the teardown callback for each present cpu depending |
2645 | * on the hotplug state of the cpu. This function is not |
2646 | * allowed to fail currently! |
2647 | */ |
2648 | for_each_present_cpu(cpu) { |
2649 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
2650 | int cpustate = st->state; |
2651 | |
2652 | if (cpustate >= state) |
2653 | 			cpuhp_issue_call(cpu, state, false, NULL); |
2654 | } |
2655 | remove: |
2656 | 	cpuhp_store_callbacks(state, NULL, NULL, NULL, false); |
2657 | 	mutex_unlock(&cpuhp_state_mutex); |
2658 | } |
2659 | EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked); |
2660 | |
2661 | void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) |
2662 | { |
2663 | cpus_read_lock(); |
2664 | __cpuhp_remove_state_cpuslocked(state, invoke); |
2665 | cpus_read_unlock(); |
2666 | } |
2667 | EXPORT_SYMBOL(__cpuhp_remove_state); |
2668 | |
2669 | #ifdef CONFIG_HOTPLUG_SMT |
2670 | static void cpuhp_offline_cpu_device(unsigned int cpu) |
2671 | { |
2672 | struct device *dev = get_cpu_device(cpu); |
2673 | |
2674 | dev->offline = true; |
2675 | /* Tell user space about the state change */ |
2676 | 	kobject_uevent(&dev->kobj, KOBJ_OFFLINE); |
2677 | } |
2678 | |
2679 | static void cpuhp_online_cpu_device(unsigned int cpu) |
2680 | { |
2681 | struct device *dev = get_cpu_device(cpu); |
2682 | |
2683 | dev->offline = false; |
2684 | /* Tell user space about the state change */ |
2685 | 	kobject_uevent(&dev->kobj, KOBJ_ONLINE); |
2686 | } |
2687 | |
2688 | int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) |
2689 | { |
2690 | int cpu, ret = 0; |
2691 | |
2692 | cpu_maps_update_begin(); |
2693 | for_each_online_cpu(cpu) { |
2694 | if (topology_is_primary_thread(cpu)) |
2695 | continue; |
2696 | /* |
2697 | * Disable can be called with CPU_SMT_ENABLED when changing |
2698 | * from a higher to lower number of SMT threads per core. |
2699 | */ |
2700 | if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) |
2701 | continue; |
2702 | 		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); |
2703 | if (ret) |
2704 | break; |
2705 | /* |
2706 | * As this needs to hold the cpu maps lock it's impossible |
2707 | * to call device_offline() because that ends up calling |
2708 | * cpu_down() which takes cpu maps lock. cpu maps lock |
2709 | 		 * needs to be held as this might race against in-kernel |
2710 | * abusers of the hotplug machinery (thermal management). |
2711 | * |
2712 | * So nothing would update device:offline state. That would |
2713 | * leave the sysfs entry stale and prevent onlining after |
2714 | * smt control has been changed to 'off' again. This is |
2715 | * called under the sysfs hotplug lock, so it is properly |
2716 | * serialized against the regular offline usage. |
2717 | */ |
2718 | cpuhp_offline_cpu_device(cpu); |
2719 | } |
2720 | if (!ret) |
2721 | cpu_smt_control = ctrlval; |
2722 | cpu_maps_update_done(); |
2723 | return ret; |
2724 | } |
2725 | |
2726 | int cpuhp_smt_enable(void) |
2727 | { |
2728 | int cpu, ret = 0; |
2729 | |
2730 | cpu_maps_update_begin(); |
2731 | cpu_smt_control = CPU_SMT_ENABLED; |
2732 | for_each_present_cpu(cpu) { |
2733 | /* Skip online CPUs and CPUs on offline nodes */ |
2734 | if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) |
2735 | continue; |
2736 | if (!cpu_smt_thread_allowed(cpu)) |
2737 | continue; |
2738 | 		ret = _cpu_up(cpu, 0, CPUHP_ONLINE); |
2739 | if (ret) |
2740 | break; |
2741 | /* See comment in cpuhp_smt_disable() */ |
2742 | cpuhp_online_cpu_device(cpu); |
2743 | } |
2744 | cpu_maps_update_done(); |
2745 | return ret; |
2746 | } |
2747 | #endif |
2748 | |
2749 | #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU) |
2750 | static ssize_t state_show(struct device *dev, |
2751 | struct device_attribute *attr, char *buf) |
2752 | { |
2753 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); |
2754 | |
2755 | 	return sprintf(buf, "%d\n", st->state); |
2756 | } |
2757 | static DEVICE_ATTR_RO(state); |
2758 | |
2759 | static ssize_t target_store(struct device *dev, struct device_attribute *attr, |
2760 | const char *buf, size_t count) |
2761 | { |
2762 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); |
2763 | struct cpuhp_step *sp; |
2764 | int target, ret; |
2765 | |
2766 | 	ret = kstrtoint(buf, 10, &target); |
2767 | if (ret) |
2768 | return ret; |
2769 | |
2770 | #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL |
2771 | if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE) |
2772 | return -EINVAL; |
2773 | #else |
2774 | if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE) |
2775 | return -EINVAL; |
2776 | #endif |
2777 | |
2778 | ret = lock_device_hotplug_sysfs(); |
2779 | if (ret) |
2780 | return ret; |
2781 | |
2782 | mutex_lock(&cpuhp_state_mutex); |
2783 | 	sp = cpuhp_get_step(target); |
2784 | ret = !sp->name || sp->cant_stop ? -EINVAL : 0; |
2785 | 	mutex_unlock(&cpuhp_state_mutex); |
2786 | if (ret) |
2787 | goto out; |
2788 | |
2789 | if (st->state < target) |
2790 | 		ret = cpu_up(dev->id, target); |
2791 | 	else if (st->state > target) |
2792 | 		ret = cpu_down(dev->id, target); |
2793 | else if (WARN_ON(st->target != target)) |
2794 | st->target = target; |
2795 | out: |
2796 | unlock_device_hotplug(); |
2797 | return ret ? ret : count; |
2798 | } |
2799 | |
2800 | static ssize_t target_show(struct device *dev, |
2801 | struct device_attribute *attr, char *buf) |
2802 | { |
2803 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); |
2804 | |
2805 | 	return sprintf(buf, "%d\n", st->target); |
2806 | } |
2807 | static DEVICE_ATTR_RW(target); |
2808 | |
2809 | static ssize_t fail_store(struct device *dev, struct device_attribute *attr, |
2810 | const char *buf, size_t count) |
2811 | { |
2812 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); |
2813 | struct cpuhp_step *sp; |
2814 | int fail, ret; |
2815 | |
2816 | 	ret = kstrtoint(buf, 10, &fail); |
2817 | if (ret) |
2818 | return ret; |
2819 | |
2820 | if (fail == CPUHP_INVALID) { |
2821 | st->fail = fail; |
2822 | return count; |
2823 | } |
2824 | |
2825 | if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE) |
2826 | return -EINVAL; |
2827 | |
2828 | /* |
2829 | * Cannot fail STARTING/DYING callbacks. |
2830 | */ |
2831 | 	if (cpuhp_is_atomic_state(fail)) |
2832 | return -EINVAL; |
2833 | |
2834 | /* |
2835 | * DEAD callbacks cannot fail... |
2836 | * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter |
2837 | 	 * triggers the STARTING callbacks; a failure in this state would |
2838 | * hinder rollback. |
2839 | */ |
2840 | if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU) |
2841 | return -EINVAL; |
2842 | |
2843 | /* |
2844 | * Cannot fail anything that doesn't have callbacks. |
2845 | */ |
2846 | mutex_lock(&cpuhp_state_mutex); |
2847 | 	sp = cpuhp_get_step(fail); |
2848 | if (!sp->startup.single && !sp->teardown.single) |
2849 | ret = -EINVAL; |
2850 | 	mutex_unlock(&cpuhp_state_mutex); |
2851 | if (ret) |
2852 | return ret; |
2853 | |
2854 | st->fail = fail; |
2855 | |
2856 | return count; |
2857 | } |
2858 | |
2859 | static ssize_t fail_show(struct device *dev, |
2860 | struct device_attribute *attr, char *buf) |
2861 | { |
2862 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); |
2863 | |
2864 | 	return sprintf(buf, "%d\n", st->fail); |
2865 | } |
2866 | |
2867 | static DEVICE_ATTR_RW(fail); |
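/*
 * The state, target and fail attributes above appear under
 * /sys/devices/system/cpu/cpuN/hotplug/. An illustrative fault-injection
 * session (state numbers are build-dependent and can be looked up in the
 * global "states" file defined below):
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# echo <state-id> > /sys/devices/system/cpu/cpu4/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu4/hotplug/target
 *
 * This makes the callback of <state-id> fail on the next offline (or
 * online) attempt so the rollback paths can be exercised. Writing -1
 * (CPUHP_INVALID) to "fail" clears the injection again.
 */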
2868 | |
2869 | static struct attribute *cpuhp_cpu_attrs[] = { |
2870 | &dev_attr_state.attr, |
2871 | &dev_attr_target.attr, |
2872 | &dev_attr_fail.attr, |
2873 | NULL |
2874 | }; |
2875 | |
2876 | static const struct attribute_group cpuhp_cpu_attr_group = { |
2877 | .attrs = cpuhp_cpu_attrs, |
2878 | .name = "hotplug" , |
2879 | NULL |
2880 | }; |
2881 | |
2882 | static ssize_t states_show(struct device *dev, |
2883 | struct device_attribute *attr, char *buf) |
2884 | { |
2885 | ssize_t cur, res = 0; |
2886 | int i; |
2887 | |
2888 | mutex_lock(&cpuhp_state_mutex); |
2889 | for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) { |
2890 | 		struct cpuhp_step *sp = cpuhp_get_step(i); |
2891 | |
2892 | if (sp->name) { |
2893 | 			cur = sprintf(buf, "%3d: %s\n", i, sp->name); |
2894 | buf += cur; |
2895 | res += cur; |
2896 | } |
2897 | } |
2898 | 	mutex_unlock(&cpuhp_state_mutex); |
2899 | return res; |
2900 | } |
2901 | static DEVICE_ATTR_RO(states); |
2902 | |
2903 | static struct attribute *cpuhp_cpu_root_attrs[] = { |
2904 | &dev_attr_states.attr, |
2905 | NULL |
2906 | }; |
2907 | |
2908 | static const struct attribute_group cpuhp_cpu_root_attr_group = { |
2909 | .attrs = cpuhp_cpu_root_attrs, |
2910 | .name = "hotplug" , |
2911 | NULL |
2912 | }; |
2913 | |
2914 | #ifdef CONFIG_HOTPLUG_SMT |
2915 | |
2916 | static bool cpu_smt_num_threads_valid(unsigned int threads) |
2917 | { |
2918 | if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC)) |
2919 | return threads >= 1 && threads <= cpu_smt_max_threads; |
2920 | return threads == 1 || threads == cpu_smt_max_threads; |
2921 | } |
2922 | |
2923 | static ssize_t |
2924 | __store_smt_control(struct device *dev, struct device_attribute *attr, |
2925 | const char *buf, size_t count) |
2926 | { |
2927 | int ctrlval, ret, num_threads, orig_threads; |
2928 | bool force_off; |
2929 | |
2930 | if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) |
2931 | return -EPERM; |
2932 | |
2933 | if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) |
2934 | return -ENODEV; |
2935 | |
2936 | 	if (sysfs_streq(buf, "on")) { |
2937 | 		ctrlval = CPU_SMT_ENABLED; |
2938 | 		num_threads = cpu_smt_max_threads; |
2939 | 	} else if (sysfs_streq(buf, "off")) { |
2940 | 		ctrlval = CPU_SMT_DISABLED; |
2941 | 		num_threads = 1; |
2942 | 	} else if (sysfs_streq(buf, "forceoff")) { |
2943 | 		ctrlval = CPU_SMT_FORCE_DISABLED; |
2944 | 		num_threads = 1; |
2945 | 	} else if (kstrtoint(buf, 10, &num_threads) == 0) { |
2946 | 		if (num_threads == 1) |
2947 | 			ctrlval = CPU_SMT_DISABLED; |
2948 | 		else if (cpu_smt_num_threads_valid(num_threads)) |
2949 | ctrlval = CPU_SMT_ENABLED; |
2950 | else |
2951 | return -EINVAL; |
2952 | } else { |
2953 | return -EINVAL; |
2954 | } |
2955 | |
2956 | ret = lock_device_hotplug_sysfs(); |
2957 | if (ret) |
2958 | return ret; |
2959 | |
2960 | orig_threads = cpu_smt_num_threads; |
2961 | cpu_smt_num_threads = num_threads; |
2962 | |
2963 | force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED; |
2964 | |
2965 | if (num_threads > orig_threads) |
2966 | ret = cpuhp_smt_enable(); |
2967 | else if (num_threads < orig_threads || force_off) |
2968 | ret = cpuhp_smt_disable(ctrlval); |
2969 | |
2970 | unlock_device_hotplug(); |
2971 | return ret ? ret : count; |
2972 | } |
2973 | |
2974 | #else /* !CONFIG_HOTPLUG_SMT */ |
2975 | static ssize_t |
2976 | __store_smt_control(struct device *dev, struct device_attribute *attr, |
2977 | const char *buf, size_t count) |
2978 | { |
2979 | return -ENODEV; |
2980 | } |
2981 | #endif /* CONFIG_HOTPLUG_SMT */ |
2982 | |
2983 | static const char *smt_states[] = { |
2984 | [CPU_SMT_ENABLED] = "on" , |
2985 | [CPU_SMT_DISABLED] = "off" , |
2986 | [CPU_SMT_FORCE_DISABLED] = "forceoff" , |
2987 | [CPU_SMT_NOT_SUPPORTED] = "notsupported" , |
2988 | [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented" , |
2989 | }; |
2990 | |
2991 | static ssize_t control_show(struct device *dev, |
2992 | struct device_attribute *attr, char *buf) |
2993 | { |
2994 | const char *state = smt_states[cpu_smt_control]; |
2995 | |
2996 | #ifdef CONFIG_HOTPLUG_SMT |
2997 | /* |
2998 | * If SMT is enabled but not all threads are enabled then show the |
2999 | * number of threads. If all threads are enabled show "on". Otherwise |
3000 | * show the state name. |
3001 | */ |
3002 | if (cpu_smt_control == CPU_SMT_ENABLED && |
3003 | cpu_smt_num_threads != cpu_smt_max_threads) |
3004 | 		return sysfs_emit(buf, "%d\n", cpu_smt_num_threads); |
3005 | #endif |
3006 | |
3007 | 	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); |
3008 | } |
3009 | |
3010 | static ssize_t control_store(struct device *dev, struct device_attribute *attr, |
3011 | const char *buf, size_t count) |
3012 | { |
3013 | return __store_smt_control(dev, attr, buf, count); |
3014 | } |
3015 | static DEVICE_ATTR_RW(control); |
3016 | |
3017 | static ssize_t active_show(struct device *dev, |
3018 | struct device_attribute *attr, char *buf) |
3019 | { |
3020 | 	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active()); |
3021 | } |
3022 | static DEVICE_ATTR_RO(active); |
3023 | |
3024 | static struct attribute *cpuhp_smt_attrs[] = { |
3025 | &dev_attr_control.attr, |
3026 | &dev_attr_active.attr, |
3027 | NULL |
3028 | }; |
3029 | |
3030 | static const struct attribute_group cpuhp_smt_attr_group = { |
3031 | .attrs = cpuhp_smt_attrs, |
3032 | .name = "smt" , |
3033 | NULL |
3034 | }; |
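/*
 * These attributes are exposed as /sys/devices/system/cpu/smt/control and
 * /sys/devices/system/cpu/smt/active. Illustrative usage, matching the
 * values accepted by __store_smt_control() above:
 *
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# echo on  > /sys/devices/system/cpu/smt/control
 *	# echo 2   > /sys/devices/system/cpu/smt/control
 *
 * The numeric form is only valid with CONFIG_SMT_NUM_THREADS_DYNAMIC and
 * limits the number of threads per core. "forceoff" disables SMT until
 * the next reboot; any further write then fails with -EPERM.
 */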
3035 | |
3036 | static int __init cpu_smt_sysfs_init(void) |
3037 | { |
3038 | struct device *dev_root; |
3039 | int ret = -ENODEV; |
3040 | |
3041 | 	dev_root = bus_get_dev_root(&cpu_subsys); |
3042 | 	if (dev_root) { |
3043 | 		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group); |
3044 | 		put_device(dev_root); |
3045 | } |
3046 | return ret; |
3047 | } |
3048 | |
3049 | static int __init cpuhp_sysfs_init(void) |
3050 | { |
3051 | struct device *dev_root; |
3052 | int cpu, ret; |
3053 | |
3054 | ret = cpu_smt_sysfs_init(); |
3055 | if (ret) |
3056 | return ret; |
3057 | |
3058 | 	dev_root = bus_get_dev_root(&cpu_subsys); |
3059 | 	if (dev_root) { |
3060 | 		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group); |
3061 | 		put_device(dev_root); |
3062 | if (ret) |
3063 | return ret; |
3064 | } |
3065 | |
3066 | for_each_possible_cpu(cpu) { |
3067 | struct device *dev = get_cpu_device(cpu); |
3068 | |
3069 | if (!dev) |
3070 | continue; |
3071 | 		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group); |
3072 | if (ret) |
3073 | return ret; |
3074 | } |
3075 | return 0; |
3076 | } |
3077 | device_initcall(cpuhp_sysfs_init); |
3078 | #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */ |
3079 | |
3080 | /* |
3081 | * cpu_bit_bitmap[] is a special, "compressed" data structure that |
3082 | * represents all NR_CPUS bits binary values of 1<<nr. |
3083 | * |
3084 | * It is used by cpumask_of() to get a constant address to a CPU |
3085 | * mask value that has a single bit set only. |
3086 | */ |
3087 | |
3088 | /* cpu_bit_bitmap[0] is empty - so we can back into it */ |
3089 | #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) |
3090 | #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) |
3091 | #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) |
3092 | #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) |
3093 | |
3094 | const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { |
3095 | |
3096 | MASK_DECLARE_8(0), MASK_DECLARE_8(8), |
3097 | MASK_DECLARE_8(16), MASK_DECLARE_8(24), |
3098 | #if BITS_PER_LONG > 32 |
3099 | MASK_DECLARE_8(32), MASK_DECLARE_8(40), |
3100 | MASK_DECLARE_8(48), MASK_DECLARE_8(56), |
3101 | #endif |
3102 | }; |
3103 | EXPORT_SYMBOL_GPL(cpu_bit_bitmap); |
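/*
 * Worked example of the trick above, as used by get_cpu_mask() in
 * <linux/cpumask.h> (a sketch, not a definition): for a given cpu, row
 * (1 + cpu % BITS_PER_LONG) has exactly bit (cpu % BITS_PER_LONG) set in
 * its first word. get_cpu_mask() takes that row and steps the pointer
 * back by (cpu / BITS_PER_LONG) words, so the set bit lands in the right
 * word of the resulting mask while all other words it covers are zero.
 * With BITS_PER_LONG == 64 and cpu == 66: row 3 has bit 2 set in word 0;
 * backing up one word makes that the mask's word 1, i.e. bit 66 overall.
 * The all-zero row cpu_bit_bitmap[0] exists so this backing up never
 * leaves the array.
 */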
3104 | |
3105 | const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; |
3106 | EXPORT_SYMBOL(cpu_all_bits); |
3107 | |
3108 | #ifdef CONFIG_INIT_ALL_POSSIBLE |
3109 | struct cpumask __cpu_possible_mask __read_mostly |
3110 | = {CPU_BITS_ALL}; |
3111 | #else |
3112 | struct cpumask __cpu_possible_mask __read_mostly; |
3113 | #endif |
3114 | EXPORT_SYMBOL(__cpu_possible_mask); |
3115 | |
3116 | struct cpumask __cpu_online_mask __read_mostly; |
3117 | EXPORT_SYMBOL(__cpu_online_mask); |
3118 | |
3119 | struct cpumask __cpu_present_mask __read_mostly; |
3120 | EXPORT_SYMBOL(__cpu_present_mask); |
3121 | |
3122 | struct cpumask __cpu_active_mask __read_mostly; |
3123 | EXPORT_SYMBOL(__cpu_active_mask); |
3124 | |
3125 | struct cpumask __cpu_dying_mask __read_mostly; |
3126 | EXPORT_SYMBOL(__cpu_dying_mask); |
3127 | |
3128 | atomic_t __num_online_cpus __read_mostly; |
3129 | EXPORT_SYMBOL(__num_online_cpus); |
3130 | |
3131 | void init_cpu_present(const struct cpumask *src) |
3132 | { |
3133 | 	cpumask_copy(&__cpu_present_mask, src); |
3134 | } |
3135 | |
3136 | void init_cpu_possible(const struct cpumask *src) |
3137 | { |
3138 | 	cpumask_copy(&__cpu_possible_mask, src); |
3139 | } |
3140 | |
3141 | void init_cpu_online(const struct cpumask *src) |
3142 | { |
3143 | 	cpumask_copy(&__cpu_online_mask, src); |
3144 | } |
3145 | |
3146 | void set_cpu_online(unsigned int cpu, bool online) |
3147 | { |
3148 | /* |
3149 | * atomic_inc/dec() is required to handle the horrid abuse of this |
3150 | * function by the reboot and kexec code which invoke it from |
3151 | * IPI/NMI broadcasts when shutting down CPUs. Invocation from |
3152 | * regular CPU hotplug is properly serialized. |
3153 | * |
3154 | 	 * Note that the fact that __num_online_cpus is of type atomic_t |
3155 | * does not protect readers which are not serialized against |
3156 | * concurrent hotplug operations. |
3157 | */ |
3158 | if (online) { |
3159 | 		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) |
3160 | 			atomic_inc(&__num_online_cpus); |
3161 | 	} else { |
3162 | 		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) |
3163 | 			atomic_dec(&__num_online_cpus); |
3164 | } |
3165 | } |
3166 | |
3167 | /* |
3168 | * Activate the first processor. |
3169 | */ |
3170 | void __init boot_cpu_init(void) |
3171 | { |
3172 | int cpu = smp_processor_id(); |
3173 | |
3174 | /* Mark the boot cpu "present", "online" etc for SMP and UP case */ |
3175 | 	set_cpu_online(cpu, true); |
3176 | 	set_cpu_active(cpu, true); |
3177 | 	set_cpu_present(cpu, true); |
3178 | 	set_cpu_possible(cpu, true); |
3179 | |
3180 | #ifdef CONFIG_SMP |
3181 | __boot_cpu_id = cpu; |
3182 | #endif |
3183 | } |
3184 | |
3185 | /* |
3186 | * Must be called _AFTER_ setting up the per_cpu areas |
3187 | */ |
3188 | void __init boot_cpu_hotplug_init(void) |
3189 | { |
3190 | #ifdef CONFIG_SMP |
3191 | 	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask); |
3192 | 	atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE); |
3193 | #endif |
3194 | this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); |
3195 | this_cpu_write(cpuhp_state.target, CPUHP_ONLINE); |
3196 | } |
3197 | |
3198 | /* |
3199 | * These are used for a global "mitigations=" cmdline option for toggling |
3200 | * optional CPU mitigations. |
3201 | */ |
3202 | enum cpu_mitigations { |
3203 | CPU_MITIGATIONS_OFF, |
3204 | CPU_MITIGATIONS_AUTO, |
3205 | CPU_MITIGATIONS_AUTO_NOSMT, |
3206 | }; |
3207 | |
3208 | static enum cpu_mitigations cpu_mitigations __ro_after_init = |
3209 | CPU_MITIGATIONS_AUTO; |
3210 | |
3211 | static int __init mitigations_parse_cmdline(char *arg) |
3212 | { |
3213 | if (!strcmp(arg, "off" )) |
3214 | cpu_mitigations = CPU_MITIGATIONS_OFF; |
3215 | else if (!strcmp(arg, "auto" )) |
3216 | cpu_mitigations = CPU_MITIGATIONS_AUTO; |
3217 | else if (!strcmp(arg, "auto,nosmt" )) |
3218 | cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; |
3219 | else |
3220 | pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n" , |
3221 | arg); |
3222 | |
3223 | return 0; |
3224 | } |
3225 | early_param("mitigations" , mitigations_parse_cmdline); |
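/*
 * For reference, the resulting command line usage is:
 *
 *	mitigations=off		disable all optional CPU mitigations
 *	mitigations=auto	(default) enable mitigations, leave SMT alone
 *	mitigations=auto,nosmt	enable mitigations and let the architecture
 *				disable SMT where a mitigation requires it
 *
 * Architecture code queries the selection via cpu_mitigations_off() and
 * cpu_mitigations_auto_nosmt() below.
 */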
3226 | |
3227 | /* mitigations=off */ |
3228 | bool cpu_mitigations_off(void) |
3229 | { |
3230 | return cpu_mitigations == CPU_MITIGATIONS_OFF; |
3231 | } |
3232 | EXPORT_SYMBOL_GPL(cpu_mitigations_off); |
3233 | |
3234 | /* mitigations=auto,nosmt */ |
3235 | bool cpu_mitigations_auto_nosmt(void) |
3236 | { |
3237 | return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; |
3238 | } |
3239 | EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt); |
3240 | |