// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
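
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): running a callback on a chosen CPU with stop_one_cpu(). The
 * helper names example_record_cpu() and example_where_am_i() are
 * hypothetical; only the stop_one_cpu() signature above is assumed.
 *
 *	static int example_record_cpu(void *arg)
 *	{
 *		unsigned int *seen = arg;
 *
 *		*seen = smp_processor_id();	// runs with @cpu monopolized
 *		return 0;			// becomes stop_one_cpu()'s return value
 *	}
 *
 *	static int example_where_am_i(unsigned int cpu)
 *	{
 *		unsigned int seen = 0;
 *		int ret = stop_one_cpu(cpu, example_record_cpu, &seen);
 *
 *		if (!ret)
 *			pr_info("callback ran on cpu%u\n", seen);
 *		return ret;
 *	}
 */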

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};
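
/*
 * Every participating thread runs multi_cpu_stop() and acknowledges each
 * state; the last thread to ack advances msdata->state by one (see
 * ack_state() below), so all CPUs move through
 * MULTI_STOP_PREPARE -> MULTI_STOP_DISABLE_IRQ -> MULTI_STOP_RUN ->
 * MULTI_STOP_EXIT in lockstep.
 */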

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing. Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
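
/*
 * Illustrative sketch (not part of the original file): stopping two CPUs
 * and running a callback on one of them (here @cpu1, per .active_cpus
 * above) while both spin with interrupts disabled. The callback and
 * argument struct are hypothetical; only the stop_two_cpus() signature
 * above is assumed.
 *
 *	struct example_swap_arg {
 *		unsigned int src_cpu;
 *		unsigned int dst_cpu;
 *	};
 *
 *	static int example_swap_fn(void *data)
 *	{
 *		struct example_swap_arg *arg = data;
 *
 *		// both arg->src_cpu and arg->dst_cpu are parked in
 *		// multi_cpu_stop() with IRQs off at this point
 *		return 0;
 *	}
 *
 *	static int example_swap(unsigned int src, unsigned int dst)
 *	{
 *		struct example_swap_arg arg = { .src_cpu = src, .dst_cpu = dst };
 *
 *		return stop_two_cpus(src, dst, example_swap_fn, &arg);
 *	}
 */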

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
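
/*
 * Illustrative sketch (not part of the original file): fire-and-forget use
 * of stop_one_cpu_nowait(). The work buffer must outlive the request, so a
 * long-lived per-cpu buffer is used here. The names below are
 * hypothetical; only the stop_one_cpu_nowait() signature above is assumed.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, example_kick_work);
 *
 *	static int example_kick_fn(void *arg)
 *	{
 *		return 0;	// e.g. just force the CPU through the stopper
 *	}
 *
 *	static void example_kick_cpu(unsigned int cpu)
 *	{
 *		if (!stop_one_cpu_nowait(cpu, example_kick_fn, NULL,
 *					 &per_cpu(example_kick_work, cpu)))
 *			pr_debug("cpu%u stopper offline, request dropped\n", cpu);
 *	}
 */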

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		work->caller = _RET_IP_;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
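
/*
 * Illustrative sketch (not part of the original file): a typical
 * stop_machine() call that updates global state while every online CPU
 * spins with interrupts disabled. The callback and data structure are
 * hypothetical; only the stop_machine() signature above is assumed.
 *
 *	struct example_patch {
 *		int *target;
 *		int new_value;
 *	};
 *
 *	static int example_apply_patch(void *data)
 *	{
 *		struct example_patch *p = data;
 *
 *		// all other online CPUs are parked in multi_cpu_stop()
 *		// with IRQs off, so this store cannot race with them
 *		*p->target = p->new_value;
 *		return 0;
 *	}
 *
 *	static int example_patch_value(int *target, int new_value)
 *	{
 *		struct example_patch p = { .target = target, .new_value = new_value };
 *
 *		return stop_machine(example_apply_patch, &p, NULL);
 *	}
 */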

#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);

	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = cpumask_weight(smt_mask),
		.active_cpus = smt_mask,
	};

	lockdep_assert_cpus_held();

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
#endif

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}