// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
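
/*
 * Usage sketch (hypothetical caller, for illustration only; nothing
 * below is part of this file).  The callback must not sleep; it runs
 * on the target cpu as the highest-priority task:
 *
 *	static int read_cpu_id(void *arg)
 *	{
 *		*(int *)arg = raw_smp_processor_id();
 *		return 0;
 *	}
 *
 *	int seen, err;
 *
 *	err = stop_one_cpu(2, read_cpu_id, &seen);
 *
 * err is -ENOENT if cpu 2 was offline; otherwise it is 0 and, barring
 * a concurrent hot-unplug, seen == 2.
 */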

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};
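
/*
 * The stopper threads advance through these states in lockstep; the
 * last thread to ack a state moves everyone to the next one (see
 * ack_state()):
 *
 *	MULTI_STOP_NONE -> MULTI_STOP_PREPARE -> MULTI_STOP_DISABLE_IRQ
 *		-> MULTI_STOP_RUN -> MULTI_STOP_EXIT
 */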

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us.  This will cause us to not wake up the
	 * other stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2; we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared;
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn(@arg) on @cpu1 while the
 * other cpu spins with interrupts disabled.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
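
/*
 * Usage sketch (hypothetical caller; the names below are placeholders,
 * not part of this file).  Both cpus spin in multi_cpu_stop() with
 * interrupts disabled while the callback runs on @cpu1:
 *
 *	static int swap_ctx(void *arg)
 *	{
 *		struct my_swap_arg *sa = arg;
 *
 *		return do_swap(sa->src, sa->dst);
 *	}
 *
 *	err = stop_two_cpus(src_cpu, dst_cpu, swap_ctx, &sa);
 *
 * This is the pattern a cross-cpu task-swap style caller would use;
 * -ENOENT is returned if either cpu's stopper is disabled.
 */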

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}
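
/*
 * Usage sketch (hypothetical): because the call returns without
 * waiting, @work_buf must outlive the request, e.g. live in per-cpu
 * or otherwise long-lived storage rather than on the stack:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, my_stop_work);
 *
 *	stop_one_cpu_nowait(cpu, my_stop_fn, my_arg,
 *			    &per_cpu(my_stop_work, cpu));
 *
 * my_stop_fn/my_arg are placeholders.  This mirrors how the scheduler
 * kicks its migration work without sleeping.
 */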

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
#ifdef CONFIG_CPU_ISOLATION_OPT
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
#else
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
#endif
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
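
/*
 * Usage sketch (hypothetical; only meaningful under
 * CONFIG_CPU_ISOLATION_OPT, where stop_cpus() has external linkage):
 *
 *	err = stop_cpus(cpu_online_mask, quiesce_fn, NULL);
 *
 * quiesce_fn is a placeholder cpu_stop_fn_t.  It runs concurrently on
 * every online cpu in the mask, and err is -ENOENT only if all of
 * them were offline.
 */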

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
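
/*
 * Usage sketch (hypothetical caller): flip a flag while every online
 * cpu spins in multi_cpu_stop() with interrupts disabled, so no cpu
 * can observe an intermediate state:
 *
 *	static int apply_update(void *arg)
 *	{
 *		*(bool *)arg = true;
 *		return 0;
 *	}
 *
 *	static bool feature_enabled;
 *
 *	err = stop_machine(apply_update, &feature_enabled, NULL);
 *
 * With @cpus == NULL, apply_update() runs on the first online cpu;
 * the others just spin with irqs off until it returns.
 */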

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
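
/*
 * Usage sketch (hypothetical): called from a cpu that is coming up
 * and is not yet active, so it cannot sleep and must busy-wait:
 *
 *	ret = stop_machine_from_inactive_cpu(sync_fn, &state, NULL);
 *
 * sync_fn/state are placeholders.  All active cpus plus the local
 * inactive one execute multi_cpu_stop() in lockstep; with @cpus ==
 * NULL, @fn itself runs on the first cpu of cpu_online_mask.
 */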