// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/suspend.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)  ((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
        call_single_data_t      __percpu *csd;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        irq_work_run();
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void csd_lock_record(struct __call_single_data *csd)
{
        if (!csd) {
                smp_mb(); /* NULL cur_csd after unlock. */
                __this_cpu_write(cur_csd, NULL);
                return;
        }
        __this_cpu_write(cur_csd_func, csd->func);
        __this_cpu_write(cur_csd_info, csd->info);
        smp_wmb(); /* func and info before csd. */
        __this_cpu_write(cur_csd, csd);
        smp_mb(); /* Update cur_csd before function call. */
                  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
        unsigned int csd_type;

        csd_type = CSD_TYPE(csd);
        if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
                return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
        return -1;
}

/*
 * Complain if too much time spent waiting.  Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
        int cpu = -1;
        int cpux;
        bool firsttime;
        u64 ts2, ts_delta;
        call_single_data_t *cpu_cur_csd;
        unsigned int flags = READ_ONCE(csd->flags);

        if (!(flags & CSD_FLAG_LOCK)) {
                if (!unlikely(*bug_id))
                        return true;
                cpu = csd_lock_wait_getcpu(csd);
                pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
                         *bug_id, raw_smp_processor_id(), cpu);
                return true;
        }

        ts2 = sched_clock();
        ts_delta = ts2 - *ts1;
        if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
                return false;

        firsttime = !*bug_id;
        if (firsttime)
                *bug_id = atomic_inc_return(&csd_bug_count);
        cpu = csd_lock_wait_getcpu(csd);
        if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
                cpux = 0;
        else
                cpux = cpu;
        cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
        pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
                 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
                 cpu, csd->func, csd->info);
        if (cpu_cur_csd && csd != cpu_cur_csd) {
                pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
                         *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
                         READ_ONCE(per_cpu(cur_csd_info, cpux)));
        } else {
                pr_alert("\tcsd: CSD lock (#%d) %s.\n",
                         *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
        }
        if (cpu >= 0) {
                if (!trigger_single_cpu_backtrace(cpu))
                        dump_cpu_task(cpu);
                if (!cpu_cur_csd) {
                        pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
                        arch_send_call_function_single_ipi(cpu);
                }
        }
        dump_stack();
        *ts1 = ts2;

        return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
        int bug_id = 0;
        u64 ts0, ts1;

        ts1 = ts0 = sched_clock();
        for (;;) {
                if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
                        break;
                cpu_relax();
        }
        smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
        /*
         * The list addition should be visible before sending the IPI:
         * the handler locks the list to pull the entry off it, relying on
         * the normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(node, &per_cpu(call_single_queue, cpu)))
                send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
        if (cpu == smp_processor_id()) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_lock_record(csd);
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                csd_lock_record(NULL);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        __smp_call_single_queue(cpu, &csd->llist);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        call_single_data_t *csd, *csd_next;
        struct llist_node *entry, *prev;
        struct llist_head *head;
        static bool warned;

        lockdep_assert_irqs_disabled();

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && entry != NULL)) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist) {
                        switch (CSD_TYPE(csd)) {
                        case CSD_TYPE_ASYNC:
                        case CSD_TYPE_SYNC:
                        case CSD_TYPE_IRQ_WORK:
                                pr_warn("IPI callback %pS sent to offline CPU\n",
                                        csd->func);
                                break;

                        case CSD_TYPE_TTWU:
                                pr_warn("IPI task-wakeup sent to offline CPU\n");
                                break;

                        default:
                                pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
                                        CSD_TYPE(csd));
                                break;
                        }
                }
        }

        /*
         * First; run all SYNC callbacks, people are waiting for us.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                /* Do we wait until *after* callback? */
                if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
                        smp_call_func_t func = csd->func;
                        void *info = csd->info;

                        if (prev) {
                                prev->next = &csd_next->llist;
                        } else {
                                entry = &csd_next->llist;
                        }

                        csd_lock_record(csd);
                        func(info);
                        csd_unlock(csd);
                        csd_lock_record(NULL);
                } else {
                        prev = &csd->llist;
                }
        }

        if (!entry)
                return;

        /*
         * Second; run all !SYNC callbacks.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                int type = CSD_TYPE(csd);

                if (type != CSD_TYPE_TTWU) {
                        if (prev) {
                                prev->next = &csd_next->llist;
                        } else {
                                entry = &csd_next->llist;
                        }

                        if (type == CSD_TYPE_ASYNC) {
                                smp_call_func_t func = csd->func;
                                void *info = csd->info;

                                csd_lock_record(csd);
                                csd_unlock(csd);
                                func(info);
                                csd_lock_record(NULL);
                        } else if (type == CSD_TYPE_IRQ_WORK) {
                                irq_work_single(csd);
                        }

                } else {
                        prev = &csd->llist;
                }
        }

        /*
         * Third; only CSD_TYPE_TTWU is left, issue those.
         */
        if (entry)
                sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
        unsigned long flags;

        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;

        local_irq_save(flags);
        flush_smp_call_function_queue(true);
        if (local_softirq_pending())
                do_softirq();

        local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        csd->func = func;
        csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
        csd->src = smp_processor_id();
        csd->dst = cpu;
#endif

        err = generic_exec_single(cpu, csd);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
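
/*
 * Illustrative usage sketch (not part of this file's API documentation):
 * a synchronous call runs @func in IPI context on the target CPU and only
 * returns once it has completed. All names below (struct which_cpu,
 * record_cpu(), probe_cpu()) are hypothetical; only
 * smp_call_function_single() itself is real.
 *
 *      struct which_cpu {
 *              int cpu;
 *      };
 *
 *      static void record_cpu(void *info)
 *      {
 *              struct which_cpu *w = info;
 *
 *              w->cpu = smp_processor_id();    // runs on the target CPU
 *      }
 *
 *      static int probe_cpu(int cpu)
 *      {
 *              struct which_cpu w = { .cpu = -1 };
 *              int err;
 *
 *              err = smp_call_function_single(cpu, record_cpu, &w, 1);
 *              if (err)
 *                      return err;             // e.g. -ENXIO if @cpu is offline
 *              WARN_ON(w.cpu != cpu);
 *              return 0;
 *      }
 */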

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        if (csd->flags & CSD_FLAG_LOCK) {
                err = -EBUSY;
                goto out;
        }

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd);

out:
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
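
/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * callers typically embed the csd in their own object, fill in ->func and
 * ->info once, and then re-fire it; firing again before the previous request
 * was processed returns -EBUSY, as documented above. The names below
 * (struct kick_state, kick_init(), kick_remote()) are hypothetical.
 *
 *      struct kick_state {
 *              call_single_data_t csd;
 *              atomic_t kicks;
 *      };
 *
 *      static void kick_func(void *info)
 *      {
 *              struct kick_state *ks = info;
 *
 *              atomic_inc(&ks->kicks);         // runs in IPI context remotely
 *      }
 *
 *      static void kick_init(struct kick_state *ks)
 *      {
 *              ks->csd.func = kick_func;
 *              ks->csd.info = ks;
 *              atomic_set(&ks->kicks, 0);
 *      }
 *
 *      static int kick_remote(struct kick_state *ks, int cpu)
 *      {
 *              // May be called with interrupts disabled; -EBUSY means the
 *              // previous kick for this csd has not been processed yet.
 *              return smp_call_function_single_async(cpu, &ks->csd);
 *      }
 */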

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
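
/*
 * Illustrative sketch (hypothetical names, not from this file): run a quick
 * callback on whichever CPU of a NUMA node is cheapest to reach, preferring
 * the current CPU as described in the selection order above.
 *
 *      static void poke(void *info)
 *      {
 *              (*(int *)info)++;
 *      }
 *
 *      static int poke_node(int node)
 *      {
 *              int count = 0;
 *
 *              // Runs poke() on the current CPU if it belongs to @node,
 *              // otherwise on some online CPU of that node; wait=1 makes
 *              // the on-stack @count safe to use.
 *              return smp_call_function_any(cpumask_of_node(node), poke,
 *                                           &count, 1);
 *      }
 */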

static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
                                        bool wait, smp_cond_func_t cond_func)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                if (!cond_func || cond_func(cpu, info))
                        smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                if (cond_func && !cond_func(cpu, info))
                        continue;

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_TYPE_SYNC;
                csd->func = func;
                csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
                csd->src = smp_processor_id();
                csd->dst = cpu;
#endif
                if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
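
/*
 * Illustrative sketch (hypothetical names): smp_call_function_many() skips
 * the calling CPU and requires preemption to be disabled, so a caller that
 * also wants the function run locally typically does something like the
 * following; on_each_cpu_mask() below is the canonical variant, which also
 * disables interrupts around the local call.
 *
 *      static void drain_local(void *info)
 *      {
 *              // fast, non-blocking; runs with interrupts disabled remotely
 *      }
 *
 *      static void drain_cpus(const struct cpumask *mask)
 *      {
 *              int cpu = get_cpu();            // disables preemption
 *
 *              smp_call_function_many(mask, drain_local, NULL, true);
 *              if (cpumask_test_cpu(cpu, mask))
 *                      drain_local(NULL);      // remote CPUs already done
 *              put_cpu();
 *      }
 */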

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        bringup_nonboot_cpus(setup_max_cpus);

        num_nodes = num_online_nodes();
        num_cpus  = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus,  (num_cpus  > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
        unsigned long flags;

        preempt_disable();
        smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
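
/*
 * Illustrative sketch (hypothetical names): on_each_cpu() is the usual way
 * to run a quick operation on every CPU, including the local one, e.g.
 * folding per-CPU counters into a shared total.
 *
 *      static DEFINE_PER_CPU(unsigned long, hits);
 *      static atomic_long_t total;
 *
 *      static void fold_hits(void *info)
 *      {
 *              // Called on every CPU with interrupts disabled.
 *              atomic_long_add(this_cpu_xchg(hits, 0), &total);
 *      }
 *
 *      static long snapshot_hits(void)
 *      {
 *              on_each_cpu(fold_hits, NULL, 1);        // wait for all CPUs
 *              return atomic_long_read(&total);
 *      }
 */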

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
{
        int cpu = get_cpu();

        smp_call_function_many_cond(mask, func, info, wait, cond_func);
        if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
                      void *info, bool wait)
{
        on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
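
/*
 * Illustrative sketch (hypothetical names): the @cond_func predicate lets a
 * caller IPI only the CPUs that actually have work pending, instead of
 * interrupting every CPU unconditionally.
 *
 *      static DEFINE_PER_CPU(unsigned int, pcp_count);
 *
 *      static bool cpu_has_work(int cpu, void *info)
 *      {
 *              return per_cpu(pcp_count, cpu) != 0;
 *      }
 *
 *      static void drain_pcp(void *info)
 *      {
 *              this_cpu_write(pcp_count, 0);
 *      }
 *
 *      static void drain_all_nonempty(void)
 *      {
 *              // Only CPUs for which cpu_has_work() returns true get an IPI.
 *              on_each_cpu_cond(cpu_has_work, drain_pcp, NULL, true);
 *      }
 */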

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including cpus that are idle-polling; nothing is done for cpus that are
 * not idle.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

#if IS_ENABLED(CONFIG_SUSPEND)
                if (s2idle_state == S2IDLE_STATE_ENTER || cpu_active(cpu))
#endif
                        wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * wake_up_all_online_idle_cpus - break all online cpus out of idle
 *
 * wake_up_all_online_idle_cpus tries to break all online cpus which are in
 * idle state, including cpus that are idle-polling; nothing is done for
 * cpus that are not idle.
 */
void wake_up_all_online_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_online_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
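
/*
 * Illustrative sketch (hypothetical names): unlike the IPI-based helpers
 * above, smp_call_on_cpu() runs @func from a workqueue worker on the target
 * CPU, so the callback may sleep; the caller must also be in a context that
 * can sleep, since it waits on a completion.
 *
 *      static int read_slow_state(void *arg)
 *      {
 *              // Runs in process context on the chosen CPU and may sleep.
 *              *(u32 *)arg = 42;       // stand-in for a slow, per-CPU read
 *              return 0;
 *      }
 *
 *      static int query_cpu_state(unsigned int cpu, u32 *out)
 *      {
 *              // phys=false: no vCPU pinning requested.
 *              return smp_call_on_cpu(cpu, read_slow_state, out, false);
 *      }
 */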