// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
union cfd_seq_cnt {
	u64		val;
	struct {
		u64	src:16;
		u64	dst:16;
#define CFD_SEQ_NOCPU	0xffff
		u64	type:4;
#define CFD_SEQ_QUEUE	0
#define CFD_SEQ_IPI	1
#define CFD_SEQ_NOIPI	2
#define CFD_SEQ_PING	3
#define CFD_SEQ_PINGED	4
#define CFD_SEQ_HANDLE	5
#define CFD_SEQ_DEQUEUE	6
#define CFD_SEQ_IDLE	7
#define CFD_SEQ_GOTIPI	8
#define CFD_SEQ_HDLEND	9
		u64	cnt:28;
	} u;
};
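/*
 * Illustrative sketch (not part of the original file): the union above packs
 * a source CPU, a destination CPU, an event type and a 28-bit rolling count
 * into a single 64-bit value, so one READ_ONCE()/cmpxchg() can snapshot or
 * update the whole record atomically. A hypothetical decoder of such a
 * snapshot could look like this:
 */
#if 0
static void cfd_seq_decode_example(u64 snapshot)
{
	union cfd_seq_cnt seq = { .val = snapshot };

	pr_info("src=%u dst=%u type=%u cnt=%u\n",
		(unsigned int)seq.u.src, (unsigned int)seq.u.dst,
		(unsigned int)seq.u.type, (unsigned int)seq.u.cnt);
}
#endif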

static char *seq_type[] = {
	[CFD_SEQ_QUEUE]		= "queue",
	[CFD_SEQ_IPI]		= "ipi",
	[CFD_SEQ_NOIPI]		= "noipi",
	[CFD_SEQ_PING]		= "ping",
	[CFD_SEQ_PINGED]	= "pinged",
	[CFD_SEQ_HANDLE]	= "handle",
	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
	[CFD_SEQ_IDLE]		= "idle",
	[CFD_SEQ_GOTIPI]	= "gotipi",
	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
};

struct cfd_seq_local {
	u64	ping;
	u64	pinged;
	u64	handle;
	u64	dequeue;
	u64	idle;
	u64	gotipi;
	u64	hdlend;
};
#endif

struct cfd_percpu {
	call_single_data_t	csd;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u64	seq_queue;
	u64	seq_ipi;
	u64	seq_noipi;
#endif
};

struct call_function_data {
	struct cfd_percpu	__percpu *pcpu;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	if (str && !strcmp(str, "ext")) {
		val = 1;
		static_branch_enable(&csdlock_debug_extended);
	} else
		get_option(&str, &val);

	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);
static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
module_param(panic_on_ipistall, int, 0444);

static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

#define CFD_SEQ(s, d, t, c)	\
	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }

static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
{
	union cfd_seq_cnt new, old;

	new = CFD_SEQ(src, dst, type, 0);

	do {
		old.val = READ_ONCE(cfd_seq);
		new.u.cnt = old.u.cnt + 1;
	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);

	return old.val;
}

#define cfd_seq_store(var, src, dst, type)				\
	do {								\
		if (static_branch_unlikely(&csdlock_debug_extended))	\
			var = cfd_seq_inc(src, dst, type);		\
	} while (0)

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
			     unsigned int type, union cfd_seq_cnt *data,
			     unsigned int *n_data, unsigned int now)
{
	union cfd_seq_cnt new[2];
	unsigned int i, j, k;

	new[0].val = val;
	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);

	for (i = 0; i < 2; i++) {
		if (new[i].u.cnt <= now)
			new[i].u.cnt |= 0x80000000U;
		for (j = 0; j < *n_data; j++) {
			if (new[i].u.cnt == data[j].u.cnt) {
				/* Direct read value trumps generated one. */
				if (i == 0)
					data[j].val = new[i].val;
				break;
			}
			if (new[i].u.cnt < data[j].u.cnt) {
				for (k = *n_data; k > j; k--)
					data[k].val = data[k - 1].val;
				data[j].val = new[i].val;
				(*n_data)++;
				break;
			}
		}
		if (j == *n_data) {
			data[j].val = new[i].val;
			(*n_data)++;
		}
	}
}

static const char *csd_lock_get_type(unsigned int type)
{
	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
}

static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
{
	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
	unsigned int srccpu = csd->node.src;
	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
	unsigned int now;
	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
	unsigned int n_data = 0, i;

	data[0].val = READ_ONCE(cfd_seq);
	now = data[0].u.cnt;

	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);

	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);

	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);

	for (i = 0; i < n_data; i++) {
		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
	}
	pr_alert("\tcsd: cnt now: %07x\n", now);
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	/* How long since we last checked for a stuck CSD lock. */
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	/* How long since this CSD lock was stuck. */
	ts_delta = ts2 - ts0;
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
		 cpu, csd->func, csd->info);
	/*
	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
	 * to become unstuck. Use a signed comparison to avoid triggering
	 * on underflows when the TSC is out of sync between sockets.
	 */
	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (static_branch_unlikely(&csdlock_debug_extended))
			csd_lock_print_extended(csd, cpu);
		dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
{
	unsigned int this_cpu = smp_processor_id();
	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);

	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	} else {
		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
	}
}
#else
#define cfd_seq_store(var, src, dst, type)

static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	if (static_branch_unlikely(&csdlock_debug_extended)) {
		unsigned int type;

		type = CSD_TYPE(container_of(node, call_single_data_t,
					     node.llist));
		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
			__smp_call_single_queue_debug(cpu, node);
			return;
		}
	}
#endif

	/*
	 * The list addition should be visible to the target CPU's IPI
	 * handler, which locks the list to pull entries off it, before the
	 * IPI is sent; this follows from the normal cache coherency rules
	 * implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_GOTIPI);
	__flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HANDLE);
	entry = llist_del_all(head);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
		      /* Special meaning of source cpu: 0 == queue empty */
		      entry ? CFD_SEQ_NOCPU : 0,
		      smp_processor_id(), CFD_SEQ_DEQUEUE);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry) {
		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
			      0, smp_processor_id(),
			      CFD_SEQ_HDLEND);
		return;
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HDLEND);
}


/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that an eventually pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_IDLE);
	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
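/*
 * Minimal usage sketch (illustrative only, not part of this file): run a
 * fast, non-blocking callback on one CPU and wait for it to finish. The
 * callback and the "which_cpu" parameter are hypothetical names.
 */
#if 0
static void example_read_some_counter(void *info)
{
	*(u64 *)info = 42;	/* e.g. read a CPU-local counter */
}

static int example_single_call(int which_cpu)
{
	u64 value = 0;
	int ret;

	/* wait == 1: returns only after the callback ran on @which_cpu. */
	ret = smp_call_function_single(which_cpu, example_read_some_counter,
				       &value, 1);
	if (!ret)
		pr_debug("CPU%d reported %llu\n", which_cpu, value);
	return ret;
}
#endif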

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
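/*
 * Illustrative sketch (not part of this file): asynchronous use with a
 * caller-owned csd embedded in a hypothetical per-device structure. The
 * caller must not reuse the csd until the previous call has completed,
 * which is exactly what the -EBUSY check above enforces.
 */
#if 0
struct example_dev {
	call_single_data_t csd;
	int poke_count;
};

static void example_poke(void *info)
{
	struct example_dev *dev = info;

	dev->poke_count++;
}

static void example_dev_init(struct example_dev *dev)
{
	INIT_CSD(&dev->csd, example_poke, dev);
}

static int example_poke_cpu(struct example_dev *dev, int cpu)
{
	/* Returns -EBUSY if a previous async call on this csd is still in flight. */
	return smp_call_function_single_async(cpu, &dev->csd);
}
#endif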

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
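/*
 * Illustrative sketch (not part of this file): run a callback on whichever
 * CPU of a hypothetical device-affinity mask is cheapest to reach, preferring
 * the local CPU and then the local node as described above.
 */
#if 0
static void example_touch_device(void *info)
{
	/* fast, non-blocking work on behalf of the device in @info */
}

static int example_touch_on_affine_cpu(const struct cpumask *dev_mask,
				       void *dev)
{
	return smp_call_function_any(dev_mask, example_touch_device, dev, 1);
}
#endif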

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	bool run_remote = false;
	bool run_local = false;
	int nr_cpus = 0;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
			call_single_data_t *csd = &pcpu->csd;

			if (cond_func && !cond_func(cpu, info))
				continue;

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;

				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
			} else {
				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
			}
		}

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
 *        (atomically) until function has completed on other CPUs. If
 *        %SCF_RUN_LOCAL is set, the function will also be run locally
 *        if the local CPU is set in the @cpumask.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
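/*
 * Illustrative sketch (not part of this file): broadcast a drain callback to
 * the online CPUs of a hypothetical mask and wait for all of them to finish.
 * Preemption must be disabled around the call, as documented above.
 */
#if 0
static void example_drain(void *info)
{
	/* drain a per-CPU structure; must be fast and non-blocking */
}

static void example_drain_cpus(const struct cpumask *cpus)
{
	preempt_disable();
	smp_call_function_many(cpus, example_drain, NULL, true);
	preempt_enable();
}
#endif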

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
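/*
 * Illustrative sketch (not part of this file): make every other CPU run a
 * callback, then handle the local CPU directly, since smp_call_function()
 * itself never runs @func on the calling CPU. All names are hypothetical.
 */
#if 0
static void example_sync_state(void *info)
{
	/* update a CPU-local view of the state pointed to by @info */
}

static void example_sync_all_cpus(void *state)
{
	/* Other CPUs via IPI (wait for them), then the local CPU directly. */
	smp_call_function(example_sync_state, state, 1);

	preempt_disable();
	example_sync_state(state);
	preempt_enable();
}
#endif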

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		set_nr_cpu_ids(nr_cpus);

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
#endif

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
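/*
 * Illustrative sketch (not part of this file): only IPI the CPUs whose
 * hypothetical per-CPU queue is non-empty. The predicate runs with
 * preemption disabled; the worker runs on each selected CPU.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, example_queue_len);

static bool example_cpu_has_work(int cpu, void *info)
{
	return per_cpu(example_queue_len, cpu) != 0;
}

static void example_flush_queue(void *info)
{
	this_cpu_write(example_queue_len, 0);
}

static void example_flush_busy_cpus(void)
{
	on_each_cpu_cond_mask(example_cpu_has_work, example_flush_queue,
			      NULL, true, cpu_online_mask);
}
#endif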

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
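/*
 * Illustrative sketch (not part of this file), in the spirit of the pm_idle
 * use described above: publish a new idle-callback pointer, then force every
 * CPU through the dummy IPI so that no CPU is still executing via the old
 * pointer afterwards. The pointer and function names are hypothetical.
 */
#if 0
static void (*example_idle_callback)(void);

static void example_switch_idle_callback(void (*new_cb)(void))
{
	WRITE_ONCE(example_idle_callback, new_cb);
	/* kick_all_cpus_sync() includes the smp_mb() ordering the store above. */
	kick_all_cpus_sync();
	/* From here on, no CPU is still idling via the old callback pointer. */
}
#endif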

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus out of idle, including
 * cpus that are idle-polling; non-idle cpus are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		preempt_disable();
		if (cpu != smp_processor_id() && cpu_online(cpu))
			wake_up_if_idle(cpu);
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
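/*
 * Illustrative sketch (not part of this file): unlike the IPI-based calls
 * above, smp_call_on_cpu() runs @func from a workqueue on the target CPU, so
 * the callback is allowed to sleep. The query function below is a
 * hypothetical example of such a callback.
 */
#if 0
static int example_query(void *data)
{
	u32 *out = data;

	*out = 0;	/* e.g. a register or firmware query that may sleep */
	return 0;
}

static int example_query_cpu(unsigned int cpu)
{
	u32 value;
	int ret;

	ret = smp_call_on_cpu(cpu, example_query, &value, false);
	return ret ? ret : value;
}
#endif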