// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
union cfd_seq_cnt {
	u64		val;
	struct {
		u64	src:16;
		u64	dst:16;
#define CFD_SEQ_NOCPU	0xffff
		u64	type:4;
#define CFD_SEQ_QUEUE	0
#define CFD_SEQ_IPI	1
#define CFD_SEQ_NOIPI	2
#define CFD_SEQ_PING	3
#define CFD_SEQ_PINGED	4
#define CFD_SEQ_HANDLE	5
#define CFD_SEQ_DEQUEUE	6
#define CFD_SEQ_IDLE	7
#define CFD_SEQ_GOTIPI	8
#define CFD_SEQ_HDLEND	9
		u64	cnt:28;
	} u;
};
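
/*
 * For illustration only (not part of the interface): a snapshot such as
 *
 *	union cfd_seq_cnt snap = CFD_SEQ(2, 5, CFD_SEQ_IPI, 7);
 *
 * encodes "global event number 7 was CPU 2 sending an IPI to CPU 5".
 * CFD_SEQ_NOCPU in .src or .dst marks events with no CPU on that side,
 * e.g. handler-local events such as CFD_SEQ_IDLE. (The CFD_SEQ() helper
 * used above is defined further down inside this same #ifdef block.)
 */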

static char *seq_type[] = {
	[CFD_SEQ_QUEUE]		= "queue",
	[CFD_SEQ_IPI]		= "ipi",
	[CFD_SEQ_NOIPI]		= "noipi",
	[CFD_SEQ_PING]		= "ping",
	[CFD_SEQ_PINGED]	= "pinged",
	[CFD_SEQ_HANDLE]	= "handle",
	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
	[CFD_SEQ_IDLE]		= "idle",
	[CFD_SEQ_GOTIPI]	= "gotipi",
	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
};

struct cfd_seq_local {
	u64	ping;
	u64	pinged;
	u64	handle;
	u64	dequeue;
	u64	idle;
	u64	gotipi;
	u64	hdlend;
};
#endif

struct cfd_percpu {
	call_single_data_t	csd;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u64	seq_queue;
	u64	seq_ipi;
	u64	seq_noipi;
#endif
};

struct call_function_data {
	struct cfd_percpu	__percpu *pcpu;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	if (str && !strcmp(str, "ext")) {
		val = 1;
		static_branch_enable(&csdlock_debug_extended);
	} else
		get_option(&str, &val);

	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

#define CFD_SEQ(s, d, t, c)	\
	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }

static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
{
	union cfd_seq_cnt new, old;

	new = CFD_SEQ(src, dst, type, 0);

	do {
		old.val = READ_ONCE(cfd_seq);
		new.u.cnt = old.u.cnt + 1;
	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);

	return old.val;
}

#define cfd_seq_store(var, src, dst, type)				\
	do {								\
		if (static_branch_unlikely(&csdlock_debug_extended))	\
			var = cfd_seq_inc(src, dst, type);		\
	} while (0)

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
	/* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
			     unsigned int type, union cfd_seq_cnt *data,
			     unsigned int *n_data, unsigned int now)
{
	union cfd_seq_cnt new[2];
	unsigned int i, j, k;

	new[0].val = val;
	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);

	for (i = 0; i < 2; i++) {
		if (new[i].u.cnt <= now)
			new[i].u.cnt |= 0x80000000U;
		for (j = 0; j < *n_data; j++) {
			if (new[i].u.cnt == data[j].u.cnt) {
				/* Direct read value trumps generated one. */
				if (i == 0)
					data[j].val = new[i].val;
				break;
			}
			if (new[i].u.cnt < data[j].u.cnt) {
				for (k = *n_data; k > j; k--)
					data[k].val = data[k - 1].val;
				data[j].val = new[i].val;
				(*n_data)++;
				break;
			}
		}
		if (j == *n_data) {
			data[j].val = new[i].val;
			(*n_data)++;
		}
	}
}

static const char *csd_lock_get_type(unsigned int type)
{
	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
}

static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
{
	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
	unsigned int srccpu = csd->node.src;
	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
	unsigned int now;
	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
	unsigned int n_data = 0, i;

	data[0].val = READ_ONCE(cfd_seq);
	now = data[0].u.cnt;

	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);

	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);

	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);

	for (i = 0; i < n_data; i++) {
		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
	}
	pr_alert("\tcsd: cnt now: %07x\n", now);
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (static_branch_unlikely(&csdlock_debug_extended))
			csd_lock_print_extended(csd, cpu);
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
{
	unsigned int this_cpu = smp_processor_id();
	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);

	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	} else {
		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
	}
}
#else
#define cfd_seq_store(var, src, dst, type)

static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}
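
/*
 * A sketch of the resulting lock protocol (illustration only, not extra
 * API): a waiter and the remote IPI handler communicate solely through
 * CSD_FLAG_LOCK in ->node.u_flags:
 *
 *	caller				remote CPU
 *	------				----------
 *	csd_lock(csd);
 *	queue csd, send IPI
 *	csd_lock_wait(csd);		func(info);
 *	  ...spins...			csd_unlock(csd);
 *	  sees !CSD_FLAG_LOCK,
 *	  returns with acquire semantics
 *
 * The smp_cond_load_acquire() in csd_lock_wait() pairs with the
 * smp_store_release() in csd_unlock(), so everything func() wrote is
 * visible to the waiter once the wait returns.
 */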

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	if (static_branch_unlikely(&csdlock_debug_extended)) {
		unsigned int type;

		type = CSD_TYPE(container_of(node, call_single_data_t,
					     node.llist));
		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
			__smp_call_single_queue_debug(cpu, node);
			return;
		}
	}
#endif

	/*
	 * The list addition should be visible, before the IPI is sent, to
	 * the handler that locks the list to pull the entry off it; this
	 * follows from normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_GOTIPI);
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HANDLE);
	entry = llist_del_all(head);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
		      /* Special meaning of source cpu: 0 == queue empty */
		      entry ? CFD_SEQ_NOCPU : 0,
		      smp_processor_id(), CFD_SEQ_DEQUEUE);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry) {
		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
			      0, smp_processor_id(),
			      CFD_SEQ_HDLEND);
		return;
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HDLEND);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_IDLE);
	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq();

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU, and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock on
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
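
/*
 * Example (hypothetical caller, for illustration only): synchronously
 * sample a per-CPU counter on CPU 2 and wait for the result:
 *
 *	static void read_remote(void *info)
 *	{
 *		*(unsigned long *)info = this_cpu_read(my_counter);	// my_counter is made up
 *	}
 *
 *	unsigned long val;
 *	int err = smp_call_function_single(2, read_remote, &val, 1);
 *
 * On success (err == 0) the callback has run on CPU 2 with interrupts
 * disabled and @val is filled in before the call returns; -ENXIO is
 * returned if CPU 2 is offline.
 */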

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (i.e. embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
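
/*
 * Example (a sketch with made-up names): embed the csd in the object it
 * belongs to, so each object carries its own serialization state:
 *
 *	struct my_device {
 *		call_single_data_t csd;
 *		int target_cpu;
 *	};
 *
 *	static void my_device_poke(void *info)
 *	{
 *		struct my_device *dev = info;	// runs on dev->target_cpu
 *	}
 *
 *	INIT_CSD(&dev->csd, my_device_poke, dev);	// once, at init time
 *
 *	// later, possibly with interrupts disabled:
 *	if (smp_call_function_single_async(dev->target_cpu, &dev->csd) == -EBUSY)
 *		;	// previous poke still in flight, nothing to do
 *
 * Re-submitting before the previous IPI has been handled is exactly
 * what the -EBUSY return above guards against.
 */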

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
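
/*
 * For illustration (hypothetical caller): trigger a fast sampling
 * callback on whichever CPU of a mask is cheapest to reach:
 *
 *	static void sample_now(void *info) { ... }
 *
 *	int err = smp_call_function_any(some_mask, sample_now, NULL, 1);
 *
 * "some_mask" and the callback are assumptions for the sketch. Per the
 * selection preference above, this avoids an IPI entirely when the
 * current CPU is in the mask, and prefers a same-node CPU otherwise.
 */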

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	bool run_remote = false;
	bool run_local = false;
	int nr_cpus = 0;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU, and as such
	 * deadlocks can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock on
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
			call_single_data_t *csd = &pcpu->csd;

			if (cond_func && !cond_func(cpu, info))
				continue;

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;

				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
			} else {
				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
			}
		}

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
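
/*
 * Example (hypothetical caller): run a made-up invalidation callback on
 * all other online CPUs and wait for completion:
 *
 *	static void invalidate_shadow(void *info) { ... }
 *
 *	preempt_disable();
 *	smp_call_function_many(cpu_online_mask, invalidate_shadow, NULL, true);
 *	preempt_enable();
 *
 * Note the required preempt_disable() bracket, and that the local CPU is
 * deliberately skipped; use on_each_cpu() or the _cond variants with
 * SCF_RUN_LOCAL semantics to include it.
 */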

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* This is the hard limit on the number of CPU ids. */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @mask:	The set of CPUs that are candidates for the call.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
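
/*
 * Example (a sketch with made-up names): drain a per-CPU cache only on
 * CPUs whose cache is non-empty, waiting for all of them:
 *
 *	static bool cache_nonempty(int cpu, void *info)
 *	{
 *		return per_cpu(my_cache_count, cpu) != 0;	// hypothetical counter
 *	}
 *
 *	static void drain_cache(void *info) { ... }
 *
 *	on_each_cpu_cond_mask(cache_nonempty, drain_cache, NULL, true,
 *			      cpu_online_mask);
 *
 * The predicate runs with preemption disabled on the calling CPU; only
 * CPUs for which it returns true are sent an IPI (or run locally).
 */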

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to wake up all CPUs that are in an idle state, including CPUs
 * that are busy-polling in idle; non-idle CPUs are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
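
/*
 * Example (hypothetical caller): unlike the IPI-based helpers above,
 * smp_call_on_cpu() runs @func from a workqueue worker, so @func may
 * sleep. A sketch that reads state requiring sleeping locks on CPU 3:
 *
 *	static int read_slow_state(void *data)
 *	{
 *		...			// may take mutexes, do I/O, etc.
 *		return 0;
 *	}
 *
 *	int ret = smp_call_on_cpu(3, read_slow_state, NULL, false);
 *
 * With @phys == true the worker is additionally pinned to physical CPU 3
 * via hypervisor_pin_vcpu() for the duration of the call.
 */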