1 /* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
3 *
4 * This code is licensed under the GPL.
5 */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/hotplug.h>
12 #include <linux/sched/task.h>
13 #include <linux/sched/smt.h>
14 #include <linux/unistd.h>
15 #include <linux/cpu.h>
16 #include <linux/oom.h>
17 #include <linux/rcupdate.h>
18 #include <linux/export.h>
19 #include <linux/bug.h>
20 #include <linux/kthread.h>
21 #include <linux/stop_machine.h>
22 #include <linux/mutex.h>
23 #include <linux/gfp.h>
24 #include <linux/suspend.h>
25 #include <linux/lockdep.h>
26 #include <linux/tick.h>
27 #include <linux/irq.h>
28 #include <linux/nmi.h>
29 #include <linux/smpboot.h>
30 #include <linux/relay.h>
31 #include <linux/slab.h>
32 #include <linux/percpu-rwsem.h>
33
34 #include <trace/events/power.h>
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/cpuhp.h>
37
38 #include "smpboot.h"
39
40 /**
41 * cpuhp_cpu_state - Per cpu hotplug state storage
42 * @state: The current cpu state
43 * @target: The target state
44 * @thread: Pointer to the hotplug thread
45 * @should_run: Thread should execute
46 * @rollback: Perform a rollback
47 * @single: Single callback invocation
48 * @bringup: Single callback bringup or teardown selector
49 * @cb_state: The state for a single callback (install/uninstall)
50 * @result: Result of the operation
51 * @done_up: Signal completion to the issuer of the task for cpu-up
52 * @done_down: Signal completion to the issuer of the task for cpu-down
53 */
54 struct cpuhp_cpu_state {
55 enum cpuhp_state state;
56 enum cpuhp_state target;
57 enum cpuhp_state fail;
58 #ifdef CONFIG_SMP
59 struct task_struct *thread;
60 bool should_run;
61 bool rollback;
62 bool single;
63 bool bringup;
64 bool booted_once;
65 struct hlist_node *node;
66 struct hlist_node *last;
67 enum cpuhp_state cb_state;
68 int result;
69 struct completion done_up;
70 struct completion done_down;
71 #endif
72 };
73
74 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
75 .fail = CPUHP_INVALID,
76 };
77
78 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
79 static struct lockdep_map cpuhp_state_up_map =
80 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
81 static struct lockdep_map cpuhp_state_down_map =
82 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
83
84
85 static inline void cpuhp_lock_acquire(bool bringup)
86 {
87 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
88 }
89
90 static inline void cpuhp_lock_release(bool bringup)
91 {
92 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
93 }
94 #else
95
96 static inline void cpuhp_lock_acquire(bool bringup) { }
97 static inline void cpuhp_lock_release(bool bringup) { }
98
99 #endif
100
101 /**
102 * cpuhp_step - Hotplug state machine step
103 * @name: Name of the step
104 * @startup: Startup function of the step
105 * @teardown: Teardown function of the step
106 * @skip_onerr: Do not invoke the functions on error rollback
107 * Will go away once the notifiers are gone
108 * @cant_stop: Bringup/teardown can't be stopped at this step
109 */
110 struct cpuhp_step {
111 const char *name;
112 union {
113 int (*single)(unsigned int cpu);
114 int (*multi)(unsigned int cpu,
115 struct hlist_node *node);
116 } startup;
117 union {
118 int (*single)(unsigned int cpu);
119 int (*multi)(unsigned int cpu,
120 struct hlist_node *node);
121 } teardown;
122 struct hlist_head list;
123 bool skip_onerr;
124 bool cant_stop;
125 bool multi_instance;
126 };
127
128 static DEFINE_MUTEX(cpuhp_state_mutex);
129 static struct cpuhp_step cpuhp_bp_states[];
130 static struct cpuhp_step cpuhp_ap_states[];
131
132 static bool cpuhp_is_ap_state(enum cpuhp_state state)
133 {
134 /*
135 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
136 * purposes as that state is handled explicitly in cpu_down.
137 */
138 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
139 }
140
141 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
142 {
143 struct cpuhp_step *sp;
144
145 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
146 return sp + state;
147 }
148
149 /**
150 * cpuhp_invoke_callback - Invoke the callbacks for a given state
151 * @cpu: The cpu for which the callback should be invoked
152 * @state: The state to do callbacks for
153 * @bringup: True if the bringup callback should be invoked
154 * @node: For multi-instance, do a single entry callback for install/remove
155 * @lastp: For multi-instance rollback, remember how far we got
156 *
157 * Called from cpu hotplug and from the state register machinery.
158 */
159 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
160 bool bringup, struct hlist_node *node,
161 struct hlist_node **lastp)
162 {
163 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
164 struct cpuhp_step *step = cpuhp_get_step(state);
165 int (*cbm)(unsigned int cpu, struct hlist_node *node);
166 int (*cb)(unsigned int cpu);
167 int ret, cnt;
168
169 if (st->fail == state) {
170 st->fail = CPUHP_INVALID;
171
172 if (!(bringup ? step->startup.single : step->teardown.single))
173 return 0;
174
175 return -EAGAIN;
176 }
177
178 if (!step->multi_instance) {
179 WARN_ON_ONCE(lastp && *lastp);
180 cb = bringup ? step->startup.single : step->teardown.single;
181 if (!cb)
182 return 0;
183 trace_cpuhp_enter(cpu, st->target, state, cb);
184 ret = cb(cpu);
185 trace_cpuhp_exit(cpu, st->state, state, ret);
186 return ret;
187 }
188 cbm = bringup ? step->startup.multi : step->teardown.multi;
189 if (!cbm)
190 return 0;
191
192 /* Single invocation for instance add/remove */
193 if (node) {
194 WARN_ON_ONCE(lastp && *lastp);
195 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
196 ret = cbm(cpu, node);
197 trace_cpuhp_exit(cpu, st->state, state, ret);
198 return ret;
199 }
200
201 /* State transition. Invoke on all instances */
202 cnt = 0;
203 hlist_for_each(node, &step->list) {
204 if (lastp && node == *lastp)
205 break;
206
207 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
208 ret = cbm(cpu, node);
209 trace_cpuhp_exit(cpu, st->state, state, ret);
210 if (ret) {
211 if (!lastp)
212 goto err;
213
214 *lastp = node;
215 return ret;
216 }
217 cnt++;
218 }
219 if (lastp)
220 *lastp = NULL;
221 return 0;
222 err:
223 /* Rollback the instances if one failed */
224 cbm = !bringup ? step->startup.multi : step->teardown.multi;
225 if (!cbm)
226 return ret;
227
228 hlist_for_each(node, &step->list) {
229 if (!cnt--)
230 break;
231
232 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
233 ret = cbm(cpu, node);
234 trace_cpuhp_exit(cpu, st->state, state, ret);
235 /*
236 * Rollback must not fail.
237 */
238 WARN_ON_ONCE(ret);
239 }
240 return ret;
241 }
242
243 #ifdef CONFIG_SMP
244 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
245 {
246 struct completion *done = bringup ? &st->done_up : &st->done_down;
247 wait_for_completion(done);
248 }
249
250 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
251 {
252 struct completion *done = bringup ? &st->done_up : &st->done_down;
253 complete(done);
254 }
255
256 /*
257 * The former STARTING/DYING states run with IRQs disabled and must not fail.
258 */
259 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
260 {
261 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
262 }
263
264 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
265 static DEFINE_MUTEX(cpu_add_remove_lock);
266 bool cpuhp_tasks_frozen;
267 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
268
269 /*
270 * The following two APIs (cpu_maps_update_begin/done) must be used when
271 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
272 */
273 void cpu_maps_update_begin(void)
274 {
275 mutex_lock(&cpu_add_remove_lock);
276 }
277
278 void cpu_maps_update_done(void)
279 {
280 mutex_unlock(&cpu_add_remove_lock);
281 }
282
283 /*
284 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
285 * Should always be manipulated under cpu_add_remove_lock
286 */
287 static int cpu_hotplug_disabled;
288
289 #ifdef CONFIG_HOTPLUG_CPU
290
291 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
292
293 void cpus_read_lock(void)
294 {
295 percpu_down_read(&cpu_hotplug_lock);
296 }
297 EXPORT_SYMBOL_GPL(cpus_read_lock);
298
299 void cpus_read_unlock(void)
300 {
301 percpu_up_read(&cpu_hotplug_lock);
302 }
303 EXPORT_SYMBOL_GPL(cpus_read_unlock);
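/*
 * Usage sketch (illustrative only; do_per_cpu_work() is a hypothetical
 * helper): code that merely needs to keep CPUs from coming or going while
 * it walks the online mask takes the read side:
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	cpus_read_unlock();
 *
 * The write side (cpus_write_lock()/cpus_write_unlock()) is reserved for
 * the hotplug control flow itself.
 */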
304
305 void cpus_write_lock(void)
306 {
307 percpu_down_write(&cpu_hotplug_lock);
308 }
309
310 void cpus_write_unlock(void)
311 {
312 percpu_up_write(&cpu_hotplug_lock);
313 }
314
315 void lockdep_assert_cpus_held(void)
316 {
317 /*
318 * We can't have hotplug operations before userspace starts running,
319 * and some init codepaths will knowingly not take the hotplug lock.
320 * This is all valid, so mute lockdep until it makes sense to report
321 * unheld locks.
322 */
323 if (system_state < SYSTEM_RUNNING)
324 return;
325
326 percpu_rwsem_assert_held(&cpu_hotplug_lock);
327 }
328
329 /*
330 * Wait for currently running CPU hotplug operations to complete (if any) and
331 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
332 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
333 * hotplug path before performing hotplug operations. So acquiring that lock
334 * guarantees mutual exclusion from any currently running hotplug operations.
335 */
336 void cpu_hotplug_disable(void)
337 {
338 cpu_maps_update_begin();
339 cpu_hotplug_disabled++;
340 cpu_maps_update_done();
341 }
342 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
343
344 static void __cpu_hotplug_enable(void)
345 {
346 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
347 return;
348 cpu_hotplug_disabled--;
349 }
350
351 void cpu_hotplug_enable(void)
352 {
353 cpu_maps_update_begin();
354 __cpu_hotplug_enable();
355 cpu_maps_update_done();
356 }
357 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
358 #endif /* CONFIG_HOTPLUG_CPU */
359
360 /*
361 * Architectures that need SMT-specific errata handling during SMT hotplug
362 * should override this.
363 */
364 void __weak arch_smt_update(void) { }
365
366 #ifdef CONFIG_HOTPLUG_SMT
367 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
368
369 void __init cpu_smt_disable(bool force)
370 {
371 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
372 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
373 return;
374
375 if (force) {
376 pr_info("SMT: Force disabled\n");
377 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
378 } else {
379 pr_info("SMT: disabled\n");
380 cpu_smt_control = CPU_SMT_DISABLED;
381 }
382 }
383
384 /*
385 * The decision whether SMT is supported can only be made after the full
386 * CPU identification. Called from architecture code.
387 */
388 void __init cpu_smt_check_topology(void)
389 {
390 if (!topology_smt_supported())
391 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
392 }
393
394 static int __init smt_cmdline_disable(char *str)
395 {
396 cpu_smt_disable(str && !strcmp(str, "force"));
397 return 0;
398 }
399 early_param("nosmt", smt_cmdline_disable);
400
401 static inline bool cpu_smt_allowed(unsigned int cpu)
402 {
403 if (cpu_smt_control == CPU_SMT_ENABLED)
404 return true;
405
406 if (topology_is_primary_thread(cpu))
407 return true;
408
409 /*
410 * On x86 it's required to boot all logical CPUs at least once so
411 * that the init code can get a chance to set CR4.MCE on each
412 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
413 * core will shut down the machine.
414 */
415 return !per_cpu(cpuhp_state, cpu).booted_once;
416 }
417 #else
418 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
419 #endif
420
421 static inline enum cpuhp_state
422 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
423 {
424 enum cpuhp_state prev_state = st->state;
425
426 st->rollback = false;
427 st->last = NULL;
428
429 st->target = target;
430 st->single = false;
431 st->bringup = st->state < target;
432
433 return prev_state;
434 }
435
436 static inline void
437 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
438 {
439 st->rollback = true;
440
441 /*
442 * If we have st->last we need to undo partial multi_instance of this
443 * state first. Otherwise start undo at the previous state.
444 */
445 if (!st->last) {
446 if (st->bringup)
447 st->state--;
448 else
449 st->state++;
450 }
451
452 st->target = prev_state;
453 st->bringup = !st->bringup;
454 }
455
456 /* Regular hotplug invocation of the AP hotplug thread */
457 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
458 {
459 if (!st->single && st->state == st->target)
460 return;
461
462 st->result = 0;
463 /*
464 * Make sure the above stores are visible before should_run becomes
465 * true. Paired with the mb() above in cpuhp_thread_fun()
466 */
467 smp_mb();
468 st->should_run = true;
469 wake_up_process(st->thread);
470 wait_for_ap_thread(st, st->bringup);
471 }
472
473 static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
474 {
475 enum cpuhp_state prev_state;
476 int ret;
477
478 prev_state = cpuhp_set_state(st, target);
479 __cpuhp_kick_ap(st);
480 if ((ret = st->result)) {
481 cpuhp_reset_state(st, prev_state);
482 __cpuhp_kick_ap(st);
483 }
484
485 return ret;
486 }
487
488 static int bringup_wait_for_ap(unsigned int cpu)
489 {
490 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
491
492 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
493 wait_for_ap_thread(st, true);
494 if (WARN_ON_ONCE((!cpu_online(cpu))))
495 return -ECANCELED;
496
497 /* Unpark the hotplug thread of the target cpu */
498 kthread_unpark(st->thread);
499
500 /*
501 * SMT soft disabling on X86 requires to bring the CPU out of the
502 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
503 * CPU marked itself as booted_once in cpu_notify_starting() so the
504 * cpu_smt_allowed() check will now return false if this is not the
505 * primary sibling.
506 */
507 if (!cpu_smt_allowed(cpu))
508 return -ECANCELED;
509
510 if (st->target <= CPUHP_AP_ONLINE_IDLE)
511 return 0;
512
513 return cpuhp_kick_ap(st, st->target);
514 }
515
516 static int bringup_cpu(unsigned int cpu)
517 {
518 struct task_struct *idle = idle_thread_get(cpu);
519 int ret;
520
521 /*
522 * Some architectures have to walk the irq descriptors to
523 * setup the vector space for the cpu which comes online.
524 * Prevent irq alloc/free across the bringup.
525 */
526 irq_lock_sparse();
527
528 /* Arch-specific enabling code. */
529 ret = __cpu_up(cpu, idle);
530 irq_unlock_sparse();
531 if (ret)
532 return ret;
533 return bringup_wait_for_ap(cpu);
534 }
535
536 /*
537 * Hotplug state machine related functions
538 */
539
540 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
541 {
542 for (st->state--; st->state > st->target; st->state--) {
543 struct cpuhp_step *step = cpuhp_get_step(st->state);
544
545 if (!step->skip_onerr)
546 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
547 }
548 }
549
550 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
551 {
552 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
553 return true;
554 /*
555 * When CPU hotplug is disabled, then taking the CPU down is not
556 * possible because takedown_cpu() and the architecture and
557 * subsystem specific mechanisms are not available. So the CPU
558 * which would be completely unplugged again needs to stay around
559 * in the current state.
560 */
561 return st->state <= CPUHP_BRINGUP_CPU;
562 }
563
564 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
565 enum cpuhp_state target)
566 {
567 enum cpuhp_state prev_state = st->state;
568 int ret = 0;
569
570 while (st->state < target) {
571 st->state++;
572 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
573 if (ret) {
574 if (can_rollback_cpu(st)) {
575 st->target = prev_state;
576 undo_cpu_up(cpu, st);
577 }
578 break;
579 }
580 }
581 return ret;
582 }
583
584 /*
585 * The cpu hotplug threads manage the bringup and teardown of the cpus
586 */
587 static void cpuhp_create(unsigned int cpu)
588 {
589 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
590
591 init_completion(&st->done_up);
592 init_completion(&st->done_down);
593 }
594
595 static int cpuhp_should_run(unsigned int cpu)
596 {
597 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
598
599 return st->should_run;
600 }
601
602 /*
603 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
604 * callbacks when a state gets [un]installed at runtime.
605 *
606 * Each invocation of this function by the smpboot thread does a single AP
607 * state callback.
608 *
609 * It has 3 modes of operation:
610 * - single: runs st->cb_state
611 * - up: runs ++st->state, while st->state < st->target
612 * - down: runs st->state--, while st->state > st->target
613 *
614 * When complete or on error, should_run is cleared and the completion is fired.
615 */
616 static void cpuhp_thread_fun(unsigned int cpu)
617 {
618 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
619 bool bringup = st->bringup;
620 enum cpuhp_state state;
621
622 if (WARN_ON_ONCE(!st->should_run))
623 return;
624
625 /*
626 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
627 * that if we see ->should_run we also see the rest of the state.
628 */
629 smp_mb();
630
631 cpuhp_lock_acquire(bringup);
632
633 if (st->single) {
634 state = st->cb_state;
635 st->should_run = false;
636 } else {
637 if (bringup) {
638 st->state++;
639 state = st->state;
640 st->should_run = (st->state < st->target);
641 WARN_ON_ONCE(st->state > st->target);
642 } else {
643 state = st->state;
644 st->state--;
645 st->should_run = (st->state > st->target);
646 WARN_ON_ONCE(st->state < st->target);
647 }
648 }
649
650 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
651
652 if (st->rollback) {
653 struct cpuhp_step *step = cpuhp_get_step(state);
654 if (step->skip_onerr)
655 goto next;
656 }
657
658 if (cpuhp_is_atomic_state(state)) {
659 local_irq_disable();
660 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
661 local_irq_enable();
662
663 /*
664 * STARTING/DYING must not fail!
665 */
666 WARN_ON_ONCE(st->result);
667 } else {
668 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
669 }
670
671 if (st->result) {
672 /*
673 * If we fail on a rollback, we're up a creek without a
674 * paddle, no way forward, no way back. We lose, thanks for
675 * playing.
676 */
677 WARN_ON_ONCE(st->rollback);
678 st->should_run = false;
679 }
680
681 next:
682 cpuhp_lock_release(bringup);
683
684 if (!st->should_run)
685 complete_ap_thread(st, bringup);
686 }
687
688 /* Invoke a single callback on a remote cpu */
689 static int
690 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
691 struct hlist_node *node)
692 {
693 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
694 int ret;
695
696 if (!cpu_online(cpu))
697 return 0;
698
699 cpuhp_lock_acquire(false);
700 cpuhp_lock_release(false);
701
702 cpuhp_lock_acquire(true);
703 cpuhp_lock_release(true);
704
705 /*
706 * If we are up and running, use the hotplug thread. For early calls
707 * we invoke the thread function directly.
708 */
709 if (!st->thread)
710 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
711
712 st->rollback = false;
713 st->last = NULL;
714
715 st->node = node;
716 st->bringup = bringup;
717 st->cb_state = state;
718 st->single = true;
719
720 __cpuhp_kick_ap(st);
721
722 /*
723 * If we failed and did a partial, do a rollback.
724 */
725 if ((ret = st->result) && st->last) {
726 st->rollback = true;
727 st->bringup = !bringup;
728
729 __cpuhp_kick_ap(st);
730 }
731
732 /*
733 * Clean up the leftovers so the next hotplug operation won't use stale
734 * data.
735 */
736 st->node = st->last = NULL;
737 return ret;
738 }
739
740 static int cpuhp_kick_ap_work(unsigned int cpu)
741 {
742 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
743 enum cpuhp_state prev_state = st->state;
744 int ret;
745
746 cpuhp_lock_acquire(false);
747 cpuhp_lock_release(false);
748
749 cpuhp_lock_acquire(true);
750 cpuhp_lock_release(true);
751
752 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
753 ret = cpuhp_kick_ap(st, st->target);
754 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
755
756 return ret;
757 }
758
759 static struct smp_hotplug_thread cpuhp_threads = {
760 .store = &cpuhp_state.thread,
761 .create = &cpuhp_create,
762 .thread_should_run = cpuhp_should_run,
763 .thread_fn = cpuhp_thread_fun,
764 .thread_comm = "cpuhp/%u",
765 .selfparking = true,
766 };
767
768 void __init cpuhp_threads_init(void)
769 {
770 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
771 kthread_unpark(this_cpu_read(cpuhp_state.thread));
772 }
773
774 #ifdef CONFIG_HOTPLUG_CPU
775 /**
776 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
777 * @cpu: a CPU id
778 *
779 * This function walks all processes, finds a valid mm struct for each one and
780 * then clears a corresponding bit in mm's cpumask. While this all sounds
781 * trivial, there are various non-obvious corner cases, which this function
782 * tries to solve in a safe manner.
783 *
784 * Also note that the function uses a somewhat relaxed locking scheme, so it may
785 * be called only for an already offlined CPU.
786 */
787 void clear_tasks_mm_cpumask(int cpu)
788 {
789 struct task_struct *p;
790
791 /*
792 * This function is called after the cpu is taken down and marked
793 * offline, so it's not like new tasks will ever get this cpu set in
794 * their mm mask. -- Peter Zijlstra
795 * Thus, we may use rcu_read_lock() here, instead of grabbing
796 * full-fledged tasklist_lock.
797 */
798 WARN_ON(cpu_online(cpu));
799 rcu_read_lock();
800 for_each_process(p) {
801 struct task_struct *t;
802
803 /*
804 * Main thread might exit, but other threads may still have
805 * a valid mm. Find one.
806 */
807 t = find_lock_task_mm(p);
808 if (!t)
809 continue;
810 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
811 task_unlock(t);
812 }
813 rcu_read_unlock();
814 }
815
816 /* Take this CPU down. */
817 static int take_cpu_down(void *_param)
818 {
819 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
820 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
821 int err, cpu = smp_processor_id();
822 int ret;
823
824 /* Ensure this CPU doesn't handle any more interrupts. */
825 err = __cpu_disable();
826 if (err < 0)
827 return err;
828
829 /*
830 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
831 * do this step again.
832 */
833 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
834 st->state--;
835 /* Invoke the former CPU_DYING callbacks */
836 for (; st->state > target; st->state--) {
837 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
838 /*
839 * DYING must not fail!
840 */
841 WARN_ON_ONCE(ret);
842 }
843
844 /* Give up timekeeping duties */
845 tick_handover_do_timer();
846 /* Park the stopper thread */
847 stop_machine_park(cpu);
848 return 0;
849 }
850
851 static int takedown_cpu(unsigned int cpu)
852 {
853 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
854 int err;
855
856 /* Park the smpboot threads */
857 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
858
859 /*
860 * Prevent irq alloc/free while the dying cpu reorganizes the
861 * interrupt affinities.
862 */
863 irq_lock_sparse();
864
865 /*
866 * So now all preempt/rcu users must observe !cpu_active().
867 */
868 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
869 if (err) {
870 /* CPU refused to die */
871 irq_unlock_sparse();
872 /* Unpark the hotplug thread so we can rollback there */
873 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
874 return err;
875 }
876 BUG_ON(cpu_online(cpu));
877
878 /*
879 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
880 * runnable tasks from the cpu, there's only the idle task left now
881 * that the migration thread is done doing the stop_machine thing.
882 *
883 * Wait for the stop thread to go away.
884 */
885 wait_for_ap_thread(st, false);
886 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
887
888 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
889 irq_unlock_sparse();
890
891 hotplug_cpu__broadcast_tick_pull(cpu);
892 /* This actually kills the CPU. */
893 __cpu_die(cpu);
894
895 tick_cleanup_dead_cpu(cpu);
896 rcutree_migrate_callbacks(cpu);
897 return 0;
898 }
899
900 static void cpuhp_complete_idle_dead(void *arg)
901 {
902 struct cpuhp_cpu_state *st = arg;
903
904 complete_ap_thread(st, false);
905 }
906
907 void cpuhp_report_idle_dead(void)
908 {
909 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
910
911 BUG_ON(st->state != CPUHP_AP_OFFLINE);
912 rcu_report_dead(smp_processor_id());
913 st->state = CPUHP_AP_IDLE_DEAD;
914 /*
915 * We cannot call complete after rcu_report_dead() so we delegate it
916 * to an online cpu.
917 */
918 smp_call_function_single(cpumask_first(cpu_online_mask),
919 cpuhp_complete_idle_dead, st, 0);
920 }
921
922 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
923 {
924 for (st->state++; st->state < st->target; st->state++) {
925 struct cpuhp_step *step = cpuhp_get_step(st->state);
926
927 if (!step->skip_onerr)
928 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
929 }
930 }
931
932 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
933 enum cpuhp_state target)
934 {
935 enum cpuhp_state prev_state = st->state;
936 int ret = 0;
937
938 for (; st->state > target; st->state--) {
939 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
940 if (ret) {
941 st->target = prev_state;
942 if (st->state < prev_state)
943 undo_cpu_down(cpu, st);
944 break;
945 }
946 }
947 return ret;
948 }
949
950 /* Requires cpu_add_remove_lock to be held */
951 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
952 enum cpuhp_state target)
953 {
954 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
955 int prev_state, ret = 0;
956
957 if (num_online_cpus() == 1)
958 return -EBUSY;
959
960 if (!cpu_present(cpu))
961 return -EINVAL;
962
963 cpus_write_lock();
964
965 cpuhp_tasks_frozen = tasks_frozen;
966
967 prev_state = cpuhp_set_state(st, target);
968 /*
969 * If the current CPU state is in the range of the AP hotplug thread,
970 * then we need to kick the thread.
971 */
972 if (st->state > CPUHP_TEARDOWN_CPU) {
973 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
974 ret = cpuhp_kick_ap_work(cpu);
975 /*
976 * The AP side has done the error rollback already. Just
977 * return the error code.
978 */
979 if (ret)
980 goto out;
981
982 /*
983 * We might have stopped still in the range of the AP hotplug
984 * thread. Nothing to do anymore.
985 */
986 if (st->state > CPUHP_TEARDOWN_CPU)
987 goto out;
988
989 st->target = target;
990 }
991 /*
992 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
993 * to do the further cleanups.
994 */
995 ret = cpuhp_down_callbacks(cpu, st, target);
996 if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
997 cpuhp_reset_state(st, prev_state);
998 __cpuhp_kick_ap(st);
999 }
1000
1001 out:
1002 cpus_write_unlock();
1003 /*
1004 * Do post unplug cleanup. This is still protected against
1005 * concurrent CPU hotplug via cpu_add_remove_lock.
1006 */
1007 lockup_detector_cleanup();
1008 arch_smt_update();
1009 return ret;
1010 }
1011
1012 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1013 {
1014 if (cpu_hotplug_disabled)
1015 return -EBUSY;
1016 return _cpu_down(cpu, 0, target);
1017 }
1018
1019 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1020 {
1021 int err;
1022
1023 cpu_maps_update_begin();
1024 err = cpu_down_maps_locked(cpu, target);
1025 cpu_maps_update_done();
1026 return err;
1027 }
1028
1029 int cpu_down(unsigned int cpu)
1030 {
1031 return do_cpu_down(cpu, CPUHP_OFFLINE);
1032 }
1033 EXPORT_SYMBOL(cpu_down);
1034
1035 #else
1036 #define takedown_cpu NULL
1037 #endif /*CONFIG_HOTPLUG_CPU*/
1038
1039 /**
1040 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1041 * @cpu: cpu that just started
1042 *
1043 * It must be called by the arch code on the new cpu, before the new cpu
1044 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1045 */
1046 void notify_cpu_starting(unsigned int cpu)
1047 {
1048 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1049 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1050 int ret;
1051
1052 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1053 st->booted_once = true;
1054 while (st->state < target) {
1055 st->state++;
1056 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1057 /*
1058 * STARTING must not fail!
1059 */
1060 WARN_ON_ONCE(ret);
1061 }
1062 }
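/*
 * Example call site (architecture specific, given here only for
 * orientation): x86's start_secondary() invokes notify_cpu_starting()
 * on the freshly booted CPU, with interrupts still disabled, before the
 * CPU is marked online.
 */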
1063
1064 /*
1065 * Called from the idle task. Wake up the controlling task which brings the
1066 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1067 * online bringup to the hotplug thread.
1068 */
1069 void cpuhp_online_idle(enum cpuhp_state state)
1070 {
1071 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1072
1073 /* Happens for the boot cpu */
1074 if (state != CPUHP_AP_ONLINE_IDLE)
1075 return;
1076
1077 /*
1078 * Unpark the stopper thread before we start the idle loop (and start
1079 * scheduling); this ensures the stopper task is always available.
1080 */
1081 stop_machine_unpark(smp_processor_id());
1082
1083 st->state = CPUHP_AP_ONLINE_IDLE;
1084 complete_ap_thread(st, true);
1085 }
1086
1087 /* Requires cpu_add_remove_lock to be held */
1088 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1089 {
1090 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1091 struct task_struct *idle;
1092 int ret = 0;
1093
1094 cpus_write_lock();
1095
1096 if (!cpu_present(cpu)) {
1097 ret = -EINVAL;
1098 goto out;
1099 }
1100
1101 /*
1102 * The caller of do_cpu_up might have raced with another
1103 * caller. Ignore it for now.
1104 */
1105 if (st->state >= target)
1106 goto out;
1107
1108 if (st->state == CPUHP_OFFLINE) {
1109 /* Let it fail before we try to bring the cpu up */
1110 idle = idle_thread_get(cpu);
1111 if (IS_ERR(idle)) {
1112 ret = PTR_ERR(idle);
1113 goto out;
1114 }
1115 }
1116
1117 cpuhp_tasks_frozen = tasks_frozen;
1118
1119 cpuhp_set_state(st, target);
1120 /*
1121 * If the current CPU state is in the range of the AP hotplug thread,
1122 * then we need to kick the thread once more.
1123 */
1124 if (st->state > CPUHP_BRINGUP_CPU) {
1125 ret = cpuhp_kick_ap_work(cpu);
1126 /*
1127 * The AP side has done the error rollback already. Just
1128 * return the error code.
1129 */
1130 if (ret)
1131 goto out;
1132 }
1133
1134 /*
1135 * Try to reach the target state. We max out on the BP at
1136 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1137 * responsible for bringing it up to the target state.
1138 */
1139 target = min((int)target, CPUHP_BRINGUP_CPU);
1140 ret = cpuhp_up_callbacks(cpu, st, target);
1141 out:
1142 cpus_write_unlock();
1143 arch_smt_update();
1144 return ret;
1145 }
1146
1147 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1148 {
1149 int err = 0;
1150
1151 if (!cpu_possible(cpu)) {
1152 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1153 cpu);
1154 #if defined(CONFIG_IA64)
1155 pr_err("please check additional_cpus= boot parameter\n");
1156 #endif
1157 return -EINVAL;
1158 }
1159
1160 err = try_online_node(cpu_to_node(cpu));
1161 if (err)
1162 return err;
1163
1164 cpu_maps_update_begin();
1165
1166 if (cpu_hotplug_disabled) {
1167 err = -EBUSY;
1168 goto out;
1169 }
1170 if (!cpu_smt_allowed(cpu)) {
1171 err = -EPERM;
1172 goto out;
1173 }
1174
1175 err = _cpu_up(cpu, 0, target);
1176 out:
1177 cpu_maps_update_done();
1178 return err;
1179 }
1180
1181 int cpu_up(unsigned int cpu)
1182 {
1183 return do_cpu_up(cpu, CPUHP_ONLINE);
1184 }
1185 EXPORT_SYMBOL_GPL(cpu_up);
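/*
 * Usage sketch (illustrative only): a caller that temporarily needs a CPU
 * out of service can pair the two exported helpers:
 *
 *	ret = cpu_down(cpu);		take @cpu down to CPUHP_OFFLINE
 *	if (!ret) {
 *		...exclusive work...
 *		cpu_up(cpu);		bring it back to CPUHP_ONLINE
 *	}
 *
 * Both paths serialize against concurrent hotplug through
 * cpu_add_remove_lock and return -EBUSY while cpu_hotplug_disabled is set.
 */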
1186
1187 #ifdef CONFIG_PM_SLEEP_SMP
1188 static cpumask_var_t frozen_cpus;
1189
1190 int freeze_secondary_cpus(int primary)
1191 {
1192 int cpu, error = 0;
1193
1194 cpu_maps_update_begin();
1195 if (!cpu_online(primary))
1196 primary = cpumask_first(cpu_online_mask);
1197 /*
1198 * We take down all of the non-boot CPUs in one shot to avoid races
1199 * with the userspace trying to use the CPU hotplug at the same time
1200 */
1201 cpumask_clear(frozen_cpus);
1202
1203 pr_info("Disabling non-boot CPUs ...\n");
1204 for_each_online_cpu(cpu) {
1205 if (cpu == primary)
1206 continue;
1207 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1208 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1209 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1210 if (!error)
1211 cpumask_set_cpu(cpu, frozen_cpus);
1212 else {
1213 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1214 break;
1215 }
1216 }
1217
1218 if (!error)
1219 BUG_ON(num_online_cpus() > 1);
1220 else
1221 pr_err("Non-boot CPUs are not disabled\n");
1222
1223 /*
1224 * Make sure the CPUs won't be enabled by someone else. We need to do
1225 * this even in case of failure as all disable_nonboot_cpus() users are
1226 * supposed to do enable_nonboot_cpus() on the failure path.
1227 */
1228 cpu_hotplug_disabled++;
1229
1230 cpu_maps_update_done();
1231 return error;
1232 }
1233
1234 void __weak arch_enable_nonboot_cpus_begin(void)
1235 {
1236 }
1237
1238 void __weak arch_enable_nonboot_cpus_end(void)
1239 {
1240 }
1241
1242 void enable_nonboot_cpus(void)
1243 {
1244 int cpu, error;
1245 struct device *cpu_device;
1246
1247 /* Allow everyone to use the CPU hotplug again */
1248 cpu_maps_update_begin();
1249 __cpu_hotplug_enable();
1250 if (cpumask_empty(frozen_cpus))
1251 goto out;
1252
1253 pr_info("Enabling non-boot CPUs ...\n");
1254
1255 arch_enable_nonboot_cpus_begin();
1256
1257 for_each_cpu(cpu, frozen_cpus) {
1258 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1259 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1260 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1261 if (!error) {
1262 pr_info("CPU%d is up\n", cpu);
1263 cpu_device = get_cpu_device(cpu);
1264 if (!cpu_device)
1265 pr_err("%s: failed to get cpu%d device\n",
1266 __func__, cpu);
1267 else
1268 kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
1269 continue;
1270 }
1271 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1272 }
1273
1274 arch_enable_nonboot_cpus_end();
1275
1276 cpumask_clear(frozen_cpus);
1277 out:
1278 cpu_maps_update_done();
1279 }
1280
1281 static int __init alloc_frozen_cpus(void)
1282 {
1283 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1284 return -ENOMEM;
1285 return 0;
1286 }
1287 core_initcall(alloc_frozen_cpus);
1288
1289 /*
1290 * When callbacks for CPU hotplug notifications are being executed, we must
1291 * ensure that the state of the system with respect to the tasks being frozen
1292 * or not, as reported by the notification, remains unchanged *throughout the
1293 * duration* of the execution of the callbacks.
1294 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1295 *
1296 * This synchronization is implemented by mutually excluding regular CPU
1297 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1298 * Hibernate notifications.
1299 */
1300 static int
1301 cpu_hotplug_pm_callback(struct notifier_block *nb,
1302 unsigned long action, void *ptr)
1303 {
1304 switch (action) {
1305
1306 case PM_SUSPEND_PREPARE:
1307 case PM_HIBERNATION_PREPARE:
1308 cpu_hotplug_disable();
1309 break;
1310
1311 case PM_POST_SUSPEND:
1312 case PM_POST_HIBERNATION:
1313 cpu_hotplug_enable();
1314 break;
1315
1316 default:
1317 return NOTIFY_DONE;
1318 }
1319
1320 return NOTIFY_OK;
1321 }
1322
1323
1324 static int __init cpu_hotplug_pm_sync_init(void)
1325 {
1326 /*
1327 * cpu_hotplug_pm_callback has higher priority than x86
1328 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1329 * to disable cpu hotplug to avoid cpu hotplug race.
1330 */
1331 pm_notifier(cpu_hotplug_pm_callback, 0);
1332 return 0;
1333 }
1334 core_initcall(cpu_hotplug_pm_sync_init);
1335
1336 #endif /* CONFIG_PM_SLEEP_SMP */
1337
1338 int __boot_cpu_id;
1339
1340 #endif /* CONFIG_SMP */
1341
1342 /* Boot processor state steps */
1343 static struct cpuhp_step cpuhp_bp_states[] = {
1344 [CPUHP_OFFLINE] = {
1345 .name = "offline",
1346 .startup.single = NULL,
1347 .teardown.single = NULL,
1348 },
1349 #ifdef CONFIG_SMP
1350 [CPUHP_CREATE_THREADS]= {
1351 .name = "threads:prepare",
1352 .startup.single = smpboot_create_threads,
1353 .teardown.single = NULL,
1354 .cant_stop = true,
1355 },
1356 [CPUHP_PERF_PREPARE] = {
1357 .name = "perf:prepare",
1358 .startup.single = perf_event_init_cpu,
1359 .teardown.single = perf_event_exit_cpu,
1360 },
1361 [CPUHP_WORKQUEUE_PREP] = {
1362 .name = "workqueue:prepare",
1363 .startup.single = workqueue_prepare_cpu,
1364 .teardown.single = NULL,
1365 },
1366 [CPUHP_HRTIMERS_PREPARE] = {
1367 .name = "hrtimers:prepare",
1368 .startup.single = hrtimers_prepare_cpu,
1369 .teardown.single = hrtimers_dead_cpu,
1370 },
1371 [CPUHP_SMPCFD_PREPARE] = {
1372 .name = "smpcfd:prepare",
1373 .startup.single = smpcfd_prepare_cpu,
1374 .teardown.single = smpcfd_dead_cpu,
1375 },
1376 [CPUHP_RELAY_PREPARE] = {
1377 .name = "relay:prepare",
1378 .startup.single = relay_prepare_cpu,
1379 .teardown.single = NULL,
1380 },
1381 [CPUHP_SLAB_PREPARE] = {
1382 .name = "slab:prepare",
1383 .startup.single = slab_prepare_cpu,
1384 .teardown.single = slab_dead_cpu,
1385 },
1386 [CPUHP_RCUTREE_PREP] = {
1387 .name = "RCU/tree:prepare",
1388 .startup.single = rcutree_prepare_cpu,
1389 .teardown.single = rcutree_dead_cpu,
1390 },
1391 /*
1392 * On the tear-down path, timers_dead_cpu() must be invoked
1393 * before blk_mq_queue_reinit_notify() from notify_dead(),
1394 * otherwise an RCU stall occurs.
1395 */
1396 [CPUHP_TIMERS_PREPARE] = {
1397 .name = "timers:dead",
1398 .startup.single = timers_prepare_cpu,
1399 .teardown.single = timers_dead_cpu,
1400 },
1401 /* Kicks the plugged cpu into life */
1402 [CPUHP_BRINGUP_CPU] = {
1403 .name = "cpu:bringup",
1404 .startup.single = bringup_cpu,
1405 .teardown.single = NULL,
1406 .cant_stop = true,
1407 },
1408 /*
1409 * Handled on the control processor until the plugged processor manages
1410 * this itself.
1411 */
1412 [CPUHP_TEARDOWN_CPU] = {
1413 .name = "cpu:teardown",
1414 .startup.single = NULL,
1415 .teardown.single = takedown_cpu,
1416 .cant_stop = true,
1417 },
1418 #else
1419 [CPUHP_BRINGUP_CPU] = { },
1420 #endif
1421 };
1422
1423 /* Application processor state steps */
1424 static struct cpuhp_step cpuhp_ap_states[] = {
1425 #ifdef CONFIG_SMP
1426 /* Final state before CPU kills itself */
1427 [CPUHP_AP_IDLE_DEAD] = {
1428 .name = "idle:dead",
1429 },
1430 /*
1431 * Last state before CPU enters the idle loop to die. Transient state
1432 * for synchronization.
1433 */
1434 [CPUHP_AP_OFFLINE] = {
1435 .name = "ap:offline",
1436 .cant_stop = true,
1437 },
1438 /* First state is scheduler control. Interrupts are disabled */
1439 [CPUHP_AP_SCHED_STARTING] = {
1440 .name = "sched:starting",
1441 .startup.single = sched_cpu_starting,
1442 .teardown.single = sched_cpu_dying,
1443 },
1444 [CPUHP_AP_RCUTREE_DYING] = {
1445 .name = "RCU/tree:dying",
1446 .startup.single = NULL,
1447 .teardown.single = rcutree_dying_cpu,
1448 },
1449 [CPUHP_AP_SMPCFD_DYING] = {
1450 .name = "smpcfd:dying",
1451 .startup.single = NULL,
1452 .teardown.single = smpcfd_dying_cpu,
1453 },
1454 /* Entry state on starting. Interrupts enabled from here on. Transient
1455 * state for synchronization */
1456 [CPUHP_AP_ONLINE] = {
1457 .name = "ap:online",
1458 },
1459 /* Handle smpboot threads park/unpark */
1460 [CPUHP_AP_SMPBOOT_THREADS] = {
1461 .name = "smpboot/threads:online",
1462 .startup.single = smpboot_unpark_threads,
1463 .teardown.single = smpboot_park_threads,
1464 },
1465 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1466 .name = "irq/affinity:online",
1467 .startup.single = irq_affinity_online_cpu,
1468 .teardown.single = NULL,
1469 },
1470 [CPUHP_AP_PERF_ONLINE] = {
1471 .name = "perf:online",
1472 .startup.single = perf_event_init_cpu,
1473 .teardown.single = perf_event_exit_cpu,
1474 },
1475 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1476 .name = "workqueue:online",
1477 .startup.single = workqueue_online_cpu,
1478 .teardown.single = workqueue_offline_cpu,
1479 },
1480 [CPUHP_AP_RCUTREE_ONLINE] = {
1481 .name = "RCU/tree:online",
1482 .startup.single = rcutree_online_cpu,
1483 .teardown.single = rcutree_offline_cpu,
1484 },
1485 #endif
1486 /*
1487 * The dynamically registered state space is here
1488 */
1489
1490 #ifdef CONFIG_SMP
1491 /* Last state is scheduler control setting the cpu active */
1492 [CPUHP_AP_ACTIVE] = {
1493 .name = "sched:active",
1494 .startup.single = sched_cpu_activate,
1495 .teardown.single = sched_cpu_deactivate,
1496 },
1497 #endif
1498
1499 /* CPU is fully up and running. */
1500 [CPUHP_ONLINE] = {
1501 .name = "online",
1502 .startup.single = NULL,
1503 .teardown.single = NULL,
1504 },
1505 };
1506
1507 /* Sanity check for callbacks */
1508 static int cpuhp_cb_check(enum cpuhp_state state)
1509 {
1510 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1511 return -EINVAL;
1512 return 0;
1513 }
1514
1515 /*
1516 * Returns a free slot for dynamic state assignment in the requested range.
1517 * The states are protected by the cpuhp_state_mutex and an empty slot is
1518 * identified by having no name assigned.
1519 */
1520 static int cpuhp_reserve_state(enum cpuhp_state state)
1521 {
1522 enum cpuhp_state i, end;
1523 struct cpuhp_step *step;
1524
1525 switch (state) {
1526 case CPUHP_AP_ONLINE_DYN:
1527 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
1528 end = CPUHP_AP_ONLINE_DYN_END;
1529 break;
1530 case CPUHP_BP_PREPARE_DYN:
1531 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
1532 end = CPUHP_BP_PREPARE_DYN_END;
1533 break;
1534 default:
1535 return -EINVAL;
1536 }
1537
1538 for (i = state; i <= end; i++, step++) {
1539 if (!step->name)
1540 return i;
1541 }
1542 WARN(1, "No more dynamic states available for CPU hotplug\n");
1543 return -ENOSPC;
1544 }
1545
1546 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1547 int (*startup)(unsigned int cpu),
1548 int (*teardown)(unsigned int cpu),
1549 bool multi_instance)
1550 {
1551 /* (Un)Install the callbacks for further cpu hotplug operations */
1552 struct cpuhp_step *sp;
1553 int ret = 0;
1554
1555 /*
1556 * If name is NULL, then the state gets removed.
1557 *
1558 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1559 * the first allocation from these dynamic ranges, so the removal
1560 * would trigger a new allocation and clear the wrong (already
1561 * empty) state, leaving the callbacks of the to be cleared state
1562 * dangling, which causes wreckage on the next hotplug operation.
1563 */
1564 if (name && (state == CPUHP_AP_ONLINE_DYN ||
1565 state == CPUHP_BP_PREPARE_DYN)) {
1566 ret = cpuhp_reserve_state(state);
1567 if (ret < 0)
1568 return ret;
1569 state = ret;
1570 }
1571 sp = cpuhp_get_step(state);
1572 if (name && sp->name)
1573 return -EBUSY;
1574
1575 sp->startup.single = startup;
1576 sp->teardown.single = teardown;
1577 sp->name = name;
1578 sp->multi_instance = multi_instance;
1579 INIT_HLIST_HEAD(&sp->list);
1580 return ret;
1581 }
1582
1583 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1584 {
1585 return cpuhp_get_step(state)->teardown.single;
1586 }
1587
1588 /*
1589 * Call the startup/teardown function for a step either on the AP or
1590 * on the current CPU.
1591 */
1592 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1593 struct hlist_node *node)
1594 {
1595 struct cpuhp_step *sp = cpuhp_get_step(state);
1596 int ret;
1597
1598 /*
1599 * If there's nothing to do, we're done.
1600 * Relies on the union for multi_instance.
1601 */
1602 if ((bringup && !sp->startup.single) ||
1603 (!bringup && !sp->teardown.single))
1604 return 0;
1605 /*
1606 * The non-AP-bound callbacks can fail on bringup. On teardown,
1607 * e.g. module removal, we crash for now.
1608 */
1609 #ifdef CONFIG_SMP
1610 if (cpuhp_is_ap_state(state))
1611 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1612 else
1613 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1614 #else
1615 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1616 #endif
1617 BUG_ON(ret && !bringup);
1618 return ret;
1619 }
1620
1621 /*
1622 * Called from __cpuhp_setup_state on a recoverable failure.
1623 *
1624 * Note: The teardown callbacks for rollback are not allowed to fail!
1625 */
1626 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1627 struct hlist_node *node)
1628 {
1629 int cpu;
1630
1631 /* Roll back the already executed steps on the other cpus */
1632 for_each_present_cpu(cpu) {
1633 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1634 int cpustate = st->state;
1635
1636 if (cpu >= failedcpu)
1637 break;
1638
1639 /* Did we invoke the startup call on that cpu ? */
1640 if (cpustate >= state)
1641 cpuhp_issue_call(cpu, state, false, node);
1642 }
1643 }
1644
1645 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1646 struct hlist_node *node,
1647 bool invoke)
1648 {
1649 struct cpuhp_step *sp;
1650 int cpu;
1651 int ret;
1652
1653 lockdep_assert_cpus_held();
1654
1655 sp = cpuhp_get_step(state);
1656 if (sp->multi_instance == false)
1657 return -EINVAL;
1658
1659 mutex_lock(&cpuhp_state_mutex);
1660
1661 if (!invoke || !sp->startup.multi)
1662 goto add_node;
1663
1664 /*
1665 * Try to call the startup callback for each present cpu
1666 * depending on the hotplug state of the cpu.
1667 */
1668 for_each_present_cpu(cpu) {
1669 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1670 int cpustate = st->state;
1671
1672 if (cpustate < state)
1673 continue;
1674
1675 ret = cpuhp_issue_call(cpu, state, true, node);
1676 if (ret) {
1677 if (sp->teardown.multi)
1678 cpuhp_rollback_install(cpu, state, node);
1679 goto unlock;
1680 }
1681 }
1682 add_node:
1683 ret = 0;
1684 hlist_add_head(node, &sp->list);
1685 unlock:
1686 mutex_unlock(&cpuhp_state_mutex);
1687 return ret;
1688 }
1689
1690 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1691 bool invoke)
1692 {
1693 int ret;
1694
1695 cpus_read_lock();
1696 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1697 cpus_read_unlock();
1698 return ret;
1699 }
1700 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
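/*
 * Multi-instance usage sketch (assumes the cpuhp_setup_state_multi() and
 * cpuhp_state_add_instance() wrappers from <linux/cpuhotplug.h>; the
 * subsystem callback names are made up for the example):
 *
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "subsys:online",
 *					subsys_cpu_online, subsys_cpu_offline);
 *	...
 *	cpuhp_state_add_instance(state, &obj->node);
 *
 * The per-instance callbacks get the cpu number and the hlist_node, from
 * which the owning object can be recovered with container_of().
 */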
1701
1702 /**
1703 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
1704 * @state: The state to setup
1705 * @invoke: If true, the startup function is invoked for cpus where
1706 * cpu state >= @state
1707 * @startup: startup callback function
1708 * @teardown: teardown callback function
1709 * @multi_instance: State is set up for multiple instances which get
1710 * added afterwards.
1711 *
1712 * The caller needs to hold cpus read locked while calling this function.
1713 * Returns:
1714 * On success:
1715 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
1716 * 0 for all other states
1717 * On failure: proper (negative) error code
1718 */
1719 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1720 const char *name, bool invoke,
1721 int (*startup)(unsigned int cpu),
1722 int (*teardown)(unsigned int cpu),
1723 bool multi_instance)
1724 {
1725 int cpu, ret = 0;
1726 bool dynstate;
1727
1728 lockdep_assert_cpus_held();
1729
1730 if (cpuhp_cb_check(state) || !name)
1731 return -EINVAL;
1732
1733 mutex_lock(&cpuhp_state_mutex);
1734
1735 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1736 multi_instance);
1737
1738 dynstate = state == CPUHP_AP_ONLINE_DYN;
1739 if (ret > 0 && dynstate) {
1740 state = ret;
1741 ret = 0;
1742 }
1743
1744 if (ret || !invoke || !startup)
1745 goto out;
1746
1747 /*
1748 * Try to call the startup callback for each present cpu
1749 * depending on the hotplug state of the cpu.
1750 */
1751 for_each_present_cpu(cpu) {
1752 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1753 int cpustate = st->state;
1754
1755 if (cpustate < state)
1756 continue;
1757
1758 ret = cpuhp_issue_call(cpu, state, true, NULL);
1759 if (ret) {
1760 if (teardown)
1761 cpuhp_rollback_install(cpu, state, NULL);
1762 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1763 goto out;
1764 }
1765 }
1766 out:
1767 mutex_unlock(&cpuhp_state_mutex);
1768 /*
1769 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1770 * dynamically allocated state in case of success.
1771 */
1772 if (!ret && dynstate)
1773 return state;
1774 return ret;
1775 }
1776 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1777
1778 int __cpuhp_setup_state(enum cpuhp_state state,
1779 const char *name, bool invoke,
1780 int (*startup)(unsigned int cpu),
1781 int (*teardown)(unsigned int cpu),
1782 bool multi_instance)
1783 {
1784 int ret;
1785
1786 cpus_read_lock();
1787 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1788 teardown, multi_instance);
1789 cpus_read_unlock();
1790 return ret;
1791 }
1792 EXPORT_SYMBOL(__cpuhp_setup_state);
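/*
 * Sketch of the common dynamic registration pattern (via the
 * cpuhp_setup_state()/cpuhp_remove_state() wrappers from
 * <linux/cpuhotplug.h>; callback names are made up for the example):
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
 *				subsys_cpu_online, subsys_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	subsys_hp_state = ret;		dynamically allocated state number
 *	...
 *	cpuhp_remove_state(subsys_hp_state);
 *
 * With @invoke true (as in the wrapper) the startup callback runs right
 * away on every CPU that has already passed the new state.
 */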
1793
1794 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1795 struct hlist_node *node, bool invoke)
1796 {
1797 struct cpuhp_step *sp = cpuhp_get_step(state);
1798 int cpu;
1799
1800 BUG_ON(cpuhp_cb_check(state));
1801
1802 if (!sp->multi_instance)
1803 return -EINVAL;
1804
1805 cpus_read_lock();
1806 mutex_lock(&cpuhp_state_mutex);
1807
1808 if (!invoke || !cpuhp_get_teardown_cb(state))
1809 goto remove;
1810 /*
1811 * Call the teardown callback for each present cpu depending
1812 * on the hotplug state of the cpu. This function is not
1813 * allowed to fail currently!
1814 */
1815 for_each_present_cpu(cpu) {
1816 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1817 int cpustate = st->state;
1818
1819 if (cpustate >= state)
1820 cpuhp_issue_call(cpu, state, false, node);
1821 }
1822
1823 remove:
1824 hlist_del(node);
1825 mutex_unlock(&cpuhp_state_mutex);
1826 cpus_read_unlock();
1827
1828 return 0;
1829 }
1830 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1831
1832 /**
1833 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
1834 * @state: The state to remove
1835 * @invoke: If true, the teardown function is invoked for cpus where
1836 * cpu state >= @state
1837 *
1838 * The caller needs to hold cpus read locked while calling this function.
1839 * The teardown callback is currently not allowed to fail. Think
1840 * about module removal!
1841 */
1842 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
1843 {
1844 struct cpuhp_step *sp = cpuhp_get_step(state);
1845 int cpu;
1846
1847 BUG_ON(cpuhp_cb_check(state));
1848
1849 lockdep_assert_cpus_held();
1850
1851 mutex_lock(&cpuhp_state_mutex);
1852 if (sp->multi_instance) {
1853 WARN(!hlist_empty(&sp->list),
1854 "Error: Removing state %d which has instances left.\n",
1855 state);
1856 goto remove;
1857 }
1858
1859 if (!invoke || !cpuhp_get_teardown_cb(state))
1860 goto remove;
1861
1862 /*
1863 * Call the teardown callback for each present cpu depending
1864 * on the hotplug state of the cpu. This function is not
1865 * allowed to fail currently!
1866 */
1867 for_each_present_cpu(cpu) {
1868 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1869 int cpustate = st->state;
1870
1871 if (cpustate >= state)
1872 cpuhp_issue_call(cpu, state, false, NULL);
1873 }
1874 remove:
1875 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1876 mutex_unlock(&cpuhp_state_mutex);
1877 }
1878 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1879
1880 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1881 {
1882 cpus_read_lock();
1883 __cpuhp_remove_state_cpuslocked(state, invoke);
1884 cpus_read_unlock();
1885 }
1886 EXPORT_SYMBOL(__cpuhp_remove_state);
1887
1888 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1889 static ssize_t show_cpuhp_state(struct device *dev,
1890 struct device_attribute *attr, char *buf)
1891 {
1892 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1893
1894 return sprintf(buf, "%d\n", st->state);
1895 }
1896 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1897
1898 static ssize_t write_cpuhp_target(struct device *dev,
1899 struct device_attribute *attr,
1900 const char *buf, size_t count)
1901 {
1902 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1903 struct cpuhp_step *sp;
1904 int target, ret;
1905
1906 ret = kstrtoint(buf, 10, &target);
1907 if (ret)
1908 return ret;
1909
1910 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1911 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1912 return -EINVAL;
1913 #else
1914 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1915 return -EINVAL;
1916 #endif
1917
1918 ret = lock_device_hotplug_sysfs();
1919 if (ret)
1920 return ret;
1921
1922 mutex_lock(&cpuhp_state_mutex);
1923 sp = cpuhp_get_step(target);
1924 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1925 mutex_unlock(&cpuhp_state_mutex);
1926 if (ret)
1927 goto out;
1928
1929 if (st->state < target)
1930 ret = do_cpu_up(dev->id, target);
1931 else
1932 ret = do_cpu_down(dev->id, target);
1933 out:
1934 unlock_device_hotplug();
1935 return ret ? ret : count;
1936 }
1937
1938 static ssize_t show_cpuhp_target(struct device *dev,
1939 struct device_attribute *attr, char *buf)
1940 {
1941 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1942
1943 return sprintf(buf, "%d\n", st->target);
1944 }
1945 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
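/*
 * Illustrative use of the per-cpu state/target attributes (not part of
 * this file; paths assume the standard cpu sysfs layout):
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state	(current state)
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/target	(requested state)
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target	(take cpu1 down)
 *
 * Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only CPUHP_OFFLINE (0) and
 * CPUHP_ONLINE are accepted; with it, any intermediate state listed in
 * the global "states" file can be requested for debugging.
 */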
1946
1947
1948 static ssize_t write_cpuhp_fail(struct device *dev,
1949 struct device_attribute *attr,
1950 const char *buf, size_t count)
1951 {
1952 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1953 struct cpuhp_step *sp;
1954 int fail, ret;
1955
1956 ret = kstrtoint(buf, 10, &fail);
1957 if (ret)
1958 return ret;
1959
1960 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
1961 return -EINVAL;
1962
1963 /*
1964 * Cannot fail STARTING/DYING callbacks.
1965 */
1966 if (cpuhp_is_atomic_state(fail))
1967 return -EINVAL;
1968
1969 /*
1970 * Cannot fail anything that doesn't have callbacks.
1971 */
1972 mutex_lock(&cpuhp_state_mutex);
1973 sp = cpuhp_get_step(fail);
1974 if (!sp->startup.single && !sp->teardown.single)
1975 ret = -EINVAL;
1976 mutex_unlock(&cpuhp_state_mutex);
1977 if (ret)
1978 return ret;
1979
1980 st->fail = fail;
1981
1982 return count;
1983 }
1984
1985 static ssize_t show_cpuhp_fail(struct device *dev,
1986 struct device_attribute *attr, char *buf)
1987 {
1988 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1989
1990 return sprintf(buf, "%d\n", st->fail);
1991 }
1992
1993 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
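/*
 * Illustrative use of the "fail" attribute (not part of this file):
 * writing a state number arms a failure injection for the next hotplug
 * operation on this CPU, e.g. (shell, paths assumed):
 *
 *	# cat /sys/devices/system/cpu/hotplug/states		(pick a state id)
 *	# echo <state-id> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * The callback invocation for that state is then forced to fail, which
 * exercises the rollback path of the hotplug state machine.
 */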
1994
1995 static struct attribute *cpuhp_cpu_attrs[] = {
1996 &dev_attr_state.attr,
1997 &dev_attr_target.attr,
1998 &dev_attr_fail.attr,
1999 NULL
2000 };
2001
2002 static const struct attribute_group cpuhp_cpu_attr_group = {
2003 .attrs = cpuhp_cpu_attrs,
2004 .name = "hotplug",
2005 NULL
2006 };
2007
2008 static ssize_t show_cpuhp_states(struct device *dev,
2009 struct device_attribute *attr, char *buf)
2010 {
2011 ssize_t cur, res = 0;
2012 int i;
2013
2014 mutex_lock(&cpuhp_state_mutex);
2015 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2016 struct cpuhp_step *sp = cpuhp_get_step(i);
2017
2018 if (sp->name) {
2019 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2020 buf += cur;
2021 res += cur;
2022 }
2023 }
2024 mutex_unlock(&cpuhp_state_mutex);
2025 return res;
2026 }
2027 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
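/*
 * Example output of the global "states" file (illustrative; the state
 * numbers and the set of named states depend on the kernel config):
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	  0: offline
 *	  1: threads:prepare
 *	  ...
 */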
2028
2029 static struct attribute *cpuhp_cpu_root_attrs[] = {
2030 &dev_attr_states.attr,
2031 NULL
2032 };
2033
2034 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2035 .attrs = cpuhp_cpu_root_attrs,
2036 .name = "hotplug",
2037 NULL
2038 };
2039
2040 #ifdef CONFIG_HOTPLUG_SMT
2041
2042 static const char *smt_states[] = {
2043 [CPU_SMT_ENABLED] = "on",
2044 [CPU_SMT_DISABLED] = "off",
2045 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2046 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2047 };
2048
2049 static ssize_t
2050 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2051 {
2052 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
2053 }
2054
2055 static void cpuhp_offline_cpu_device(unsigned int cpu)
2056 {
2057 struct device *dev = get_cpu_device(cpu);
2058
2059 dev->offline = true;
2060 /* Tell user space about the state change */
2061 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2062 }
2063
2064 static void cpuhp_online_cpu_device(unsigned int cpu)
2065 {
2066 struct device *dev = get_cpu_device(cpu);
2067
2068 dev->offline = false;
2069 /* Tell user space about the state change */
2070 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2071 }
2072
2073 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2074 {
2075 int cpu, ret = 0;
2076
2077 cpu_maps_update_begin();
2078 for_each_online_cpu(cpu) {
2079 if (topology_is_primary_thread(cpu))
2080 continue;
2081 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2082 if (ret)
2083 break;
2084 /*
2085 * As this needs to hold the cpu maps lock it's impossible
2086 * to call device_offline() because that ends up calling
2087 		 * cpu_down(), which takes the cpu maps lock. The cpu maps lock
2088 		 * needs to be held as this might race against in-kernel
2089 		 * abusers of the hotplug machinery (thermal management).
2090 		 *
2091 		 * So nothing would update the device::offline state. That would
2092 * leave the sysfs entry stale and prevent onlining after
2093 * smt control has been changed to 'off' again. This is
2094 * called under the sysfs hotplug lock, so it is properly
2095 * serialized against the regular offline usage.
2096 */
2097 cpuhp_offline_cpu_device(cpu);
2098 }
2099 if (!ret) {
2100 cpu_smt_control = ctrlval;
2101 arch_smt_update();
2102 }
2103 cpu_maps_update_done();
2104 return ret;
2105 }
2106
2107 int cpuhp_smt_enable(void)
2108 {
2109 int cpu, ret = 0;
2110
2111 cpu_maps_update_begin();
2112 cpu_smt_control = CPU_SMT_ENABLED;
2113 arch_smt_update();
2114 for_each_present_cpu(cpu) {
2115 /* Skip online CPUs and CPUs on offline nodes */
2116 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2117 continue;
2118 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2119 if (ret)
2120 break;
2121 /* See comment in cpuhp_smt_disable() */
2122 cpuhp_online_cpu_device(cpu);
2123 }
2124 cpu_maps_update_done();
2125 return ret;
2126 }
2127
2128 static ssize_t
2129 store_smt_control(struct device *dev, struct device_attribute *attr,
2130 const char *buf, size_t count)
2131 {
2132 int ctrlval, ret;
2133
2134 if (sysfs_streq(buf, "on"))
2135 ctrlval = CPU_SMT_ENABLED;
2136 else if (sysfs_streq(buf, "off"))
2137 ctrlval = CPU_SMT_DISABLED;
2138 else if (sysfs_streq(buf, "forceoff"))
2139 ctrlval = CPU_SMT_FORCE_DISABLED;
2140 else
2141 return -EINVAL;
2142
2143 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2144 return -EPERM;
2145
2146 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2147 return -ENODEV;
2148
2149 ret = lock_device_hotplug_sysfs();
2150 if (ret)
2151 return ret;
2152
2153 if (ctrlval != cpu_smt_control) {
2154 switch (ctrlval) {
2155 case CPU_SMT_ENABLED:
2156 ret = cpuhp_smt_enable();
2157 break;
2158 case CPU_SMT_DISABLED:
2159 case CPU_SMT_FORCE_DISABLED:
2160 ret = cpuhp_smt_disable(ctrlval);
2161 break;
2162 }
2163 }
2164
2165 unlock_device_hotplug();
2166 return ret ? ret : count;
2167 }
2168 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
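/*
 * Illustrative usage (not part of this file); the knob lives under
 * /sys/devices/system/cpu/smt/ and accepts the strings parsed above:
 *
 *	# echo off      > /sys/devices/system/cpu/smt/control
 *	# echo on       > /sys/devices/system/cpu/smt/control
 *	# echo forceoff > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/active
 *
 * "forceoff" is sticky: once CPU_SMT_FORCE_DISABLED is set, further
 * writes are rejected with -EPERM.
 */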
2169
2170 static ssize_t
2171 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2172 {
2173 bool active = topology_max_smt_threads() > 1;
2174
2175 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2176 }
2177 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2178
2179 static struct attribute *cpuhp_smt_attrs[] = {
2180 &dev_attr_control.attr,
2181 &dev_attr_active.attr,
2182 NULL
2183 };
2184
2185 static const struct attribute_group cpuhp_smt_attr_group = {
2186 .attrs = cpuhp_smt_attrs,
2187 .name = "smt",
2188 NULL
2189 };
2190
2191 static int __init cpu_smt_state_init(void)
2192 {
2193 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2194 &cpuhp_smt_attr_group);
2195 }
2196
2197 #else
2198 static inline int cpu_smt_state_init(void) { return 0; }
2199 #endif
2200
2201 static int __init cpuhp_sysfs_init(void)
2202 {
2203 int cpu, ret;
2204
2205 ret = cpu_smt_state_init();
2206 if (ret)
2207 return ret;
2208
2209 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2210 &cpuhp_cpu_root_attr_group);
2211 if (ret)
2212 return ret;
2213
2214 for_each_possible_cpu(cpu) {
2215 struct device *dev = get_cpu_device(cpu);
2216
2217 if (!dev)
2218 continue;
2219 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2220 if (ret)
2221 return ret;
2222 }
2223 return 0;
2224 }
2225 device_initcall(cpuhp_sysfs_init);
2226 #endif
2227
2228 /*
2229  * cpu_bit_bitmap[] is a special, "compressed" data structure that
2230  * represents, for every nr in 0..NR_CPUS-1, the NR_CPUS-bit value 1<<nr.
2231  *
2232  * It is used by cpumask_of() to get a constant address of a CPU
2233  * mask value that has exactly one bit set.
2234 */
2235
2236 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2237 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2238 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2239 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2240 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2241
2242 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2243
2244 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2245 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2246 #if BITS_PER_LONG > 32
2247 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2248 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2249 #endif
2250 };
2251 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
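/*
 * A sketch of how cpumask_of() uses this table, modeled on the helper
 * in <linux/cpumask.h> (shown here for illustration only):
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 *
 * Row 1 + (cpu % BITS_PER_LONG) has bit (cpu % BITS_PER_LONG) set in its
 * first long; stepping the pointer back by cpu / BITS_PER_LONG longs puts
 * that word at word index cpu / BITS_PER_LONG of the returned mask. The
 * all-zero row 0 provides the padding that makes backing up safe.
 */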
2252
2253 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2254 EXPORT_SYMBOL(cpu_all_bits);
2255
2256 #ifdef CONFIG_INIT_ALL_POSSIBLE
2257 struct cpumask __cpu_possible_mask __read_mostly
2258 = {CPU_BITS_ALL};
2259 #else
2260 struct cpumask __cpu_possible_mask __read_mostly;
2261 #endif
2262 EXPORT_SYMBOL(__cpu_possible_mask);
2263
2264 struct cpumask __cpu_online_mask __read_mostly;
2265 EXPORT_SYMBOL(__cpu_online_mask);
2266
2267 struct cpumask __cpu_present_mask __read_mostly;
2268 EXPORT_SYMBOL(__cpu_present_mask);
2269
2270 struct cpumask __cpu_active_mask __read_mostly;
2271 EXPORT_SYMBOL(__cpu_active_mask);
2272
2273 void init_cpu_present(const struct cpumask *src)
2274 {
2275 cpumask_copy(&__cpu_present_mask, src);
2276 }
2277
2278 void init_cpu_possible(const struct cpumask *src)
2279 {
2280 cpumask_copy(&__cpu_possible_mask, src);
2281 }
2282
2283 void init_cpu_online(const struct cpumask *src)
2284 {
2285 cpumask_copy(&__cpu_online_mask, src);
2286 }
2287
2288 /*
2289 * Activate the first processor.
2290 */
2291 void __init boot_cpu_init(void)
2292 {
2293 int cpu = smp_processor_id();
2294
2295 	/* Mark the boot cpu "present", "online" etc. for the SMP and UP case */
2296 set_cpu_online(cpu, true);
2297 set_cpu_active(cpu, true);
2298 set_cpu_present(cpu, true);
2299 set_cpu_possible(cpu, true);
2300
2301 #ifdef CONFIG_SMP
2302 __boot_cpu_id = cpu;
2303 #endif
2304 }
2305
2306 /*
2307 * Must be called _AFTER_ setting up the per_cpu areas
2308 */
2309 void __init boot_cpu_hotplug_init(void)
2310 {
2311 #ifdef CONFIG_SMP
2312 this_cpu_write(cpuhp_state.booted_once, true);
2313 #endif
2314 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2315 }
2316
2317 /*
2318 * These are used for a global "mitigations=" cmdline option for toggling
2319 * optional CPU mitigations.
2320 */
2321 enum cpu_mitigations {
2322 CPU_MITIGATIONS_OFF,
2323 CPU_MITIGATIONS_AUTO,
2324 CPU_MITIGATIONS_AUTO_NOSMT,
2325 };
2326
2327 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2328 CPU_MITIGATIONS_AUTO;
2329
2330 static int __init mitigations_parse_cmdline(char *arg)
2331 {
2332 if (!strcmp(arg, "off"))
2333 cpu_mitigations = CPU_MITIGATIONS_OFF;
2334 else if (!strcmp(arg, "auto"))
2335 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2336 else if (!strcmp(arg, "auto,nosmt"))
2337 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2338 else
2339 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2340 arg);
2341
2342 return 0;
2343 }
2344 early_param("mitigations", mitigations_parse_cmdline);
2345
2346 /* mitigations=off */
2347 bool cpu_mitigations_off(void)
2348 {
2349 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2350 }
2351 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2352
2353 /* mitigations=auto,nosmt */
2354 bool cpu_mitigations_auto_nosmt(void)
2355 {
2356 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2357 }
2358 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
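/*
 * A minimal sketch of how architecture code typically consults these
 * helpers when selecting a mitigation (illustrative only; the function
 * and mitigation names below are hypothetical):
 *
 *	static void __init my_mitigation_select(void)
 *	{
 *		if (cpu_mitigations_off())
 *			return;			// mitigations=off: leave it disabled
 *
 *		enable_my_mitigation();		// hypothetical helper
 *
 *		if (cpu_mitigations_auto_nosmt())
 *			pr_info("my_mitigation: SMT should also be disabled\n");
 *	}
 */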
2359