
Lines Matching +full:single +full:- +full:cpu

1 /* CPU control.
17 #include <linux/cpu.h>
35 #include <linux/percpu-rwsem.h>
46 * cpuhp_cpu_state - Per cpu hotplug state storage
47 * @state: The current cpu state
52 * @single: Single callback invocation
53 * @bringup: Single callback bringup or teardown selector
54 * @cb_state: The state for a single callback (install/uninstall)
56 * @done_up: Signal completion to the issuer of the task for cpu-up
57 * @done_down: Signal completion to the issuer of the task for cpu-down
67 bool single; member
88 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
90 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
110 * cpuhp_step - Hotplug state machine step
119 int (*single)(unsigned int cpu); member
120 int (*multi)(unsigned int cpu,
124 int (*single)(unsigned int cpu); member
125 int (*multi)(unsigned int cpu,
143 * @cpu: The cpu for which the callback should be invoked
146 * @node: For multi-instance, do a single entry callback for install/remove
147 * @lastp: For multi-instance rollback, remember how far we got
149 * Called from cpu hotplug and from the state register machinery.
151 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, in cpuhp_invoke_callback() argument
155 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_callback()
157 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
158 int (*cb)(unsigned int cpu); in cpuhp_invoke_callback()
161 if (st->fail == state) { in cpuhp_invoke_callback()
162 st->fail = CPUHP_INVALID; in cpuhp_invoke_callback()
164 if (!(bringup ? step->startup.single : step->teardown.single)) in cpuhp_invoke_callback()
167 return -EAGAIN; in cpuhp_invoke_callback()
170 if (!step->multi_instance) { in cpuhp_invoke_callback()
172 cb = bringup ? step->startup.single : step->teardown.single; in cpuhp_invoke_callback()
175 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
176 ret = cb(cpu); in cpuhp_invoke_callback()
177 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
180 cbm = bringup ? step->startup.multi : step->teardown.multi; in cpuhp_invoke_callback()
184 /* Single invocation for instance add/remove */ in cpuhp_invoke_callback()
187 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
188 ret = cbm(cpu, node); in cpuhp_invoke_callback()
189 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
195 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
199 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
200 ret = cbm(cpu, node); in cpuhp_invoke_callback()
201 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
216 cbm = !bringup ? step->startup.multi : step->teardown.multi; in cpuhp_invoke_callback()
220 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
221 if (!cnt--) in cpuhp_invoke_callback()
224 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
225 ret = cbm(cpu, node); in cpuhp_invoke_callback()
226 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
247 struct completion *done = bringup ? &st->done_up : &st->done_down; in wait_for_ap_thread()
253 struct completion *done = bringup ? &st->done_up : &st->done_down; in complete_ap_thread()
285 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
347 * Wait for currently running CPU hotplug operations to complete (if any) and
348 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
363 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n")) in __cpu_hotplug_enable()
365 cpu_hotplug_disabled--; in __cpu_hotplug_enable()
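
The fragments above (the percpu-rwsem include, cpu_hotplug_disable()/__cpu_hotplug_enable() and the -EBUSY guard) appear to come from the kernel's CPU hotplug core (kernel/cpu.c) and form the serialization side of hotplug: code that needs a stable set of online CPUs holds the hotplug read lock around its CPU iteration. A minimal sketch of that pattern follows; cpus_read_lock()/cpus_read_unlock() and for_each_online_cpu() are the standard kernel API, while walk_online() is a hypothetical caller added for illustration.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Sketch: keep the online mask stable while iterating over it.
 * cpus_read_lock() blocks concurrent cpu_up()/cpu_down() for the
 * duration of the critical section. */
static void walk_online(void)
{
	unsigned int cpu;

	cpus_read_lock();		/* hold off hotplug while we look */
	for_each_online_cpu(cpu)
		pr_info("cpu%u is online\n", cpu);
	cpus_read_unlock();
}
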
389 * Architectures that need SMT-specific errata handling during SMT hotplug
413 * CPU identification. Called from architecture code.
428 static inline bool cpu_smt_allowed(unsigned int cpu) in cpu_smt_allowed() argument
433 if (topology_is_primary_thread(cpu)) in cpu_smt_allowed()
439 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any in cpu_smt_allowed()
442 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask); in cpu_smt_allowed()
453 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } in cpu_smt_allowed() argument
459 enum cpuhp_state prev_state = st->state; in cpuhp_set_state()
461 st->rollback = false; in cpuhp_set_state()
462 st->last = NULL; in cpuhp_set_state()
464 st->target = target; in cpuhp_set_state()
465 st->single = false; in cpuhp_set_state()
466 st->bringup = st->state < target; in cpuhp_set_state()
474 st->rollback = true; in cpuhp_reset_state()
477 * If we have st->last we need to undo partial multi_instance of this in cpuhp_reset_state()
480 if (!st->last) { in cpuhp_reset_state()
481 if (st->bringup) in cpuhp_reset_state()
482 st->state--; in cpuhp_reset_state()
484 st->state++; in cpuhp_reset_state()
487 st->target = prev_state; in cpuhp_reset_state()
488 st->bringup = !st->bringup; in cpuhp_reset_state()
494 if (!st->single && st->state == st->target) in __cpuhp_kick_ap()
497 st->result = 0; in __cpuhp_kick_ap()
503 st->should_run = true; in __cpuhp_kick_ap()
504 wake_up_process(st->thread); in __cpuhp_kick_ap()
505 wait_for_ap_thread(st, st->bringup); in __cpuhp_kick_ap()
515 if ((ret = st->result)) { in cpuhp_kick_ap()
523 static int bringup_wait_for_ap(unsigned int cpu) in bringup_wait_for_ap() argument
525 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_wait_for_ap()
527 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ in bringup_wait_for_ap()
529 if (WARN_ON_ONCE((!cpu_online(cpu)))) in bringup_wait_for_ap()
530 return -ECANCELED; in bringup_wait_for_ap()
532 /* Unpark the hotplug thread of the target cpu */ in bringup_wait_for_ap()
533 kthread_unpark(st->thread); in bringup_wait_for_ap()
536 * SMT soft disabling on X86 requires to bring the CPU out of the in bringup_wait_for_ap()
538 * CPU marked itself as booted_once in notify_cpu_starting() so the in bringup_wait_for_ap()
542 if (!cpu_smt_allowed(cpu)) in bringup_wait_for_ap()
543 return -ECANCELED; in bringup_wait_for_ap()
545 if (st->target <= CPUHP_AP_ONLINE_IDLE) in bringup_wait_for_ap()
548 return cpuhp_kick_ap(st, st->target); in bringup_wait_for_ap()
551 static int bringup_cpu(unsigned int cpu) in bringup_cpu() argument
553 struct task_struct *idle = idle_thread_get(cpu); in bringup_cpu()
557 * Reset stale stack state from the last time this CPU was online. in bringup_cpu()
564 * setup the vector space for the cpu which comes online. in bringup_cpu()
569 /* Arch-specific enabling code. */ in bringup_cpu()
570 ret = __cpu_up(cpu, idle); in bringup_cpu()
574 return bringup_wait_for_ap(cpu); in bringup_cpu()
577 static int finish_cpu(unsigned int cpu) in finish_cpu() argument
579 struct task_struct *idle = idle_thread_get(cpu); in finish_cpu()
580 struct mm_struct *mm = idle->active_mm; in finish_cpu()
587 idle->active_mm = &init_mm; in finish_cpu()
596 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) in undo_cpu_up() argument
598 for (st->state--; st->state > st->target; st->state--) in undo_cpu_up()
599 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); in undo_cpu_up()
607 * When CPU hotplug is disabled, then taking the CPU down is not in can_rollback_cpu()
609 * subsystem specific mechanisms are not available. So the CPU in can_rollback_cpu()
613 return st->state <= CPUHP_BRINGUP_CPU; in can_rollback_cpu()
616 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_up_callbacks() argument
619 enum cpuhp_state prev_state = st->state; in cpuhp_up_callbacks()
622 while (st->state < target) { in cpuhp_up_callbacks()
623 st->state++; in cpuhp_up_callbacks()
624 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); in cpuhp_up_callbacks()
627 st->target = prev_state; in cpuhp_up_callbacks()
628 undo_cpu_up(cpu, st); in cpuhp_up_callbacks()
637 * The cpu hotplug threads manage the bringup and teardown of the cpus
639 static void cpuhp_create(unsigned int cpu) in cpuhp_create() argument
641 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_create()
643 init_completion(&st->done_up); in cpuhp_create()
644 init_completion(&st->done_down); in cpuhp_create()
647 static int cpuhp_should_run(unsigned int cpu) in cpuhp_should_run() argument
651 return st->should_run; in cpuhp_should_run()
655 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
658 * Each invocation of this function by the smpboot thread does a single AP
662 * - single: runs st->cb_state
663 * - up: runs ++st->state, while st->state < st->target
664 * - down: runs st->state--, while st->state > st->target
668 static void cpuhp_thread_fun(unsigned int cpu) in cpuhp_thread_fun() argument
671 bool bringup = st->bringup; in cpuhp_thread_fun()
674 if (WARN_ON_ONCE(!st->should_run)) in cpuhp_thread_fun()
678 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures in cpuhp_thread_fun()
679 * that if we see ->should_run we also see the rest of the state. in cpuhp_thread_fun()
691 if (st->single) { in cpuhp_thread_fun()
692 state = st->cb_state; in cpuhp_thread_fun()
693 st->should_run = false; in cpuhp_thread_fun()
696 st->state++; in cpuhp_thread_fun()
697 state = st->state; in cpuhp_thread_fun()
698 st->should_run = (st->state < st->target); in cpuhp_thread_fun()
699 WARN_ON_ONCE(st->state > st->target); in cpuhp_thread_fun()
701 state = st->state; in cpuhp_thread_fun()
702 st->state--; in cpuhp_thread_fun()
703 st->should_run = (st->state > st->target); in cpuhp_thread_fun()
704 WARN_ON_ONCE(st->state < st->target); in cpuhp_thread_fun()
712 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
718 WARN_ON_ONCE(st->result); in cpuhp_thread_fun()
720 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
723 if (st->result) { in cpuhp_thread_fun()
729 WARN_ON_ONCE(st->rollback); in cpuhp_thread_fun()
730 st->should_run = false; in cpuhp_thread_fun()
736 if (!st->should_run) in cpuhp_thread_fun()
740 /* Invoke a single callback on a remote cpu */
742 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_invoke_ap_callback() argument
745 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_ap_callback()
748 if (!cpu_online(cpu)) in cpuhp_invoke_ap_callback()
761 if (!st->thread) in cpuhp_invoke_ap_callback()
762 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_invoke_ap_callback()
764 st->rollback = false; in cpuhp_invoke_ap_callback()
765 st->last = NULL; in cpuhp_invoke_ap_callback()
767 st->node = node; in cpuhp_invoke_ap_callback()
768 st->bringup = bringup; in cpuhp_invoke_ap_callback()
769 st->cb_state = state; in cpuhp_invoke_ap_callback()
770 st->single = true; in cpuhp_invoke_ap_callback()
777 if ((ret = st->result) && st->last) { in cpuhp_invoke_ap_callback()
778 st->rollback = true; in cpuhp_invoke_ap_callback()
779 st->bringup = !bringup; in cpuhp_invoke_ap_callback()
788 st->node = st->last = NULL; in cpuhp_invoke_ap_callback()
792 static int cpuhp_kick_ap_work(unsigned int cpu) in cpuhp_kick_ap_work() argument
794 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_kick_ap_work()
795 enum cpuhp_state prev_state = st->state; in cpuhp_kick_ap_work()
804 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
805 ret = cpuhp_kick_ap(st, st->target); in cpuhp_kick_ap_work()
806 trace_cpuhp_exit(cpu, st->state, prev_state, ret); in cpuhp_kick_ap_work()
831 * The operation is still serialized against concurrent CPU hotplug via
832 * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
837 * This is required for subsystems which are unfixable vs. CPU hotplug and
858 * user space when a new CPU is brought up. The CPU plug uevent in cpu_up_down_serialize_trainwrecks()
860 * move tasks to the newly plugged CPU up to the point where the in cpu_up_down_serialize_trainwrecks()
861 * work has finished because up to that point the newly plugged CPU in cpu_up_down_serialize_trainwrecks()
874 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) argument
878 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
879 * @cpu: a CPU id
883 * trivial, there are various non-obvious corner cases, which this function
887 * be called only for an already offlined CPU.
889 void clear_tasks_mm_cpumask(int cpu) in clear_tasks_mm_cpumask() argument
894 * This function is called after the cpu is taken down and marked in clear_tasks_mm_cpumask()
895 * offline, so its not like new tasks will ever get this cpu set in in clear_tasks_mm_cpumask()
896 * their mm mask. -- Peter Zijlstra in clear_tasks_mm_cpumask()
898 * full-fledged tasklist_lock. in clear_tasks_mm_cpumask()
900 WARN_ON(cpu_online(cpu)); in clear_tasks_mm_cpumask()
912 arch_clear_mm_cpumask_cpu(cpu, t->mm); in clear_tasks_mm_cpumask()
918 /* Take this CPU down. */
922 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); in take_cpu_down()
923 int err, cpu = smp_processor_id(); in take_cpu_down() local
926 /* Ensure this CPU doesn't handle any more interrupts. */ in take_cpu_down()
935 WARN_ON(st->state != CPUHP_TEARDOWN_CPU); in take_cpu_down()
936 st->state--; in take_cpu_down()
938 for (; st->state > target; st->state--) { in take_cpu_down()
939 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); in take_cpu_down()
948 /* Remove CPU from timer broadcasting */ in take_cpu_down()
949 tick_offline_cpu(cpu); in take_cpu_down()
951 stop_machine_park(cpu); in take_cpu_down()
955 static int takedown_cpu(unsigned int cpu) in takedown_cpu() argument
957 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in takedown_cpu()
961 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); in takedown_cpu()
964 * Prevent irq alloc/free while the dying cpu reorganizes the in takedown_cpu()
972 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); in takedown_cpu()
974 /* CPU refused to die */ in takedown_cpu()
977 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread); in takedown_cpu()
980 BUG_ON(cpu_online(cpu)); in takedown_cpu()
984 * all runnable tasks from the CPU, there's only the idle task left now in takedown_cpu()
990 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); in takedown_cpu()
992 /* Interrupts are moved away from the dying cpu, reenable alloc/free */ in takedown_cpu()
995 hotplug_cpu__broadcast_tick_pull(cpu); in takedown_cpu()
996 /* This actually kills the CPU. */ in takedown_cpu()
997 __cpu_die(cpu); in takedown_cpu()
999 tick_cleanup_dead_cpu(cpu); in takedown_cpu()
1000 rcutree_migrate_callbacks(cpu); in takedown_cpu()
1015 BUG_ON(st->state != CPUHP_AP_OFFLINE); in cpuhp_report_idle_dead()
1017 st->state = CPUHP_AP_IDLE_DEAD; in cpuhp_report_idle_dead()
1020 * to an online cpu. in cpuhp_report_idle_dead()
1026 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) in undo_cpu_down() argument
1028 for (st->state++; st->state < st->target; st->state++) in undo_cpu_down()
1029 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); in undo_cpu_down()
1032 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_down_callbacks() argument
1035 enum cpuhp_state prev_state = st->state; in cpuhp_down_callbacks()
1038 for (; st->state > target; st->state--) { in cpuhp_down_callbacks()
1039 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); in cpuhp_down_callbacks()
1041 st->target = prev_state; in cpuhp_down_callbacks()
1042 if (st->state < prev_state) in cpuhp_down_callbacks()
1043 undo_cpu_down(cpu, st); in cpuhp_down_callbacks()
1051 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, in _cpu_down() argument
1054 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_down()
1058 return -EBUSY; in _cpu_down()
1060 if (!cpu_present(cpu)) in _cpu_down()
1061 return -EINVAL; in _cpu_down()
1064 if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1) in _cpu_down()
1065 return -EBUSY; in _cpu_down()
1074 * If the current CPU state is in the range of the AP hotplug thread, in _cpu_down()
1077 if (st->state > CPUHP_TEARDOWN_CPU) { in _cpu_down()
1078 st->target = max((int)target, CPUHP_TEARDOWN_CPU); in _cpu_down()
1079 ret = cpuhp_kick_ap_work(cpu); in _cpu_down()
1091 if (st->state > CPUHP_TEARDOWN_CPU) in _cpu_down()
1094 st->target = target; in _cpu_down()
1100 ret = cpuhp_down_callbacks(cpu, st, target); in _cpu_down()
1101 if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { in _cpu_down()
1110 * concurrent CPU hotplug via cpu_add_remove_lock. in _cpu_down()
1118 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) in cpu_down_maps_locked() argument
1121 return -EBUSY; in cpu_down_maps_locked()
1122 return _cpu_down(cpu, 0, target); in cpu_down_maps_locked()
1125 static int cpu_down(unsigned int cpu, enum cpuhp_state target) in cpu_down() argument
1130 err = cpu_down_maps_locked(cpu, target); in cpu_down()
1136 * cpu_device_down - Bring down a cpu device
1137 * @dev: Pointer to the cpu device to offline
1139 * This function is meant to be used by device core cpu subsystem only.
1145 return cpu_down(dev->id, CPUHP_OFFLINE); in cpu_device_down()
1148 int remove_cpu(unsigned int cpu) in remove_cpu() argument
1153 ret = device_offline(get_cpu_device(cpu)); in remove_cpu()
1162 unsigned int cpu; in smp_shutdown_nonboot_cpus() local
1168 * Make certain the cpu I'm about to reboot on is online. in smp_shutdown_nonboot_cpus()
1175 for_each_online_cpu(cpu) { in smp_shutdown_nonboot_cpus()
1176 if (cpu == primary_cpu) in smp_shutdown_nonboot_cpus()
1179 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in smp_shutdown_nonboot_cpus()
1181 pr_err("Failed to offline CPU%d - error=%d", in smp_shutdown_nonboot_cpus()
1182 cpu, error); in smp_shutdown_nonboot_cpus()
1188 * Ensure all but the reboot CPU are offline. in smp_shutdown_nonboot_cpus()
1207 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1208 * @cpu: cpu that just started
1210 * It must be called by the arch code on the new cpu, before the new cpu
1211 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1213 void notify_cpu_starting(unsigned int cpu) in notify_cpu_starting() argument
1215 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in notify_cpu_starting()
1216 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); in notify_cpu_starting()
1219 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ in notify_cpu_starting()
1220 cpumask_set_cpu(cpu, &cpus_booted_once_mask); in notify_cpu_starting()
1221 while (st->state < target) { in notify_cpu_starting()
1222 st->state++; in notify_cpu_starting()
1223 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); in notify_cpu_starting()
1233 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1240 /* Happens for the boot cpu */ in cpuhp_online_idle()
1250 st->state = CPUHP_AP_ONLINE_IDLE; in cpuhp_online_idle()
1255 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) in _cpu_up() argument
1257 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_up()
1263 if (!cpu_present(cpu)) { in _cpu_up()
1264 ret = -EINVAL; in _cpu_up()
1272 if (st->state >= target) in _cpu_up()
1275 if (st->state == CPUHP_OFFLINE) { in _cpu_up()
1276 /* Let it fail before we try to bring the cpu up */ in _cpu_up()
1277 idle = idle_thread_get(cpu); in _cpu_up()
1288 * If the current CPU state is in the range of the AP hotplug thread, in _cpu_up()
1291 if (st->state > CPUHP_BRINGUP_CPU) { in _cpu_up()
1292 ret = cpuhp_kick_ap_work(cpu); in _cpu_up()
1307 ret = cpuhp_up_callbacks(cpu, st, target); in _cpu_up()
1315 static int cpu_up(unsigned int cpu, enum cpuhp_state target) in cpu_up() argument
1319 if (!cpu_possible(cpu)) { in cpu_up()
1320 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", in cpu_up()
1321 cpu); in cpu_up()
1325 return -EINVAL; in cpu_up()
1328 err = try_online_node(cpu_to_node(cpu)); in cpu_up()
1335 err = -EBUSY; in cpu_up()
1338 if (!cpu_smt_allowed(cpu)) { in cpu_up()
1339 err = -EPERM; in cpu_up()
1343 err = _cpu_up(cpu, 0, target); in cpu_up()
1350 * cpu_device_up - Bring up a cpu device
1351 * @dev: Pointer to the cpu device to online
1353 * This function is meant to be used by device core cpu subsystem only.
1359 return cpu_up(dev->id, CPUHP_ONLINE); in cpu_device_up()
1362 int add_cpu(unsigned int cpu) in add_cpu() argument
1367 ret = device_online(get_cpu_device(cpu)); in add_cpu()
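
cpu_up()/cpu_down() above are internal; the entry points a caller is meant to use are the wrappers shown here, remove_cpu() and add_cpu(), which go through device_offline()/device_online() under the device hotplug lock. A minimal sketch, assuming CPU 1 is just an arbitrary example and that error handling is trimmed:

#include <linux/cpu.h>

/* Sketch: take a secondary CPU down and bring it back using the
 * wrappers defined above. */
static int toggle_cpu1(void)
{
	int ret;

	ret = remove_cpu(1);	/* offline via device_offline() */
	if (ret)
		return ret;
	return add_cpu(1);	/* online again via device_online() */
}
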
1375 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1376 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1378 * On some architectures like arm64, we can hibernate on any CPU, but on
1379 * wake up the CPU we hibernated on might be offline as a side effect of
1387 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n"); in bringup_hibernate_cpu()
1390 pr_err("Failed to bring hibernate-CPU up!\n"); in bringup_hibernate_cpu()
1399 unsigned int cpu; in bringup_nonboot_cpus() local
1401 for_each_present_cpu(cpu) { in bringup_nonboot_cpus()
1404 if (!cpu_online(cpu)) in bringup_nonboot_cpus()
1405 cpu_up(cpu, CPUHP_ONLINE); in bringup_nonboot_cpus()
1414 int cpu, error = 0; in freeze_secondary_cpus() local
1417 if (primary == -1) { in freeze_secondary_cpus()
1427 * We take down all of the non-boot CPUs in one shot to avoid races in freeze_secondary_cpus()
1428 * with the userspace trying to use the CPU hotplug at the same time in freeze_secondary_cpus()
1432 pr_info("Disabling non-boot CPUs ...\n"); in freeze_secondary_cpus()
1433 for_each_online_cpu(cpu) { in freeze_secondary_cpus()
1434 if (cpu == primary) in freeze_secondary_cpus()
1438 pr_info("Wakeup pending. Abort CPU freeze\n"); in freeze_secondary_cpus()
1439 error = -EBUSY; in freeze_secondary_cpus()
1443 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); in freeze_secondary_cpus()
1444 error = _cpu_down(cpu, 1, CPUHP_OFFLINE); in freeze_secondary_cpus()
1445 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); in freeze_secondary_cpus()
1447 cpumask_set_cpu(cpu, frozen_cpus); in freeze_secondary_cpus()
1449 pr_err("Error taking CPU%d down: %d\n", cpu, error); in freeze_secondary_cpus()
1457 pr_err("Non-boot CPUs are not disabled\n"); in freeze_secondary_cpus()
1480 int cpu, error; in thaw_secondary_cpus() local
1482 /* Allow everyone to use the CPU hotplug again */ in thaw_secondary_cpus()
1488 pr_info("Enabling non-boot CPUs ...\n"); in thaw_secondary_cpus()
1492 for_each_cpu(cpu, frozen_cpus) { in thaw_secondary_cpus()
1493 trace_suspend_resume(TPS("CPU_ON"), cpu, true); in thaw_secondary_cpus()
1494 error = _cpu_up(cpu, 1, CPUHP_ONLINE); in thaw_secondary_cpus()
1495 trace_suspend_resume(TPS("CPU_ON"), cpu, false); in thaw_secondary_cpus()
1497 pr_info("CPU%d is up\n", cpu); in thaw_secondary_cpus()
1500 pr_warn("Error taking CPU%d up: %d\n", cpu, error); in thaw_secondary_cpus()
1513 return -ENOMEM; in alloc_frozen_cpus()
1519 * When callbacks for CPU hotplug notifications are being executed, we must
1523 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1525 * This synchronization is implemented by mutually excluding regular CPU
1558 * to disable cpu hotplug to avoid cpu hotplug race. in cpu_hotplug_pm_sync_init()
1575 .startup.single = NULL,
1576 .teardown.single = NULL,
1581 .startup.single = smpboot_create_threads,
1582 .teardown.single = NULL,
1587 .startup.single = perf_event_init_cpu,
1588 .teardown.single = perf_event_exit_cpu,
1592 .startup.single = random_prepare_cpu,
1593 .teardown.single = NULL,
1597 .startup.single = workqueue_prepare_cpu,
1598 .teardown.single = NULL,
1602 .startup.single = hrtimers_prepare_cpu,
1603 .teardown.single = hrtimers_dead_cpu,
1607 .startup.single = smpcfd_prepare_cpu,
1608 .teardown.single = smpcfd_dead_cpu,
1612 .startup.single = relay_prepare_cpu,
1613 .teardown.single = NULL,
1617 .startup.single = slab_prepare_cpu,
1618 .teardown.single = slab_dead_cpu,
1622 .startup.single = rcutree_prepare_cpu,
1623 .teardown.single = rcutree_dead_cpu,
1626 * On the tear-down path, timers_dead_cpu() must be invoked
1632 .startup.single = timers_prepare_cpu,
1633 .teardown.single = timers_dead_cpu,
1635 /* Kicks the plugged cpu into life */
1637 .name = "cpu:bringup",
1638 .startup.single = bringup_cpu,
1639 .teardown.single = finish_cpu,
1642 /* Final state before CPU kills itself */
1647 * Last state before CPU enters the idle loop to die. Transient state
1657 .startup.single = sched_cpu_starting,
1658 .teardown.single = sched_cpu_dying,
1662 .startup.single = NULL,
1663 .teardown.single = rcutree_dying_cpu,
1667 .startup.single = NULL,
1668 .teardown.single = smpcfd_dying_cpu,
1680 .name = "cpu:teardown",
1681 .startup.single = NULL,
1682 .teardown.single = takedown_cpu,
1688 .startup.single = smpboot_unpark_threads,
1689 .teardown.single = smpboot_park_threads,
1693 .startup.single = irq_affinity_online_cpu,
1694 .teardown.single = NULL,
1698 .startup.single = perf_event_init_cpu,
1699 .teardown.single = perf_event_exit_cpu,
1703 .startup.single = lockup_detector_online_cpu,
1704 .teardown.single = lockup_detector_offline_cpu,
1708 .startup.single = workqueue_online_cpu,
1709 .teardown.single = workqueue_offline_cpu,
1713 .startup.single = random_online_cpu,
1714 .teardown.single = NULL,
1718 .startup.single = rcutree_online_cpu,
1719 .teardown.single = rcutree_offline_cpu,
1727 /* Last state is scheduler control setting the cpu active */
1730 .startup.single = sched_cpu_activate,
1731 .teardown.single = sched_cpu_deactivate,
1735 /* CPU is fully up and running. */
1738 .startup.single = NULL,
1739 .teardown.single = NULL,
1747 return -EINVAL; in cpuhp_cb_check()
1771 return -EINVAL; in cpuhp_reserve_state()
1775 if (!step->name) in cpuhp_reserve_state()
1778 WARN(1, "No more dynamic states available for CPU hotplug\n"); in cpuhp_reserve_state()
1779 return -ENOSPC; in cpuhp_reserve_state()
1783 int (*startup)(unsigned int cpu), in cpuhp_store_callbacks() argument
1784 int (*teardown)(unsigned int cpu), in cpuhp_store_callbacks() argument
1787 /* (Un)Install the callbacks for further cpu hotplug operations */ in cpuhp_store_callbacks()
1808 if (name && sp->name) in cpuhp_store_callbacks()
1809 return -EBUSY; in cpuhp_store_callbacks()
1811 sp->startup.single = startup; in cpuhp_store_callbacks()
1812 sp->teardown.single = teardown; in cpuhp_store_callbacks()
1813 sp->name = name; in cpuhp_store_callbacks()
1814 sp->multi_instance = multi_instance; in cpuhp_store_callbacks()
1815 INIT_HLIST_HEAD(&sp->list); in cpuhp_store_callbacks()
1821 return cpuhp_get_step(state)->teardown.single; in cpuhp_get_teardown_cb()
1826 * on the current CPU.
1828 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_issue_call() argument
1838 if ((bringup && !sp->startup.single) || in cpuhp_issue_call()
1839 (!bringup && !sp->teardown.single)) in cpuhp_issue_call()
1847 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); in cpuhp_issue_call()
1849 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
1851 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
1865 int cpu; in cpuhp_rollback_install() local
1868 for_each_present_cpu(cpu) { in cpuhp_rollback_install()
1869 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_rollback_install()
1870 int cpustate = st->state; in cpuhp_rollback_install()
1872 if (cpu >= failedcpu) in cpuhp_rollback_install()
1875 /* Did we invoke the startup call on that cpu ? */ in cpuhp_rollback_install()
1877 cpuhp_issue_call(cpu, state, false, node); in cpuhp_rollback_install()
1886 int cpu; in __cpuhp_state_add_instance_cpuslocked() local
1892 if (sp->multi_instance == false) in __cpuhp_state_add_instance_cpuslocked()
1893 return -EINVAL; in __cpuhp_state_add_instance_cpuslocked()
1897 if (!invoke || !sp->startup.multi) in __cpuhp_state_add_instance_cpuslocked()
1901 * Try to call the startup callback for each present cpu in __cpuhp_state_add_instance_cpuslocked()
1902 * depending on the hotplug state of the cpu. in __cpuhp_state_add_instance_cpuslocked()
1904 for_each_present_cpu(cpu) { in __cpuhp_state_add_instance_cpuslocked()
1905 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_add_instance_cpuslocked()
1906 int cpustate = st->state; in __cpuhp_state_add_instance_cpuslocked()
1911 ret = cpuhp_issue_call(cpu, state, true, node); in __cpuhp_state_add_instance_cpuslocked()
1913 if (sp->teardown.multi) in __cpuhp_state_add_instance_cpuslocked()
1914 cpuhp_rollback_install(cpu, state, node); in __cpuhp_state_add_instance_cpuslocked()
1920 hlist_add_head(node, &sp->list); in __cpuhp_state_add_instance_cpuslocked()
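
The add-instance path above is the multi-instance flavour of the state machine: one hotplug state, many hlist_node instances, each of which gets the startup/teardown callbacks. A sketch of how a driver would typically use it through the cpuhp_setup_state_multi()/cpuhp_state_add_instance() wrappers; "mydrv" names and struct mydrv_queue are illustrative only, the wrapper names are the usual linux/cpuhotplug.h API.

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct mydrv_queue {
	struct hlist_node node;		/* linked into the state's ->list */
	/* per-queue data ... */
};

/* Multi-instance callbacks take the instance node as a second argument,
 * matching the ->startup.multi/->teardown.multi prototypes above. */
static int mydrv_queue_online(unsigned int cpu, struct hlist_node *node)
{
	struct mydrv_queue *q = hlist_entry(node, struct mydrv_queue, node);

	/* wire @q up to @cpu */
	return 0;
}

static int mydrv_queue_offline(unsigned int cpu, struct hlist_node *node)
{
	/* undo mydrv_queue_online() */
	return 0;
}

static enum cpuhp_state mydrv_state;

static int mydrv_register(struct mydrv_queue *q)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:queue",
				      mydrv_queue_online, mydrv_queue_offline);
	if (ret < 0)
		return ret;
	mydrv_state = ret;	/* dynamic slot allocated for us */

	/* invokes mydrv_queue_online() for this instance on each
	 * present CPU that has already reached the state */
	return cpuhp_state_add_instance(mydrv_state, &q->node);
}
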
1939 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
1942 * cpu state >= @state
1957 int (*startup)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
1958 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
1961 int cpu, ret = 0; in __cpuhp_setup_state_cpuslocked() local
1967 return -EINVAL; in __cpuhp_setup_state_cpuslocked()
1984 * Try to call the startup callback for each present cpu in __cpuhp_setup_state_cpuslocked()
1985 * depending on the hotplug state of the cpu. in __cpuhp_setup_state_cpuslocked()
1987 for_each_present_cpu(cpu) { in __cpuhp_setup_state_cpuslocked()
1988 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_setup_state_cpuslocked()
1989 int cpustate = st->state; in __cpuhp_setup_state_cpuslocked()
1994 ret = cpuhp_issue_call(cpu, state, true, NULL); in __cpuhp_setup_state_cpuslocked()
1997 cpuhp_rollback_install(cpu, state, NULL); in __cpuhp_setup_state_cpuslocked()
2016 int (*startup)(unsigned int cpu), in __cpuhp_setup_state() argument
2017 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state() argument
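
For the common single-callback case, registration ends up in __cpuhp_setup_state() above via the cpuhp_setup_state() wrapper. A sketch of the usual usage, assuming illustrative "mydrv" names: passing CPUHP_AP_ONLINE_DYN asks cpuhp_reserve_state() for a free dynamic slot, the chosen slot is returned and is later handed back to cpuhp_remove_state().

#include <linux/cpuhotplug.h>

static int mydrv_cpu_online(unsigned int cpu)
{
	/* set up per-CPU resources for @cpu */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	/* tear the per-CPU resources down again */
	return 0;
}

static enum cpuhp_state mydrv_online_state;

static int __init mydrv_init(void)
{
	int ret;

	/* Callbacks run immediately on all CPUs that are already online,
	 * and on every CPU that comes online later. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	mydrv_online_state = ret;
	return 0;
}

static void __exit mydrv_exit(void)
{
	cpuhp_remove_state(mydrv_online_state);
}
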
2034 int cpu; in __cpuhp_state_remove_instance() local
2038 if (!sp->multi_instance) in __cpuhp_state_remove_instance()
2039 return -EINVAL; in __cpuhp_state_remove_instance()
2047 * Call the teardown callback for each present cpu depending in __cpuhp_state_remove_instance()
2048 * on the hotplug state of the cpu. This function is not in __cpuhp_state_remove_instance()
2051 for_each_present_cpu(cpu) { in __cpuhp_state_remove_instance()
2052 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_remove_instance()
2053 int cpustate = st->state; in __cpuhp_state_remove_instance()
2056 cpuhp_issue_call(cpu, state, false, node); in __cpuhp_state_remove_instance()
2069 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
2072 * cpu state >= @state
2081 int cpu; in __cpuhp_remove_state_cpuslocked() local
2088 if (sp->multi_instance) { in __cpuhp_remove_state_cpuslocked()
2089 WARN(!hlist_empty(&sp->list), in __cpuhp_remove_state_cpuslocked()
2099 * Call the teardown callback for each present cpu depending in __cpuhp_remove_state_cpuslocked()
2100 * on the hotplug state of the cpu. This function is not in __cpuhp_remove_state_cpuslocked()
2103 for_each_present_cpu(cpu) { in __cpuhp_remove_state_cpuslocked()
2104 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_remove_state_cpuslocked()
2105 int cpustate = st->state; in __cpuhp_remove_state_cpuslocked()
2108 cpuhp_issue_call(cpu, state, false, NULL); in __cpuhp_remove_state_cpuslocked()
2125 static void cpuhp_offline_cpu_device(unsigned int cpu) in cpuhp_offline_cpu_device() argument
2127 struct device *dev = get_cpu_device(cpu); in cpuhp_offline_cpu_device()
2129 dev->offline = true; in cpuhp_offline_cpu_device()
2131 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); in cpuhp_offline_cpu_device()
2134 static void cpuhp_online_cpu_device(unsigned int cpu) in cpuhp_online_cpu_device() argument
2136 struct device *dev = get_cpu_device(cpu); in cpuhp_online_cpu_device()
2138 dev->offline = false; in cpuhp_online_cpu_device()
2140 kobject_uevent(&dev->kobj, KOBJ_ONLINE); in cpuhp_online_cpu_device()
2145 int cpu, ret = 0; in cpuhp_smt_disable() local
2148 for_each_online_cpu(cpu) { in cpuhp_smt_disable()
2149 if (topology_is_primary_thread(cpu)) in cpuhp_smt_disable()
2151 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in cpuhp_smt_disable()
2155 * As this needs to hold the cpu maps lock it's impossible in cpuhp_smt_disable()
2157 * cpu_down() which takes cpu maps lock. cpu maps lock in cpuhp_smt_disable()
2167 cpuhp_offline_cpu_device(cpu); in cpuhp_smt_disable()
2177 int cpu, ret = 0; in cpuhp_smt_enable() local
2181 for_each_present_cpu(cpu) { in cpuhp_smt_enable()
2183 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) in cpuhp_smt_enable()
2185 ret = _cpu_up(cpu, 0, CPUHP_ONLINE); in cpuhp_smt_enable()
2189 cpuhp_online_cpu_device(cpu); in cpuhp_smt_enable()
2200 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in show_cpuhp_state()
2202 return sprintf(buf, "%d\n", st->state); in show_cpuhp_state()
2210 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in write_cpuhp_target()
2220 return -EINVAL; in write_cpuhp_target()
2223 return -EINVAL; in write_cpuhp_target()
2232 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; in write_cpuhp_target()
2237 if (st->state < target) in write_cpuhp_target()
2238 ret = cpu_up(dev->id, target); in write_cpuhp_target()
2239 else if (st->state > target) in write_cpuhp_target()
2240 ret = cpu_down(dev->id, target); in write_cpuhp_target()
2241 else if (WARN_ON(st->target != target)) in write_cpuhp_target()
2242 st->target = target; in write_cpuhp_target()
2251 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in show_cpuhp_target()
2253 return sprintf(buf, "%d\n", st->target); in show_cpuhp_target()
2262 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in write_cpuhp_fail()
2271 return -EINVAL; in write_cpuhp_fail()
2277 return -EINVAL; in write_cpuhp_fail()
2284 if (!sp->startup.single && !sp->teardown.single) in write_cpuhp_fail()
2285 ret = -EINVAL; in write_cpuhp_fail()
2290 st->fail = fail; in write_cpuhp_fail()
2298 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in show_cpuhp_fail()
2300 return sprintf(buf, "%d\n", st->fail); in show_cpuhp_fail()
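
write_cpuhp_target() and write_cpuhp_fail() above back the per-CPU sysfs attributes used to drive and fault-inject the state machine: writing a state number into "fail" arms st->fail, so the next hotplug operation that reaches that state returns -EAGAIN and rolls back (see the st->fail check in cpuhp_invoke_callback() near the top). A userspace sketch, assuming the attributes appear under the usual /sys/devices/system/cpu/cpuN/hotplug/ layout and that "50" is only a placeholder for a state number taken from the accompanying states listing:

#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	/* Arm a failure at a chosen state on CPU1, then trigger an
	 * offline and watch the rollback in the trace/dmesg output. */
	write_sysfs("/sys/devices/system/cpu/cpu1/hotplug/fail", "50");
	write_sysfs("/sys/devices/system/cpu/cpu1/online", "0");
	return 0;
}
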
2328 if (sp->name) { in show_cpuhp_states()
2329 cur = sprintf(buf, "%3d: %s\n", i, sp->name); in show_cpuhp_states()
2365 return -EINVAL; in __store_smt_control()
2368 return -EPERM; in __store_smt_control()
2371 return -ENODEV; in __store_smt_control()
2398 return -ENODEV; in __store_smt_control()
2415 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); in show_smt_control()
2429 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active()); in show_smt_active()
2447 return sysfs_create_group(&cpu_subsys.dev_root->kobj, in cpu_smt_sysfs_init()
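
cpuhp_smt_disable()/cpuhp_smt_enable() and the *_smt_control() handlers above implement the global SMT knob. A userspace sketch of driving it, assuming the conventional /sys/devices/system/cpu/smt/ paths ("control" accepts values such as on/off/forceoff, "active" reports whether sibling threads are currently schedulable):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");

	if (!f)
		return 1;
	fputs("off", f);	/* offline all non-primary SMT siblings */
	return fclose(f);
}
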
2453 int cpu, ret; in cpuhp_sysfs_init() local
2459 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, in cpuhp_sysfs_init()
2464 for_each_possible_cpu(cpu) { in cpuhp_sysfs_init()
2465 struct device *dev = get_cpu_device(cpu); in cpuhp_sysfs_init()
2469 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group); in cpuhp_sysfs_init()
2482 * It is used by cpumask_of() to get a constant address to a CPU
2483 * mask value that has a single bit set only.
2486 /* cpu_bit_bitmap[0] is empty - so we can back into it */
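
Because cpumask_of(cpu) resolves to a constant single-bit mask backed by cpu_bit_bitmap, as the comment above describes, it can be passed anywhere a cpumask pointer is expected without allocating a temporary mask; takedown_cpu() uses it that way when dispatching take_cpu_down() via stop_machine_cpuslocked(..., cpumask_of(cpu)). A small sketch of the same idiom, with pin_task_to_cpu() as a hypothetical helper:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Restrict @p to a single CPU using the constant mask from
 * cpu_bit_bitmap instead of building one on the stack. */
static int pin_task_to_cpu(struct task_struct *p, unsigned int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
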
2553 void set_cpu_online(unsigned int cpu, bool online) in set_cpu_online() argument
2559 * regular CPU hotplug is properly serialized. in set_cpu_online()
2566 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
2569 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
2579 int cpu = smp_processor_id(); in boot_cpu_init() local
2581 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ in boot_cpu_init()
2582 set_cpu_online(cpu, true); in boot_cpu_init()
2583 set_cpu_active(cpu, true); in boot_cpu_init()
2584 set_cpu_present(cpu, true); in boot_cpu_init()
2585 set_cpu_possible(cpu, true); in boot_cpu_init()
2588 __boot_cpu_id = cpu; in boot_cpu_init()
2605 * optional CPU mitigations.