
Lines matching refs:cpu in kernel/cpu.c (Linux CPU hotplug core)

126 int (*single)(unsigned int cpu);
127 int (*multi)(unsigned int cpu,
131 int (*single)(unsigned int cpu);
132 int (*multi)(unsigned int cpu,
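These four prototypes are the startup and teardown members of struct cpuhp_step: each hotplug state supplies either the single-CPU flavor or the multi-instance flavor of each callback, never both. A condensed sketch of the struct (field names from kernel/cpu.c; remaining members omitted):

#include <linux/list.h>	/* struct hlist_node */

struct cpuhp_step {
	const char *name;
	union {
		int (*single)(unsigned int cpu);
		int (*multi)(unsigned int cpu, struct hlist_node *node);
	} startup;
	union {
		int (*single)(unsigned int cpu);
		int (*multi)(unsigned int cpu, struct hlist_node *node);
	} teardown;
	/* ... cant_stop, multi_instance flags etc. omitted ... */
};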
167 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, in cpuhp_invoke_callback() argument
171 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_callback()
173 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
174 int (*cb)(unsigned int cpu); in cpuhp_invoke_callback()
191 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
192 ret = cb(cpu); in cpuhp_invoke_callback()
193 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
201 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
202 ret = cbm(cpu, node); in cpuhp_invoke_callback()
203 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
213 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
214 ret = cbm(cpu, node); in cpuhp_invoke_callback()
215 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
238 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
239 ret = cbm(cpu, node); in cpuhp_invoke_callback()
240 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
454 static inline bool cpu_smt_thread_allowed(unsigned int cpu) in cpu_smt_thread_allowed() argument
457 return topology_smt_thread_allowed(cpu); in cpu_smt_thread_allowed()
463 static inline bool cpu_bootable(unsigned int cpu) in cpu_bootable() argument
465 if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) in cpu_bootable()
476 if (topology_is_primary_thread(cpu)) in cpu_bootable()
485 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask); in cpu_bootable()
496 static inline bool cpu_bootable(unsigned int cpu) { return true; } in cpu_bootable() argument
500 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) in cpuhp_set_state() argument
511 if (cpu_dying(cpu) != !bringup) in cpuhp_set_state()
512 set_cpu_dying(cpu, !bringup); in cpuhp_set_state()
518 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st, in cpuhp_reset_state() argument
546 if (cpu_dying(cpu) != !bringup) in cpuhp_reset_state()
547 set_cpu_dying(cpu, !bringup); in cpuhp_reset_state()
567 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st, in cpuhp_kick_ap() argument
573 prev_state = cpuhp_set_state(cpu, st, target); in cpuhp_kick_ap()
576 cpuhp_reset_state(cpu, st, prev_state); in cpuhp_kick_ap()
583 static int bringup_wait_for_ap(unsigned int cpu) in bringup_wait_for_ap() argument
585 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_wait_for_ap()
589 if (WARN_ON_ONCE((!cpu_online(cpu)))) in bringup_wait_for_ap()
602 if (!cpu_bootable(cpu)) in bringup_wait_for_ap()
608 return cpuhp_kick_ap(cpu, st, st->target); in bringup_wait_for_ap()
611 static int bringup_cpu(unsigned int cpu) in bringup_cpu() argument
613 struct task_struct *idle = idle_thread_get(cpu); in bringup_cpu()
630 ret = __cpu_up(cpu, idle); in bringup_cpu()
634 return bringup_wait_for_ap(cpu); in bringup_cpu()
637 static int finish_cpu(unsigned int cpu) in finish_cpu() argument
639 struct task_struct *idle = idle_thread_get(cpu); in finish_cpu()
689 unsigned int cpu, in __cpuhp_invoke_callback_range() argument
700 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL); in __cpuhp_invoke_callback_range()
706 cpu, bringup ? "UP" : "DOWN", in __cpuhp_invoke_callback_range()
720 unsigned int cpu, in cpuhp_invoke_callback_range() argument
724 return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false); in cpuhp_invoke_callback_range()
728 unsigned int cpu, in cpuhp_invoke_callback_range_nofail() argument
732 __cpuhp_invoke_callback_range(bringup, cpu, st, target, true); in cpuhp_invoke_callback_range_nofail()
749 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_up_callbacks() argument
755 ret = cpuhp_invoke_callback_range(true, cpu, st, target); in cpuhp_up_callbacks()
758 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_up_callbacks()
761 cpuhp_reset_state(cpu, st, prev_state); in cpuhp_up_callbacks()
763 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, in cpuhp_up_callbacks()
772 static int cpuhp_should_run(unsigned int cpu) in cpuhp_should_run() argument
793 static void cpuhp_thread_fun(unsigned int cpu) in cpuhp_thread_fun() argument
829 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
837 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
860 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_invoke_ap_callback() argument
863 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_ap_callback()
866 if (!cpu_online(cpu)) in cpuhp_invoke_ap_callback()
880 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_invoke_ap_callback()
910 static int cpuhp_kick_ap_work(unsigned int cpu) in cpuhp_kick_ap_work() argument
912 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_kick_ap_work()
922 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
923 ret = cpuhp_kick_ap(cpu, st, st->target); in cpuhp_kick_ap_work()
924 trace_cpuhp_exit(cpu, st->state, prev_state, ret); in cpuhp_kick_ap_work()
940 int cpu; in cpuhp_init_state() local
942 for_each_possible_cpu(cpu) { in cpuhp_init_state()
943 st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_init_state()
1004 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) argument
1019 void clear_tasks_mm_cpumask(int cpu) in clear_tasks_mm_cpumask() argument
1030 WARN_ON(cpu_online(cpu)); in clear_tasks_mm_cpumask()
1042 arch_clear_mm_cpumask_cpu(cpu, t->mm); in clear_tasks_mm_cpumask()
1053 int err, cpu = smp_processor_id(); in take_cpu_down() local
1069 cpuhp_invoke_callback_range_nofail(false, cpu, st, target); in take_cpu_down()
1074 tick_offline_cpu(cpu); in take_cpu_down()
1076 stop_machine_park(cpu); in take_cpu_down()
1080 static int takedown_cpu(unsigned int cpu) in takedown_cpu() argument
1082 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in takedown_cpu()
1097 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); in takedown_cpu()
1105 BUG_ON(cpu_online(cpu)); in takedown_cpu()
1120 hotplug_cpu__broadcast_tick_pull(cpu); in takedown_cpu()
1122 __cpu_die(cpu); in takedown_cpu()
1124 tick_cleanup_dead_cpu(cpu); in takedown_cpu()
1125 rcutree_migrate_callbacks(cpu); in takedown_cpu()
1151 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_down_callbacks() argument
1157 ret = cpuhp_invoke_callback_range(false, cpu, st, target); in cpuhp_down_callbacks()
1160 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_down_callbacks()
1163 cpuhp_reset_state(cpu, st, prev_state); in cpuhp_down_callbacks()
1166 WARN_ON(cpuhp_invoke_callback_range(true, cpu, st, in cpuhp_down_callbacks()
1174 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, in _cpu_down() argument
1177 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_down()
1183 if (!cpu_present(cpu)) in _cpu_down()
1190 prev_state = cpuhp_set_state(cpu, st, target); in _cpu_down()
1197 ret = cpuhp_kick_ap_work(cpu); in _cpu_down()
1218 ret = cpuhp_down_callbacks(cpu, st, target); in _cpu_down()
1221 cpuhp_reset_state(cpu, st, prev_state); in _cpu_down()
1224 WARN(1, "DEAD callback error for CPU%d", cpu); in _cpu_down()
1241 unsigned int cpu; member
1249 return _cpu_down(work->cpu, 0, work->target); in __cpu_down_maps_locked()
1252 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) in cpu_down_maps_locked() argument
1254 struct cpu_down_work work = { .cpu = cpu, .target = target, }; in cpu_down_maps_locked()
1271 for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) { in cpu_down_maps_locked()
1272 if (cpu != work.cpu) in cpu_down_maps_locked()
1273 return work_on_cpu(cpu, __cpu_down_maps_locked, &work); in cpu_down_maps_locked()
1278 static int cpu_down(unsigned int cpu, enum cpuhp_state target) in cpu_down() argument
1283 err = cpu_down_maps_locked(cpu, target); in cpu_down()
1303 int remove_cpu(unsigned int cpu) in remove_cpu() argument
1308 ret = device_offline(get_cpu_device(cpu)); in remove_cpu()
1317 unsigned int cpu; in smp_shutdown_nonboot_cpus() local
1330 for_each_online_cpu(cpu) { in smp_shutdown_nonboot_cpus()
1331 if (cpu == primary_cpu) in smp_shutdown_nonboot_cpus()
1334 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in smp_shutdown_nonboot_cpus()
1337 cpu, error); in smp_shutdown_nonboot_cpus()
1368 void notify_cpu_starting(unsigned int cpu) in notify_cpu_starting() argument
1370 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in notify_cpu_starting()
1373 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ in notify_cpu_starting()
1374 cpumask_set_cpu(cpu, &cpus_booted_once_mask); in notify_cpu_starting()
1379 cpuhp_invoke_callback_range_nofail(true, cpu, st, target); in notify_cpu_starting()
1406 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) in _cpu_up() argument
1408 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_up()
1414 if (!cpu_present(cpu)) { in _cpu_up()
1428 idle = idle_thread_get(cpu); in _cpu_up()
1437 cpuhp_set_state(cpu, st, target); in _cpu_up()
1443 ret = cpuhp_kick_ap_work(cpu); in _cpu_up()
1458 ret = cpuhp_up_callbacks(cpu, st, target); in _cpu_up()
1466 static int cpu_up(unsigned int cpu, enum cpuhp_state target) in cpu_up() argument
1470 if (!cpu_possible(cpu)) { in cpu_up()
1472 cpu); in cpu_up()
1479 err = try_online_node(cpu_to_node(cpu)); in cpu_up()
1489 if (!cpu_bootable(cpu)) { in cpu_up()
1494 err = _cpu_up(cpu, 0, target); in cpu_up()
1515 int add_cpu(unsigned int cpu) in add_cpu() argument
1520 ret = device_online(get_cpu_device(cpu)); in add_cpu()
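remove_cpu() and add_cpu() are the exported wrappers around device_offline()/device_online() seen above, and are the usual way for kernel code to take a CPU offline and bring it back. A minimal sketch, assuming the CPU number is valid and present:

#include <linux/cpu.h>

/* Offline a CPU and bring it back online; both helpers take and release
 * the device hotplug lock internally. */
static int cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);		/* device_offline() under the hood */
	if (ret)
		return ret;

	return add_cpu(cpu);		/* device_online(), up to CPUHP_ONLINE */
}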
1554 unsigned int cpu; in bringup_nonboot_cpus() local
1556 for_each_present_cpu(cpu) { in bringup_nonboot_cpus()
1559 if (!cpu_online(cpu)) in bringup_nonboot_cpus()
1560 cpu_up(cpu, CPUHP_ONLINE); in bringup_nonboot_cpus()
1569 int cpu, error = 0; in freeze_secondary_cpus() local
1588 for_each_online_cpu(cpu) { in freeze_secondary_cpus()
1589 if (cpu == primary) in freeze_secondary_cpus()
1598 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); in freeze_secondary_cpus()
1599 error = _cpu_down(cpu, 1, CPUHP_OFFLINE); in freeze_secondary_cpus()
1600 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); in freeze_secondary_cpus()
1602 cpumask_set_cpu(cpu, frozen_cpus); in freeze_secondary_cpus()
1604 pr_err("Error taking CPU%d down: %d\n", cpu, error); in freeze_secondary_cpus()
1635 int cpu, error; in thaw_secondary_cpus() local
1647 for_each_cpu(cpu, frozen_cpus) { in thaw_secondary_cpus()
1648 trace_suspend_resume(TPS("CPU_ON"), cpu, true); in thaw_secondary_cpus()
1649 error = _cpu_up(cpu, 1, CPUHP_ONLINE); in thaw_secondary_cpus()
1650 trace_suspend_resume(TPS("CPU_ON"), cpu, false); in thaw_secondary_cpus()
1652 pr_info("CPU%d is up\n", cpu); in thaw_secondary_cpus()
1655 pr_warn("Error taking CPU%d up: %d\n", cpu, error); in thaw_secondary_cpus()
1945 int (*startup)(unsigned int cpu), in cpuhp_store_callbacks() argument
1946 int (*teardown)(unsigned int cpu), in cpuhp_store_callbacks() argument
1990 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_issue_call() argument
2008 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); in cpuhp_issue_call()
2010 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
2012 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
2026 int cpu; in cpuhp_rollback_install() local
2029 for_each_present_cpu(cpu) { in cpuhp_rollback_install()
2030 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_rollback_install()
2033 if (cpu >= failedcpu) in cpuhp_rollback_install()
2038 cpuhp_issue_call(cpu, state, false, node); in cpuhp_rollback_install()
2047 int cpu; in __cpuhp_state_add_instance_cpuslocked() local
2065 for_each_present_cpu(cpu) { in __cpuhp_state_add_instance_cpuslocked()
2066 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_add_instance_cpuslocked()
2072 ret = cpuhp_issue_call(cpu, state, true, node); in __cpuhp_state_add_instance_cpuslocked()
2075 cpuhp_rollback_install(cpu, state, node); in __cpuhp_state_add_instance_cpuslocked()
2119 int (*startup)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
2120 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
2123 int cpu, ret = 0; in __cpuhp_setup_state_cpuslocked() local
2149 for_each_present_cpu(cpu) { in __cpuhp_setup_state_cpuslocked()
2150 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_setup_state_cpuslocked()
2156 ret = cpuhp_issue_call(cpu, state, true, NULL); in __cpuhp_setup_state_cpuslocked()
2159 cpuhp_rollback_install(cpu, state, NULL); in __cpuhp_setup_state_cpuslocked()
2178 int (*startup)(unsigned int cpu), in __cpuhp_setup_state() argument
2179 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state() argument
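These startup/teardown pairs are what a subsystem passes to the public registration API. A minimal registration sketch using the dynamic state range; the callback bodies and the "subsys/demo:online" name are made up for illustration:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state demo_state;

static int demo_online(unsigned int cpu)
{
	/* invoked for each CPU already online at registration time and
	 * for every CPU that comes online later */
	return 0;
}

static int demo_offline(unsigned int cpu)
{
	/* invoked as each CPU goes down; NULL is also allowed if there is
	 * nothing to tear down */
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/demo:online",
				demo_online, demo_offline);
	if (ret < 0)
		return ret;

	demo_state = ret;	/* dynamic range: the allocated state is returned */
	return 0;
}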
2196 int cpu; in __cpuhp_state_remove_instance() local
2213 for_each_present_cpu(cpu) { in __cpuhp_state_remove_instance()
2214 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_remove_instance()
2218 cpuhp_issue_call(cpu, state, false, node); in __cpuhp_state_remove_instance()
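For the multi-instance variant, each registered object embeds a struct hlist_node that is handed back to the callbacks, which is what the per-CPU add/remove loops above iterate over. A hedged sketch (all names illustrative):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

static enum cpuhp_state demo_multi_state;

struct demo_ctx {
	struct hlist_node node;		/* handed to the callbacks below */
	/* per-instance data ... */
};

static int demo_ctx_online(unsigned int cpu, struct hlist_node *node)
{
	struct demo_ctx *ctx = hlist_entry(node, struct demo_ctx, node);

	(void)ctx;	/* per-(CPU, instance) setup would go here */
	return 0;
}

static int demo_ctx_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int __init demo_multi_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "subsys/demo:multi",
				      demo_ctx_online, demo_ctx_offline);
	if (ret < 0)
		return ret;
	demo_multi_state = ret;
	return 0;
}

/* later, once per device/instance: */
static int demo_register(struct demo_ctx *ctx)
{
	/* invokes demo_ctx_online(cpu, &ctx->node) on all online CPUs */
	return cpuhp_state_add_instance(demo_multi_state, &ctx->node);
}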
2243 int cpu; in __cpuhp_remove_state_cpuslocked() local
2265 for_each_present_cpu(cpu) { in __cpuhp_remove_state_cpuslocked()
2266 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_remove_state_cpuslocked()
2270 cpuhp_issue_call(cpu, state, false, NULL); in __cpuhp_remove_state_cpuslocked()
2287 static void cpuhp_offline_cpu_device(unsigned int cpu) in cpuhp_offline_cpu_device() argument
2289 struct device *dev = get_cpu_device(cpu); in cpuhp_offline_cpu_device()
2296 static void cpuhp_online_cpu_device(unsigned int cpu) in cpuhp_online_cpu_device() argument
2298 struct device *dev = get_cpu_device(cpu); in cpuhp_online_cpu_device()
2307 int cpu, ret = 0; in cpuhp_smt_disable() local
2310 for_each_online_cpu(cpu) { in cpuhp_smt_disable()
2311 if (topology_is_primary_thread(cpu)) in cpuhp_smt_disable()
2317 if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) in cpuhp_smt_disable()
2319 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in cpuhp_smt_disable()
2335 cpuhp_offline_cpu_device(cpu); in cpuhp_smt_disable()
2345 int cpu, ret = 0; in cpuhp_smt_enable() local
2349 for_each_present_cpu(cpu) { in cpuhp_smt_enable()
2351 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) in cpuhp_smt_enable()
2353 if (!cpu_smt_thread_allowed(cpu)) in cpuhp_smt_enable()
2355 ret = _cpu_up(cpu, 0, CPUHP_ONLINE); in cpuhp_smt_enable()
2359 cpuhp_online_cpu_device(cpu); in cpuhp_smt_enable()
2633 int cpu, ret; in cpuhp_sysfs_init() local
2644 for_each_possible_cpu(cpu) { in cpuhp_sysfs_init()
2645 struct device *dev = get_cpu_device(cpu); in cpuhp_sysfs_init()
2724 void set_cpu_online(unsigned int cpu, bool online) in set_cpu_online() argument
2737 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
2740 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
2750 int cpu = smp_processor_id(); in boot_cpu_init() local
2753 set_cpu_online(cpu, true); in boot_cpu_init()
2754 set_cpu_active(cpu, true); in boot_cpu_init()
2755 set_cpu_present(cpu, true); in boot_cpu_init()
2756 set_cpu_possible(cpu, true); in boot_cpu_init()
2759 __boot_cpu_id = cpu; in boot_cpu_init()