
Lines Matching refs:cpu (kernel/cpu.c)

115 int (*single)(unsigned int cpu);
116 int (*multi)(unsigned int cpu,
120 int (*single)(unsigned int cpu);
121 int (*multi)(unsigned int cpu,
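The two pairs of members above are the startup/teardown flavors in struct cpuhp_step: a plain per-CPU "single" callback, and a "multi" callback that is additionally handed the hlist_node of one registered instance. A minimal sketch of what a consumer's callbacks look like (the demo_* names are hypothetical, not from this file):

#include <linux/cpuhotplug.h>
#include <linux/list.h>

/* "single" flavor: called once for the CPU crossing this state. */
static int demo_cpu_online(unsigned int cpu)
{
	/* returning nonzero aborts the transition and triggers rollback */
	return 0;
}

/* "multi" flavor: called once per registered instance, per CPU. */
struct demo_instance {
	struct hlist_node node;		/* handed back as @node below */
};

static int demo_inst_online(unsigned int cpu, struct hlist_node *node)
{
	struct demo_instance *di =
		hlist_entry(node, struct demo_instance, node);

	(void)di;			/* per-instance work for @cpu */
	return 0;
}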
147 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, in cpuhp_invoke_callback() argument
151 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_callback()
153 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
154 int (*cb)(unsigned int cpu); in cpuhp_invoke_callback()
171 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
172 ret = cb(cpu); in cpuhp_invoke_callback()
173 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
183 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
184 ret = cbm(cpu, node); in cpuhp_invoke_callback()
185 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
195 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
196 ret = cbm(cpu, node); in cpuhp_invoke_callback()
197 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
220 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
221 ret = cbm(cpu, node); in cpuhp_invoke_callback()
222 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
424 static inline bool cpu_smt_allowed(unsigned int cpu) in cpu_smt_allowed() argument
429 if (topology_is_primary_thread(cpu)) in cpu_smt_allowed()
438 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask); in cpu_smt_allowed()
449 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } in cpu_smt_allowed() argument
519 static int bringup_wait_for_ap(unsigned int cpu) in bringup_wait_for_ap() argument
521 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_wait_for_ap()
525 if (WARN_ON_ONCE((!cpu_online(cpu)))) in bringup_wait_for_ap()
529 stop_machine_unpark(cpu); in bringup_wait_for_ap()
539 if (!cpu_smt_allowed(cpu)) in bringup_wait_for_ap()
548 static int bringup_cpu(unsigned int cpu) in bringup_cpu() argument
550 struct task_struct *idle = idle_thread_get(cpu); in bringup_cpu()
561 ret = __cpu_up(cpu, idle); in bringup_cpu()
565 return bringup_wait_for_ap(cpu); in bringup_cpu()
572 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) in undo_cpu_up() argument
575 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); in undo_cpu_up()
592 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_up_callbacks() argument
600 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); in cpuhp_up_callbacks()
604 undo_cpu_up(cpu, st); in cpuhp_up_callbacks()
615 static void cpuhp_create(unsigned int cpu) in cpuhp_create() argument
617 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_create()
623 static int cpuhp_should_run(unsigned int cpu) in cpuhp_should_run() argument
644 static void cpuhp_thread_fun(unsigned int cpu) in cpuhp_thread_fun() argument
688 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
696 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
718 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_invoke_ap_callback() argument
721 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_ap_callback()
724 if (!cpu_online(cpu)) in cpuhp_invoke_ap_callback()
738 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_invoke_ap_callback()
768 static int cpuhp_kick_ap_work(unsigned int cpu) in cpuhp_kick_ap_work() argument
770 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_kick_ap_work()
780 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
782 trace_cpuhp_exit(cpu, st->state, prev_state, ret); in cpuhp_kick_ap_work()
815 void clear_tasks_mm_cpumask(int cpu) in clear_tasks_mm_cpumask() argument
826 WARN_ON(cpu_online(cpu)); in clear_tasks_mm_cpumask()
838 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); in clear_tasks_mm_cpumask()
849 int err, cpu = smp_processor_id(); in take_cpu_down() local
865 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); in take_cpu_down()
875 tick_offline_cpu(cpu); in take_cpu_down()
877 stop_machine_park(cpu); in take_cpu_down()
881 static int takedown_cpu(unsigned int cpu) in takedown_cpu() argument
883 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in takedown_cpu()
887 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); in takedown_cpu()
898 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); in takedown_cpu()
903 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread); in takedown_cpu()
906 BUG_ON(cpu_online(cpu)); in takedown_cpu()
921 hotplug_cpu__broadcast_tick_pull(cpu); in takedown_cpu()
923 __cpu_die(cpu); in takedown_cpu()
925 tick_cleanup_dead_cpu(cpu); in takedown_cpu()
926 rcutree_migrate_callbacks(cpu); in takedown_cpu()
952 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) in undo_cpu_down() argument
955 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); in undo_cpu_down()
958 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_down_callbacks() argument
965 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); in cpuhp_down_callbacks()
969 undo_cpu_down(cpu, st); in cpuhp_down_callbacks()
977 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, in _cpu_down() argument
980 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_down()
986 if (!cpu_present(cpu)) in _cpu_down()
1000 ret = cpuhp_kick_ap_work(cpu); in _cpu_down()
1021 ret = cpuhp_down_callbacks(cpu, st, target); in _cpu_down()
1038 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) in cpu_down_maps_locked() argument
1042 return _cpu_down(cpu, 0, target); in cpu_down_maps_locked()
1045 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target) in do_cpu_down() argument
1050 err = cpu_down_maps_locked(cpu, target); in do_cpu_down()
1055 int cpu_down(unsigned int cpu) in cpu_down() argument
1057 return do_cpu_down(cpu, CPUHP_OFFLINE); in cpu_down()
1072 void notify_cpu_starting(unsigned int cpu) in notify_cpu_starting() argument
1074 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in notify_cpu_starting()
1078 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ in notify_cpu_starting()
1079 cpumask_set_cpu(cpu, &cpus_booted_once_mask); in notify_cpu_starting()
1082 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); in notify_cpu_starting()
1108 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) in _cpu_up() argument
1110 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_up()
1116 if (!cpu_present(cpu)) { in _cpu_up()
1130 idle = idle_thread_get(cpu); in _cpu_up()
1145 ret = cpuhp_kick_ap_work(cpu); in _cpu_up()
1160 ret = cpuhp_up_callbacks(cpu, st, target); in _cpu_up()
1167 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target) in do_cpu_up() argument
1171 if (!cpu_possible(cpu)) { in do_cpu_up()
1173 cpu); in do_cpu_up()
1180 err = try_online_node(cpu_to_node(cpu)); in do_cpu_up()
1190 if (!cpu_smt_allowed(cpu)) { in do_cpu_up()
1195 err = _cpu_up(cpu, 0, target); in do_cpu_up()
1201 int cpu_up(unsigned int cpu) in cpu_up() argument
1203 return do_cpu_up(cpu, CPUHP_ONLINE); in cpu_up()
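cpu_down() and cpu_up() are the public entry points that walk this state machine to CPUHP_OFFLINE or CPUHP_ONLINE, taking the hotplug locks via do_cpu_down()/do_cpu_up(). A hedged in-kernel sketch (demo_cycle_cpu is hypothetical; later kernels rename these entry points to remove_cpu()/add_cpu()):

#include <linux/cpu.h>

/* Hypothetical example: cycle one CPU through offline and back. */
static int demo_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);	/* walks the machine down to CPUHP_OFFLINE */
	if (ret)
		return ret;
	return cpu_up(cpu);	/* and back up to CPUHP_ONLINE */
}

From userspace the same transitions are driven by writing 0 or 1 to /sys/devices/system/cpu/cpuN/online.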
1212 int cpu, error = 0; in freeze_secondary_cpus() local
1231 for_each_online_cpu(cpu) { in freeze_secondary_cpus()
1232 if (cpu == primary) in freeze_secondary_cpus()
1241 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); in freeze_secondary_cpus()
1242 error = _cpu_down(cpu, 1, CPUHP_OFFLINE); in freeze_secondary_cpus()
1243 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); in freeze_secondary_cpus()
1245 cpumask_set_cpu(cpu, frozen_cpus); in freeze_secondary_cpus()
1247 pr_err("Error taking CPU%d down: %d\n", cpu, error); in freeze_secondary_cpus()
1278 int cpu, error; in enable_nonboot_cpus() local
1291 for_each_cpu(cpu, frozen_cpus) { in enable_nonboot_cpus()
1292 trace_suspend_resume(TPS("CPU_ON"), cpu, true); in enable_nonboot_cpus()
1293 error = _cpu_up(cpu, 1, CPUHP_ONLINE); in enable_nonboot_cpus()
1294 trace_suspend_resume(TPS("CPU_ON"), cpu, false); in enable_nonboot_cpus()
1296 pr_info("CPU%d is up\n", cpu); in enable_nonboot_cpus()
1297 cpu_device = get_cpu_device(cpu); in enable_nonboot_cpus()
1300 __func__, cpu); in enable_nonboot_cpus()
1305 pr_warn("Error taking CPU%d up: %d\n", cpu, error); in enable_nonboot_cpus()
1578 int (*startup)(unsigned int cpu), in cpuhp_store_callbacks() argument
1579 int (*teardown)(unsigned int cpu), in cpuhp_store_callbacks() argument
1623 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_issue_call() argument
1642 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); in cpuhp_issue_call()
1644 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
1646 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
1660 int cpu; in cpuhp_rollback_install() local
1663 for_each_present_cpu(cpu) { in cpuhp_rollback_install()
1664 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_rollback_install()
1667 if (cpu >= failedcpu) in cpuhp_rollback_install()
1672 cpuhp_issue_call(cpu, state, false, node); in cpuhp_rollback_install()
1681 int cpu; in __cpuhp_state_add_instance_cpuslocked() local
1699 for_each_present_cpu(cpu) { in __cpuhp_state_add_instance_cpuslocked()
1700 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_add_instance_cpuslocked()
1706 ret = cpuhp_issue_call(cpu, state, true, node); in __cpuhp_state_add_instance_cpuslocked()
1709 cpuhp_rollback_install(cpu, state, node); in __cpuhp_state_add_instance_cpuslocked()
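__cpuhp_state_add_instance_cpuslocked() is the core of the multi-instance API: it links the new node into the state's instance list and invokes its startup callback on every present CPU already past the state, unwinding via cpuhp_rollback_install() on failure. A sketch of the consumer side, reusing the hypothetical demo_instance/demo_inst_online from the earlier sketch:

static enum cpuhp_state demo_multi_state;

static int __init demo_multi_init(void)
{
	/* Register the callbacks once; instances are added as they appear. */
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:multi",
					  demo_inst_online, NULL);
	if (ret < 0)
		return ret;
	demo_multi_state = ret;
	return 0;
}

/* Later, e.g. at device probe time: */
static int demo_add_one(struct demo_instance *di)
{
	return cpuhp_state_add_instance(demo_multi_state, &di->node);
}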
1752 int (*startup)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
1753 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
1756 int cpu, ret = 0; in __cpuhp_setup_state_cpuslocked() local
1782 for_each_present_cpu(cpu) { in __cpuhp_setup_state_cpuslocked()
1783 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_setup_state_cpuslocked()
1789 ret = cpuhp_issue_call(cpu, state, true, NULL); in __cpuhp_setup_state_cpuslocked()
1792 cpuhp_rollback_install(cpu, state, NULL); in __cpuhp_setup_state_cpuslocked()
1811 int (*startup)(unsigned int cpu), in __cpuhp_setup_state() argument
1812 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state() argument
1829 int cpu; in __cpuhp_state_remove_instance() local
1846 for_each_present_cpu(cpu) { in __cpuhp_state_remove_instance()
1847 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_remove_instance()
1851 cpuhp_issue_call(cpu, state, false, node); in __cpuhp_state_remove_instance()
1876 int cpu; in __cpuhp_remove_state_cpuslocked() local
1898 for_each_present_cpu(cpu) { in __cpuhp_remove_state_cpuslocked()
1899 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_remove_state_cpuslocked()
1903 cpuhp_issue_call(cpu, state, false, NULL); in __cpuhp_remove_state_cpuslocked()
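The functions above back the public registration API: cpuhp_setup_state() stores the callbacks, invokes the startup callback on each CPU already at or past the state, and unwinds on failure; cpuhp_remove_state() does the reverse. A minimal module sketch, assuming hypothetical demo_* callbacks:

#include <linux/module.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_state;

static int demo_online(unsigned int cpu)  { return 0; }
static int demo_offline(unsigned int cpu) { return 0; }

static int __init demo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN requests a free dynamic slot; the
	 * non-negative return value is the allocated state and is
	 * needed later for removal.  demo_online() runs here on all
	 * currently online CPUs and afterwards on each CPU coming up.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_offline);
	if (ret < 0)
		return ret;
	demo_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_state);	/* demo_offline() on online CPUs */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");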
1920 static void cpuhp_offline_cpu_device(unsigned int cpu) in cpuhp_offline_cpu_device() argument
1922 struct device *dev = get_cpu_device(cpu); in cpuhp_offline_cpu_device()
1929 static void cpuhp_online_cpu_device(unsigned int cpu) in cpuhp_online_cpu_device() argument
1931 struct device *dev = get_cpu_device(cpu); in cpuhp_online_cpu_device()
1940 int cpu, ret = 0; in cpuhp_smt_disable() local
1943 for_each_online_cpu(cpu) { in cpuhp_smt_disable()
1944 if (topology_is_primary_thread(cpu)) in cpuhp_smt_disable()
1946 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in cpuhp_smt_disable()
1962 cpuhp_offline_cpu_device(cpu); in cpuhp_smt_disable()
1972 int cpu, ret = 0; in cpuhp_smt_enable() local
1976 for_each_present_cpu(cpu) { in cpuhp_smt_enable()
1978 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) in cpuhp_smt_enable()
1980 ret = _cpu_up(cpu, 0, CPUHP_ONLINE); in cpuhp_smt_enable()
1984 cpuhp_online_cpu_device(cpu); in cpuhp_smt_enable()
2246 int cpu, ret; in cpuhp_sysfs_init() local
2257 for_each_possible_cpu(cpu) { in cpuhp_sysfs_init()
2258 struct device *dev = get_cpu_device(cpu); in cpuhp_sysfs_init()
2334 void set_cpu_online(unsigned int cpu, bool online) in set_cpu_online() argument
2347 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
2350 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
2360 int cpu = smp_processor_id(); in boot_cpu_init() local
2363 set_cpu_online(cpu, true); in boot_cpu_init()
2364 set_cpu_active(cpu, true); in boot_cpu_init()
2365 set_cpu_present(cpu, true); in boot_cpu_init()
2366 set_cpu_possible(cpu, true); in boot_cpu_init()
2369 __boot_cpu_id = cpu; in boot_cpu_init()
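boot_cpu_init() seeds all four cpumasks with the boot CPU; every other CPU enters the possible/present masks during early enumeration and only reaches online/active through the state machine above. A small sketch of the matching query helpers (demo_report_masks is hypothetical):

#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_report_masks(unsigned int cpu)
{
	pr_info("CPU%u: possible=%d present=%d online=%d active=%d\n",
		cpu,
		cpu_possible(cpu), cpu_present(cpu),
		cpu_online(cpu), cpu_active(cpu));
}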