• Home
  • Raw
  • Download

Lines Matching refs:cpu

46 int smpcfd_prepare_cpu(unsigned int cpu)  in smpcfd_prepare_cpu()  argument
48 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in smpcfd_prepare_cpu()
51 cpu_to_node(cpu))) in smpcfd_prepare_cpu()
54 cpu_to_node(cpu))) { in smpcfd_prepare_cpu()
68 int smpcfd_dead_cpu(unsigned int cpu) in smpcfd_dead_cpu() argument
70 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in smpcfd_dead_cpu()
78 int smpcfd_dying_cpu(unsigned int cpu) in smpcfd_dying_cpu() argument
146 int cpu = -1; in csd_lock_wait_toolong() local
156 cpu = csd_lock_wait_getcpu(csd); in csd_lock_wait_toolong()
158 *bug_id, raw_smp_processor_id(), cpu); in csd_lock_wait_toolong()
170 cpu = csd_lock_wait_getcpu(csd); in csd_lock_wait_toolong()
171 if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu)) in csd_lock_wait_toolong()
174 cpux = cpu; in csd_lock_wait_toolong()
178 cpu, csd->func, csd->info); in csd_lock_wait_toolong()
187 if (cpu >= 0) { in csd_lock_wait_toolong()
188 if (!trigger_single_cpu_backtrace(cpu)) in csd_lock_wait_toolong()
189 dump_cpu_task(cpu); in csd_lock_wait_toolong()
191 pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu); in csd_lock_wait_toolong()
192 arch_send_call_function_single_ipi(cpu); in csd_lock_wait_toolong()
258 void __smp_call_single_queue(int cpu, struct llist_node *node) in __smp_call_single_queue() argument
271 if (llist_add(node, &per_cpu(call_single_queue, cpu))) in __smp_call_single_queue()
272 send_call_function_single_ipi(cpu); in __smp_call_single_queue()
280 static int generic_exec_single(int cpu, struct __call_single_data *csd) in generic_exec_single() argument
282 if (cpu == smp_processor_id()) { in generic_exec_single()
300 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
305 __smp_call_single_queue(cpu, &csd->llist); in generic_exec_single()
468 int smp_call_function_single(int cpu, smp_call_func_t func, void *info, in smp_call_function_single() argument
511 csd->dst = cpu; in smp_call_function_single()
514 err = generic_exec_single(cpu, csd); in smp_call_function_single()
546 int smp_call_function_single_async(int cpu, struct __call_single_data *csd) in smp_call_function_single_async() argument
560 err = generic_exec_single(cpu, csd); in smp_call_function_single_async()
586 unsigned int cpu; in smp_call_function_any() local
591 cpu = get_cpu(); in smp_call_function_any()
592 if (cpumask_test_cpu(cpu, mask)) in smp_call_function_any()
596 nodemask = cpumask_of_node(cpu_to_node(cpu)); in smp_call_function_any()
597 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
598 cpu = cpumask_next_and(cpu, nodemask, mask)) { in smp_call_function_any()
599 if (cpu_online(cpu)) in smp_call_function_any()
604 cpu = cpumask_any_and(mask, cpu_online_mask); in smp_call_function_any()
606 ret = smp_call_function_single(cpu, func, info, wait); in smp_call_function_any()
617 int cpu, next_cpu, this_cpu = smp_processor_id(); in smp_call_function_many_cond() local
637 cpu = cpumask_first_and(mask, cpu_online_mask); in smp_call_function_many_cond()
638 if (cpu == this_cpu) in smp_call_function_many_cond()
639 cpu = cpumask_next_and(cpu, mask, cpu_online_mask); in smp_call_function_many_cond()
642 if (cpu >= nr_cpu_ids) in smp_call_function_many_cond()
646 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); in smp_call_function_many_cond()
652 if (!cond_func || cond_func(cpu, info)) in smp_call_function_many_cond()
653 smp_call_function_single(cpu, func, info, wait); in smp_call_function_many_cond()
667 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
668 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
670 if (cond_func && !cond_func(cpu, info)) in smp_call_function_many_cond()
680 csd->dst = cpu; in smp_call_function_many_cond()
682 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) in smp_call_function_many_cond()
683 __cpumask_set_cpu(cpu, cfd->cpumask_ipi); in smp_call_function_many_cond()
690 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
693 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
864 int cpu = get_cpu(); in on_each_cpu_mask() local
867 if (cpumask_test_cpu(cpu, mask)) { in on_each_cpu_mask()
902 int cpu = get_cpu(); in on_each_cpu_cond_mask() local
905 if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) { in on_each_cpu_cond_mask()
954 int cpu; in wake_up_all_idle_cpus() local
957 for_each_online_cpu(cpu) { in wake_up_all_idle_cpus()
958 if (cpu == smp_processor_id()) in wake_up_all_idle_cpus()
962 if (s2idle_state == S2IDLE_STATE_ENTER || cpu_active(cpu)) in wake_up_all_idle_cpus()
964 wake_up_if_idle(cpu); in wake_up_all_idle_cpus()
978 int cpu; in wake_up_all_online_idle_cpus() local
981 for_each_online_cpu(cpu) { in wake_up_all_online_idle_cpus()
982 if (cpu == smp_processor_id()) in wake_up_all_online_idle_cpus()
985 wake_up_if_idle(cpu); in wake_up_all_online_idle_cpus()
1004 int cpu; member
1012 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1013 hypervisor_pin_vcpu(sscs->cpu); in smp_call_on_cpu_callback()
1015 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1021 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) in smp_call_on_cpu() argument
1027 .cpu = phys ? cpu : -1, in smp_call_on_cpu()
1032 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in smp_call_on_cpu()
1035 queue_work_on(cpu, system_wq, &sscs.work); in smp_call_on_cpu()