/kernel/
smp.c
  366  cpu = cpumask_any_and(mask, cpu_online_mask);  in smp_call_function_any()
  404  cpu = cpumask_first_and(mask, cpu_online_mask);  in smp_call_function_many()
  406  cpu = cpumask_next_and(cpu, mask, cpu_online_mask);  in smp_call_function_many()
  413  next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);  in smp_call_function_many()
  415  next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);  in smp_call_function_many()
  425  cpumask_and(cfd->cpumask, mask, cpu_online_mask);  in smp_call_function_many()
  473  smp_call_function_many(cpu_online_mask, func, info, wait);  in smp_call_function()

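The smp_call_function_many() hits above all serve one job: narrow the caller's mask to CPUs that are actually online and decide between the single-CPU fast path and a true multi-CPU cross call. A minimal sketch of that selection step, with illustrative names (sole_online_target() is not a kernel function):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Caller must have preemption disabled, as smp_call_function_many() requires.
     * Returns the sole online CPU in @mask other than ourselves, or -1 when
     * there are none or more than one (i.e. the multi-CPU path must be taken). */
    static int sole_online_target(const struct cpumask *mask)
    {
            int this_cpu = smp_processor_id();
            int cpu, next_cpu;

            /* First online CPU in the requested mask, skipping ourselves. */
            cpu = cpumask_first_and(mask, cpu_online_mask);
            if (cpu == this_cpu)
                    cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
            if (cpu >= nr_cpu_ids)
                    return -1;      /* nothing online in the mask */

            /* A second candidate rules out the single-CPU fast path. */
            next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
            if (next_cpu == this_cpu)
                    next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

            return next_cpu >= nr_cpu_ids ? cpu : -1;
    }

Line 425 then caches the narrowed mask in cfd->cpumask so the IPI loop never touches an offline CPU.
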
padata.c
  358  cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);  in padata_setup_cpumasks()
  364  cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);  in padata_setup_cpumasks()
  568  if (!cpumask_intersects(cpumask, cpu_online_mask)) {  in padata_validate_cpumask()
  682  if (cpumask_test_cpu(cpu, cpu_online_mask)) {  in __padata_add_cpu()
  738  if (cpumask_test_cpu(cpu, cpu_online_mask)) {  in __padata_remove_cpu()

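padata first rejects a requested mask that shares no CPU with cpu_online_mask (line 568), then keeps only the online part (lines 358 and 364); kernel/irq/proc.c further down applies the same cpumask_intersects() guard to user-written affinity masks. A sketch of the combined validate-and-narrow step, assuming a hypothetical setup_effective_mask() helper:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int setup_effective_mask(struct cpumask *effective,
                                    const struct cpumask *requested)
    {
            /* Reject masks with no currently-online CPU. */
            if (!cpumask_intersects(requested, cpu_online_mask))
                    return -EINVAL;

            /* Keep only the online part of the request. */
            cpumask_and(effective, requested, cpu_online_mask);
            return 0;
    }
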
cpu.c
  550  first_cpu = cpumask_first(cpu_online_mask);  in disable_nonboot_cpus()
  746  const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);  variable (definition)
  747  EXPORT_SYMBOL(cpu_online_mask);

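Lines 746-747 are the definition site: cpu_online_mask is a const pointer to the cpumask wrapped around the cpu_online_bits bitmap, exported to modules. The same declaration pattern, sketched with a hypothetical mask name:

    #include <linux/cache.h>
    #include <linux/cpumask.h>
    #include <linux/export.h>

    /* Hypothetical mask following the cpu_online_bits/cpu_online_mask pattern. */
    static DECLARE_BITMAP(my_mask_bits, CONFIG_NR_CPUS) __read_mostly;
    const struct cpumask *const my_mask = to_cpumask(my_mask_bits);
    EXPORT_SYMBOL(my_mask);

The const pointer keeps readers from modifying the mask through cpu_online_mask; writers go through helpers such as set_cpu_online(), which touch the underlying bitmap.
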
stop_machine.c
  184  is_active = cpu == cpumask_first(cpu_online_mask);  in multi_cpu_stop()
  580  return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);  in __stop_machine()

reboot.c
  197  cpu = cpumask_first(cpu_online_mask);  in migrate_to_reboot_cpu()

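stop_machine.c and reboot.c (and poweroff.c and tick-common.c further down) all elect a well-known CPU the same way: cpumask_first(cpu_online_mask), the lowest-numbered CPU still online, typically the boot CPU. The idiom, as a trivial sketch (designated_cpu() is illustrative):

    #include <linux/cpumask.h>

    /* Lowest-numbered online CPU; usually CPU 0 unless it was hotplugged off. */
    static inline int designated_cpu(void)
    {
            return cpumask_first(cpu_online_mask);
    }

migrate_to_reboot_cpu() pins the rebooting task to this CPU; multi_cpu_stop() uses it to decide which of the stopped CPUs is the "active" one.
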
cpuset.c
  336  while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {  in guarantee_online_cpus()
  346  cpumask_copy(pmask, cpu_online_mask);  in guarantee_online_cpus()
  350  cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);  in guarantee_online_cpus()

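guarantee_online_cpus() climbs the cpuset hierarchy until it finds an ancestor whose effective mask still intersects cpu_online_mask, falling back to the full online mask if nothing usable is found. A sketch under simplified assumptions (struct my_set and its parent pointer are stand-ins for cpuset internals; the real code uses struct cpuset and parent_cs()):

    #include <linux/cpumask.h>

    struct my_set {
            struct my_set *parent;          /* NULL at the root */
            cpumask_var_t effective_cpus;   /* allocated elsewhere */
    };

    static void online_cpus_of(struct my_set *s, struct cpumask *pmask)
    {
            /* Climb until some ancestor still has an online CPU in its mask. */
            while (!cpumask_intersects(s->effective_cpus, cpu_online_mask)) {
                    if (!s->parent) {
                            /* Nothing usable anywhere: hand back all online CPUs. */
                            cpumask_copy(pmask, cpu_online_mask);
                            return;
                    }
                    s = s->parent;
            }
            cpumask_and(pmask, s->effective_cpus, cpu_online_mask);
    }
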
workqueue.c
  4632  cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);  in restore_unbound_workers_cpumask()

/kernel/sched/
stats.c
  100  n = cpumask_next(n - 1, cpu_online_mask);  in schedstat_start()
  102  n = cpumask_first(cpu_online_mask);  in schedstat_start()

debug.c
  430  n = cpumask_next(n - 1, cpu_online_mask);  in sched_debug_start()
  432  n = cpumask_first(cpu_online_mask);  in sched_debug_start()

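schedstat_start() and sched_debug_start() share the same seq_file idiom for walking online CPUs: position 0 maps to cpumask_first(), and a resumed position n maps to cpumask_next(n - 1, ...), i.e. the first online CPU at or above n, so CPUs hotplugged out between reads are skipped cleanly. A simplified sketch (cpu_seq_start() is illustrative, and the header-row handling of the real functions is omitted):

    #include <linux/cpumask.h>
    #include <linux/seq_file.h>

    static void *cpu_seq_start(struct seq_file *m, loff_t *pos)
    {
            unsigned long n = *pos;

            /* Position 0 -> first online CPU; position n -> first online CPU >= n. */
            if (n == 0)
                    n = cpumask_first(cpu_online_mask);
            else
                    n = cpumask_next(n - 1, cpu_online_mask);

            if (n >= nr_cpu_ids)
                    return NULL;            /* walked past the last online CPU */

            *pos = n + 1;                   /* the next call resumes after this CPU */
            return (void *)(n + 1);         /* non-NULL token encoding CPU n */
    }
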
rt.c
  524  return cpu_online_mask;  in sched_rt_period_mask()
  587  return cpu_online_mask;  in sched_rt_period_mask()
  815  span = cpu_online_mask;  in do_sched_rt_period_timer()

tune.c
  913  sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));  in schedtune_init()

fair.c
  5565  cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);  in energy_aware_wake_cpu()

/kernel/time/
tick-broadcast.c
  288  cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);  in tick_do_periodic_broadcast()
  401  if (!cpumask_test_cpu(*oncpu, cpu_online_mask))  in tick_broadcast_on_off()
  617  if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))  in tick_handle_oneshot_broadcast()
  618  cpumask_and(tmpmask, tmpmask, cpu_online_mask);  in tick_handle_oneshot_broadcast()

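The pair at lines 617-618 is defensive: a broadcast tick should never be pending for an offline CPU, so the handler warns once and clamps the mask if that invariant breaks. The idiom, sketched (clamp_pending_to_online() is an illustrative name):

    #include <linux/bug.h>
    #include <linux/cpumask.h>

    static void clamp_pending_to_online(struct cpumask *pending)
    {
            /* Pending broadcast bits for offline CPUs indicate a bug elsewhere;
             * report once, then drop them so we never IPI a dead CPU. */
            if (WARN_ON_ONCE(!cpumask_subset(pending, cpu_online_mask)))
                    cpumask_and(pending, pending, cpu_online_mask);
    }
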
clocksource.c
  269  next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in clocksource_watchdog()
  271  next_cpu = cpumask_first(cpu_online_mask);  in clocksource_watchdog()
  285  add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));  in clocksource_start_watchdog()

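clocksource_watchdog() rotates its timer round-robin across online CPUs (next after the current CPU, wrapping back to the first) so the watchdog runs on every CPU in turn. The rotation, sketched with an illustrative helper name:

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* The real logic is inline in clocksource_watchdog(). */
    static int next_watchdog_cpu(void)
    {
            int next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);

            if (next_cpu >= nr_cpu_ids)     /* ran past the last online CPU */
                    next_cpu = cpumask_first(cpu_online_mask);
            return next_cpu;
    }

clocksource_start_watchdog() (line 285) seeds the rotation by arming the timer on cpumask_first(cpu_online_mask).
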
tick-common.c
  343  int cpu = cpumask_first(cpu_online_mask);  in tick_handover_do_timer()

timer_list.c
  303  iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);  in move_iter()

/kernel/power/
poweroff.c
  30  schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);  in handle_poweroff()

/kernel/irq/
migration.c
  45  if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)  in irq_move_masked_irq()

proc.c
  124  if (!cpumask_intersects(new_value, cpu_online_mask)) {  in write_irq_affinity()
  218  if (!cpumask_intersects(new_value, cpu_online_mask)) {  in default_affinity_write()

manage.c
  337  cpu_online_mask))  in setup_affinity()
  343  cpumask_and(mask, cpu_online_mask, set);  in setup_affinity()

/kernel/rcu/
tree.c
  3004  while (try_stop_cpus(cpu_online_mask,  in synchronize_sched_expedited()

/kernel/trace/
ring_buffer.c
  1370  cpumask_copy(buffer->cpumask, cpu_online_mask);  in __ring_buffer_alloc()

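__ring_buffer_alloc() takes a one-time snapshot of cpu_online_mask into the buffer's own cpumask_var_t; later hotplug is handled by separate notifier paths. The allocate-and-copy pattern, sketched with hypothetical names (the real structure is struct ring_buffer):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_buffer {
            cpumask_var_t cpumask;
    };

    static int my_buffer_init(struct my_buffer *b)
    {
            if (!alloc_cpumask_var(&b->cpumask, GFP_KERNEL))
                    return -ENOMEM;

            /* Snapshot of the CPUs online right now; hotplug is handled elsewhere. */
            cpumask_copy(b->cpumask, cpu_online_mask);
            return 0;
    }
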