/kernel/ |
D | stop_machine.c |
    596  const struct cpumask *cpus)  in stop_machine_cpuslocked() argument
    602  .active_cpus = cpus,  in stop_machine_cpuslocked()
    631  int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)  in stop_machine() argument
    637  ret = stop_machine_cpuslocked(fn, data, cpus);  in stop_machine()
    666  const struct cpumask *cpus)  in stop_machine_from_inactive_cpu() argument
    669  .active_cpus = cpus };  in stop_machine_from_inactive_cpu()
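The stop_machine() hits above show the call prototype and how the chosen cpumask ends up in .active_cpus. A minimal caller sketch, assuming a made-up apply_patch() callback and patch_args struct; only the stop_machine() prototype comes from the listing:

/* Hypothetical caller of stop_machine(); apply_patch() and patch_args are
 * invented for illustration. */
#include <linux/stop_machine.h>

struct patch_args {
	unsigned long *target;
	unsigned long value;
};

static int apply_patch(void *data)
{
	struct patch_args *args = data;

	/* Runs while every other online CPU spins with interrupts disabled. */
	*args->target = args->value;
	return 0;
}

static int patch_word(unsigned long *target, unsigned long value)
{
	struct patch_args args = { .target = target, .value = value };

	/* cpus == NULL: the core picks one CPU to run apply_patch(). */
	return stop_machine(apply_patch, &args, NULL);
}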
|
D | smp.c |
    687  cpumask_var_t cpus;  in on_each_cpu_cond_mask() local
    692  if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {  in on_each_cpu_cond_mask()
    696  __cpumask_set_cpu(cpu, cpus);  in on_each_cpu_cond_mask()
    697  on_each_cpu_mask(cpus, func, info, wait);  in on_each_cpu_cond_mask()
    699  free_cpumask_var(cpus);  in on_each_cpu_cond_mask()
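The smp.c hits trace the on_each_cpu_cond_mask() pattern: allocate a scratch cpumask, set the CPUs that pass a predicate, IPI only those CPUs via on_each_cpu_mask(), then free the mask. A simplified sketch of that pattern, assuming invented should_flush()/do_flush() helpers and a GFP_KERNEL allocation; the real function takes its gfp flags and candidate mask from the caller and also has a fallback path when the allocation fails:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

static bool should_flush(int cpu, void *info) { return true; }	/* predicate, invented */
static void do_flush(void *info) { }				/* runs on each selected CPU */

static void flush_where_needed(void *info)
{
	cpumask_var_t cpus;
	int cpu;

	if (likely(zalloc_cpumask_var(&cpus, GFP_KERNEL | __GFP_NOWARN))) {
		for_each_online_cpu(cpu)
			if (should_flush(cpu, info))
				__cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, do_flush, info, true);
		free_cpumask_var(cpus);
	}
}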
|
/kernel/sched/ |
D | deadline.c |
    57  int cpus = 0;  in dl_bw_cpus() local
    62  cpus++;  in dl_bw_cpus()
    64  return cpus;  in dl_bw_cpus()
    2559  int cpus, err = -1;  in sched_dl_overflow() local
    2574  cpus = dl_bw_cpus(task_cpu(p));  in sched_dl_overflow()
    2576  !__dl_overflow(dl_b, cpus, 0, new_bw)) {  in sched_dl_overflow()
    2578  __dl_sub(dl_b, p->dl.dl_bw, cpus);  in sched_dl_overflow()
    2579  __dl_add(dl_b, new_bw, cpus);  in sched_dl_overflow()
    2582  !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {  in sched_dl_overflow()
    2590  __dl_sub(dl_b, p->dl.dl_bw, cpus);  in sched_dl_overflow()
    [all …]
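dl_bw_cpus() above is just a popcount over the relevant CPUs; sched_dl_overflow() then feeds that count into the bandwidth checks. A sketch of the counting idiom, using cpu_active_mask in place of the root-domain span the real function walks:

#include <linux/cpumask.h>

/* Count the active CPUs inside a given span (illustrative stand-in for
 * dl_bw_cpus(), which walks the task's root domain). */
static int count_active_cpus_in(const struct cpumask *span)
{
	int cpu, cpus = 0;

	for_each_cpu_and(cpu, span, cpu_active_mask)
		cpus++;

	return cpus;
}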
|
D | cpufreq_schedutil.c |
    130  for_each_cpu(cpu, policy->cpus)  in sugov_fast_switch()
    506  for_each_cpu(j, policy->cpus) {  in sugov_next_freq_shared()
    846  for_each_cpu(cpu, policy->cpus) {  in sugov_start()
    854  for_each_cpu(cpu, policy->cpus) {  in sugov_start()
    870  for_each_cpu(cpu, policy->cpus)  in sugov_stop()
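In each schedutil hook above, policy->cpus is the mask of online CPUs that share one frequency domain, so per-CPU state is set up or torn down by walking it. A governor-style sketch; my_cpu_data and my_governor_start() are invented for illustration:

#include <linux/cpufreq.h>
#include <linux/percpu.h>

struct my_cpu_data {
	unsigned int cached_freq;
};
static DEFINE_PER_CPU(struct my_cpu_data, my_cpu_data);

static int my_governor_start(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	/* Every CPU governed by this policy starts from the current frequency. */
	for_each_cpu(cpu, policy->cpus)
		per_cpu(my_cpu_data, cpu).cached_freq = policy->cur;

	return 0;
}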
|
D | sched.h |
    288  void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)  in __dl_sub() argument
    291  __dl_update(dl_b, (s32)tsk_bw / cpus);  in __dl_sub()
    295  void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)  in __dl_add() argument
    298  __dl_update(dl_b, -((s32)tsk_bw / cpus));  in __dl_add()
    302  bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)  in __dl_overflow() argument
    305  dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;  in __dl_overflow()
    1285  extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
    1290  static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)  in sched_numa_find_closest() argument
    2493  #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
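Line 305 is the whole deadline admission test: a change is rejected when the already-admitted bandwidth, minus the task's old share and plus its new one, exceeds the per-CPU cap times the CPU count. A userspace restatement of that arithmetic with illustrative numbers; the kernel values are fixed-point utilizations, not percentages:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors __dl_overflow(): dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw */
static bool dl_overflow(uint64_t cap_per_cpu, uint64_t total_bw, int cpus,
			uint64_t old_bw, uint64_t new_bw)
{
	return cap_per_cpu * cpus < total_bw - old_bw + new_bw;
}

int main(void)
{
	/* 95% cap per CPU, 4 CPUs: 380% of budget in total. */
	printf("%d\n", dl_overflow(95, 300, 4, 20, 70));	/* 350 <= 380 -> 0, fits     */
	printf("%d\n", dl_overflow(95, 350, 4, 20, 70));	/* 400 >  380 -> 1, overflow */
	return 0;
}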
|
D | cpufreq.c |
    74  return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||  in cpufreq_this_cpu_can_update()
|
D | fair.c |
    150  unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);  in get_update_sysctl_factor() local
    158  factor = cpus;  in get_update_sysctl_factor()
    162  factor = 1 + ilog2(cpus);  in get_update_sysctl_factor()
    5875  struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);  in select_idle_core() local
    5884  cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);  in select_idle_core()
    5886  for_each_cpu_wrap(core, cpus, target) {  in select_idle_core()
    5890  __cpumask_clear_cpu(cpu, cpus);  in select_idle_core()
    7177  struct cpumask *cpus;  member
    7322  for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {  in can_migrate_task()
    8136  for_each_cpu_and(i, sched_group_span(group), env->cpus) {  in update_sg_lb_stats()
    [all …]
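The first three fair.c hits show how the scheduler scales its latency sysctls with machine size: the online CPU count is clamped to 8 and, for the default logarithmic scaling, the factor becomes 1 + ilog2(cpus). A small userspace restatement of just that arithmetic; the linear and "none" scaling modes are omitted:

#include <stdio.h>

/* Minimal ilog2 for the sketch; the kernel's ilog2() is a macro/builtin. */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int update_sysctl_factor(unsigned int online_cpus)
{
	unsigned int cpus = online_cpus < 8 ? online_cpus : 8;	/* min_t(..., 8) */

	return 1 + ilog2_u32(cpus);		/* default: logarithmic scaling */
}

int main(void)
{
	/* 1, 2, 4, 8 and 64 online CPUs -> factors 1, 2, 3, 4, 4 */
	unsigned int n[] = { 1, 2, 4, 8, 64 };

	for (int i = 0; i < 5; i++)
		printf("%u CPUs -> factor %u\n", n[i], update_sysctl_factor(n[i]));
	return 0;
}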
|
D | topology.c |
    1739  int sched_numa_find_closest(const struct cpumask *cpus, int cpu)  in sched_numa_find_closest() argument
    1744  cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);  in sched_numa_find_closest()
|
/kernel/power/ |
D | energy_model.c |
    63  debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);  in em_debug_create_pd()
    158  cpumask_copy(to_cpumask(pd->cpus), span);  in em_create_pd()
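em_create_pd() copies the performance domain's CPU span into pd->cpus, and the debugfs "cpus" file exposes that bitmap. A hedged sketch that walks the mask for the domain containing a given CPU, assuming em_cpu_get() as the lookup helper; the pr_info() output is purely illustrative:

#include <linux/energy_model.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void dump_pd_cpus(int any_cpu)
{
	struct em_perf_domain *pd = em_cpu_get(any_cpu);
	int cpu;

	if (!pd)
		return;

	/* pd->cpus is the raw bitmap filled by em_create_pd(). */
	for_each_cpu(cpu, to_cpumask(pd->cpus))
		pr_info("CPU%d shares a perf domain with CPU%d\n", cpu, any_cpu);
}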
|
D | power.h |
    12  int cpus;  member
|
/kernel/debug/kdb/ |
D | kdb_cmds |
    26  defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
|
/kernel/trace/ |
D | ring_buffer.c |
    487  int cpus;  member
    1407  buffer->cpus = nr_cpu_ids;  in __ring_buffer_alloc()
|