/kernel/power/ |
D | energy_model.c |
      75  debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,  in em_debug_create_pd()
     180  struct em_data_callback *cb, cpumask_t *cpus)  in em_create_pd() argument
     191  cpumask_copy(em_span_cpus(pd), cpus);  in em_create_pd()
     205  for_each_cpu(cpu, cpus) {  in em_create_pd()
     274  struct em_data_callback *cb, cpumask_t *cpus,  in em_dev_register_perf_domain() argument
     295  if (!cpus) {  in em_dev_register_perf_domain()
     301  for_each_cpu(cpu, cpus) {  in em_dev_register_perf_domain()
     315  cpumask_pr_args(cpus));  in em_dev_register_perf_domain()
     324  ret = em_create_pd(dev, nr_states, cb, cpus);  in em_dev_register_perf_domain()
|
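The energy_model.c hits trace the perf-domain setup path: em_create_pd() copies the caller's cpumask into the new domain and em_dev_register_perf_domain() validates it before registration. Below is a minimal sketch, assuming a CPU whose domain is already registered, of reading that mask back through em_cpu_get() and em_span_cpus(); the helper name and the pr_info() format are illustrative only.

    #include <linux/cpumask.h>
    #include <linux/energy_model.h>
    #include <linux/printk.h>

    /* Illustrative helper (not from energy_model.c): print the CPUs that
     * share the energy-model perf domain of @cpu, if one is registered. */
    static void example_show_pd_cpus(int cpu)
    {
            struct em_perf_domain *pd = em_cpu_get(cpu);

            if (!pd)
                    return;

            pr_info("CPU%d perf domain spans %*pbl\n",
                    cpu, cpumask_pr_args(em_span_cpus(pd)));
    }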
D | power.h |
      12  int cpus;  member
|
/kernel/ |
D | cpu.c |
    1163  int __pause_drain_rq(struct cpumask *cpus)  in __pause_drain_rq() argument
    1173  for_each_cpu(cpu, cpus) {  in __pause_drain_rq()
    1183  void __wait_drain_rq(struct cpumask *cpus)  in __wait_drain_rq() argument
    1187  for_each_cpu(cpu, cpus)  in __wait_drain_rq()
    1216  int pause_cpus(struct cpumask *cpus)  in pause_cpus() argument
    1233  cpumask_and(cpus, cpus, cpu_active_mask);  in pause_cpus()
    1235  for_each_cpu(cpu, cpus) {  in pause_cpus()
    1243  if (cpumask_weight(cpus) >= num_active_cpus()) {  in pause_cpus()
    1248  if (cpumask_empty(cpus))  in pause_cpus()
    1266  for_each_cpu(cpu, cpus)  in pause_cpus()
    [all …]
|
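pause_cpus() in cpu.c is the Android common-kernel CPU pause interface (not in mainline): the caller's mask is clipped to cpu_active_mask and the request is rejected if it is empty or would deactivate every active CPU. A minimal caller sketch, assuming the matching resume_cpus() counterpart exists in the same tree; the chosen CPUs are arbitrary.

    #include <linux/cpu.h>
    #include <linux/cpumask.h>

    /* Sketch only: pause_cpus()/resume_cpus() are the Android common-kernel
     * pause interface; resume_cpus() is assumed here, and CPUs 2-3 are just
     * an example mask. */
    static int example_pause_then_resume(void)
    {
            cpumask_t mask;
            int ret;

            cpumask_clear(&mask);
            cpumask_set_cpu(2, &mask);
            cpumask_set_cpu(3, &mask);

            ret = pause_cpus(&mask);  /* fails rather than empty cpu_active_mask */
            if (ret)
                    return ret;

            /* CPUs 2-3 are now inactive (no new tasks placed on them) but remain online. */

            return resume_cpus(&mask);
    }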
D | stop_machine.c |
     607  const struct cpumask *cpus)  in stop_machine_cpuslocked() argument
     613  .active_cpus = cpus,  in stop_machine_cpuslocked()
     642  int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)  in stop_machine() argument
     648  ret = stop_machine_cpuslocked(fn, data, cpus);  in stop_machine()
     677  const struct cpumask *cpus)  in stop_machine_from_inactive_cpu() argument
     680  .active_cpus = cpus };  in stop_machine_from_inactive_cpu()
|
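stop_machine() takes the cpumask of CPUs that should run the callback (NULL means any one online CPU) while every other online CPU spins with interrupts disabled, which is why it behaves like a very heavy global lock. A minimal caller sketch; update_state() and its data are illustrative, not from stop_machine.c.

    #include <linux/stop_machine.h>

    /* Illustrative callback: runs while all other online CPUs spin with
     * interrupts disabled, so non-atomic global state can be swapped safely. */
    static int update_state(void *data)
    {
            *(int *)data = 1;
            return 0;
    }

    static int example_stop_machine(void)
    {
            static int state;

            /* NULL cpumask: let the stopper run the callback on any online CPU. */
            return stop_machine(update_state, &state, NULL);
    }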
/kernel/sched/ |
D | deadline.c |
      80  int cpus;  in dl_bw_cpus() local
      88  cpus = 0;  in dl_bw_cpus()
      91  cpus++;  in dl_bw_cpus()
      93  return cpus;  in dl_bw_cpus()
    2571  int cpu, cpus, ret = 0;  in sched_dl_global_validate() local
    2586  cpus = dl_bw_cpus(cpu);  in sched_dl_global_validate()
    2589  if (new_bw * cpus < dl_b->total_bw)  in sched_dl_global_validate()
    2658  int cpus, err = -1, cpu = task_cpu(p);  in sched_dl_overflow() local
    2675  cpus = dl_bw_cpus(cpu);  in sched_dl_overflow()
    2681  __dl_sub(dl_b, p->dl.dl_bw, cpus);  in sched_dl_overflow()
    [all …]
|
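The deadline.c hits are the SCHED_DEADLINE admission checks: dl_bw_cpus() counts the CPUs of the relevant root domain, and the per-CPU bandwidth limit is scaled by that count before being compared with the accumulated total_bw. A stand-alone sketch of the fixed-point arithmetic behind the "new_bw * cpus < dl_b->total_bw" test, with made-up task parameters; BW_SHIFT = 20 mirrors kernel/sched/sched.h.

    /* Stand-alone illustration (userspace C); the numbers are invented. */
    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20

    /* Same shape as the kernel's to_ratio(): bandwidth as a Q20 fraction. */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << BW_SHIFT) / period;  /* kernel uses div64_u64() */
    }

    int main(void)
    {
            uint64_t task_bw  = to_ratio(100000000, 10000000); /* 10ms/100ms = 0.10 */
            uint64_t limit    = to_ratio(1000000, 950000);     /* default 95% per CPU */
            uint64_t total_bw = 3 * task_bw;                   /* three tasks already admitted */
            int cpus = 4;                                      /* as returned by dl_bw_cpus() */

            /* Admit a fourth task only if the scaled limit still covers everyone. */
            printf("admit: %s\n",
                   total_bw + task_bw <= (uint64_t)cpus * limit ? "yes" : "no");
            return 0;
    }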
D | cpufreq_schedutil.c |
     500  for_each_cpu(j, policy->cpus) {  in sugov_next_freq_shared()
     847  for_each_cpu(cpu, policy->cpus) {  in sugov_start()
     855  for_each_cpu(cpu, policy->cpus) {  in sugov_start()
     871  for_each_cpu(cpu, policy->cpus)  in sugov_stop()
|
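Every schedutil hit iterates policy->cpus, the mask of CPUs a cpufreq policy currently manages. The sketch below shows the same per-policy iteration in a governor start callback; my_gov_data and example_gov_start() are hypothetical names, not schedutil's own sugov structures.

    #include <linux/cpufreq.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU governor state, one instance per possible CPU. */
    struct my_gov_data {
            unsigned int cached_freq;
    };
    static DEFINE_PER_CPU(struct my_gov_data, my_gov_data);

    /* Seed the per-CPU state for every CPU the policy manages. */
    static void example_gov_start(struct cpufreq_policy *policy)
    {
            unsigned int cpu;

            for_each_cpu(cpu, policy->cpus)
                    per_cpu(my_gov_data, cpu).cached_freq = policy->cur;
    }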
D | cpufreq.c |
      75  return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||  in cpufreq_this_cpu_can_update()
|
D | sched.h |
     309  void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)  in __dl_sub() argument
     312  __dl_update(dl_b, (s32)tsk_bw / cpus);  in __dl_sub()
     316  void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)  in __dl_add() argument
     319  __dl_update(dl_b, -((s32)tsk_bw / cpus));  in __dl_add()
    1405  extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
    1410  static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)  in sched_numa_find_closest() argument
    2715  #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
|
D | fair.c |
     167  unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);  in get_update_sysctl_factor() local
     175  factor = cpus;  in get_update_sysctl_factor()
     179  factor = 1 + ilog2(cpus);  in get_update_sysctl_factor()
    6296  struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);  in select_idle_core() local
    6305  cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);  in select_idle_core()
    6307  for_each_cpu_wrap(core, cpus, target) {  in select_idle_core()
    6316  cpumask_andnot(cpus, cpus, cpu_smt_mask(core));  in select_idle_core()
    6372  struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);  in select_idle_cpu() local
    6403  cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);  in select_idle_cpu()
    6405  for_each_cpu_wrap(cpu, cpus, target) {  in select_idle_cpu()
    [all …]
|
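The fair.c hits around lines 6296-6405 are the idle-CPU scan: a per-CPU scratch cpumask is ANDed with the sched-domain span and the task's affinity, then walked with for_each_cpu_wrap() starting at the preferred target so the search spreads out from it. A minimal sketch of that pattern; pick_first_idle(), @scratch and @allowed are illustrative stand-ins for select_idle_mask and sched_domain_span(), and idle_cpu() is the generic idleness test rather than fair.c's internal helpers.

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Sketch of the scan pattern: restrict a scratch mask to the CPUs the
     * task may use, then walk it starting from a preferred CPU. */
    static int pick_first_idle(struct cpumask *scratch,
                               const struct cpumask *allowed,
                               struct task_struct *p, int target)
    {
            int cpu;

            cpumask_and(scratch, allowed, p->cpus_ptr);

            for_each_cpu_wrap(cpu, scratch, target) {
                    if (idle_cpu(cpu))
                            return cpu;
            }
            return -1;
    }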
D | topology.c |
    1723  int sched_numa_find_closest(const struct cpumask *cpus, int cpu)  in sched_numa_find_closest() argument
    1728  cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);  in sched_numa_find_closest()
|
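sched_numa_find_closest() returns the CPU in the given mask that is NUMA-closest to the reference CPU, falling back to nr_cpu_ids when nothing matches (the !NUMA stub at sched.h:1410 returns the same). A caller sketch under those assumptions; the declaration lives in the scheduler's private sched.h per the hit at line 1405, so real callers sit inside scheduler code, and choose_worker_cpu() is made up.

    #include <linux/cpumask.h>

    /* Illustrative only: pick a CPU from @workers near @near_cpu, falling
     * back to any CPU in the mask when no NUMA-close CPU is found. */
    static int choose_worker_cpu(const struct cpumask *workers, int near_cpu)
    {
            int cpu = sched_numa_find_closest(workers, near_cpu);

            return cpu < nr_cpu_ids ? cpu : cpumask_any(workers);
    }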
D | core.c |
    7273  int sched_cpus_activate(struct cpumask *cpus)  in sched_cpus_activate() argument
    7277  for_each_cpu(cpu, cpus) {  in sched_cpus_activate()
    7279  for_each_cpu_and(cpu, cpus, cpu_active_mask)  in sched_cpus_activate()
    7337  int sched_cpus_deactivate_nosync(struct cpumask *cpus)  in sched_cpus_deactivate_nosync() argument
    7341  for_each_cpu(cpu, cpus) {  in sched_cpus_deactivate_nosync()
    7343  for_each_cpu(cpu, cpus) {  in sched_cpus_deactivate_nosync()
|
/kernel/debug/kdb/ |
D | kdb_cmds |
      26  defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
|
/kernel/trace/ |
D | ring_buffer.c |
     543  int cpus;  member
    1724  buffer->cpus = nr_cpu_ids;  in __ring_buffer_alloc()
|