Searched refs:cpu_of (Results 1 – 8 of 8) sorted by relevance
/kernel/sched/

pelt.h
    106   delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));   in update_rq_clock_pelt()
    107   delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));  in update_rq_clock_pelt()

pelt.c
    503   running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));  in update_irq_load_avg()
    504   running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));   in update_irq_load_avg()
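
The pelt.h and pelt.c hits all apply the same fixed-point scaling: a raw time delta is shrunk by the CPU's compute capacity and by its current frequency capacity, each expressed as a fraction of SCHED_CAPACITY_SCALE (1024), so PELT time advances more slowly on small or down-clocked CPUs. Below is a minimal standalone sketch of that arithmetic; cap_scale() mirrors the kernel macro, while the capacity values are made-up inputs standing in for arch_scale_cpu_capacity()/arch_scale_freq_capacity().

    #include <inttypes.h>
    #include <stdio.h>

    /* Mirrors the kernel's fixed-point convention: capacity and frequency
     * scale factors are fractions of SCHED_CAPACITY_SCALE (1 << 10). */
    #define SCHED_CAPACITY_SHIFT   10
    #define cap_scale(v, s)        ((v) * (s) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
            uint64_t delta    = 4000; /* wall-clock ns since the last update */
            uint64_t cpu_cap  = 512;  /* stand-in: a half-capacity little core */
            uint64_t freq_cap = 768;  /* stand-in: running at 75% of max freq */

            /* Same two-step scaling as pelt.h:106-107: time spent on a
             * small or down-clocked CPU counts for proportionally less. */
            delta = cap_scale(delta, cpu_cap);
            delta = cap_scale(delta, freq_cap);

            printf("scaled delta = %" PRIu64 " ns\n", delta);
            return 0;
    }

Compiled as plain userspace C this prints 1500: the 4000 ns delta shrinks by 512/1024 and then by 768/1024, which is exactly how PELT slows the passage of load-tracking time on little cores.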

fair.c
    310   int cpu = cpu_of(rq);  in list_add_leaf_cfs_rq()
    810   long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));  in post_init_entity_util_avg()
    4048  if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))  in util_est_update()
    4222  if (task_fits_cpu(p, cpu_of(rq))) {  in update_misfit_status()
    4946  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_unthrottle_up()
    4964  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_throttle_down()
    5004  se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];  in throttle_cfs_rq()
    5052  se = cfs_rq->tg->se[cpu_of(rq)];  in unthrottle_cfs_rq()
    5534  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in update_runtime_enabled()
    5552  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in unthrottle_offline_cfs_rqs()
    [all …]
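
Most of the fair.c hits (the throttle/unthrottle paths especially) use cpu_of() the same way: as the index into a task group's per-CPU arrays. A reduced sketch of that layout follows, with field names mirroring kernel/sched/sched.h but everything else stripped; tg_cfs_rq_of() is a hypothetical helper standing in for the open-coded lookups above.

    /* Reduced sketch of the layout behind the tg->cfs_rq[...] and
     * tg->se[...] hits; the real struct task_group carries many
     * more fields. */
    struct cfs_rq;
    struct sched_entity;

    struct task_group {
            struct sched_entity **se;     /* one sched_entity per CPU */
            struct cfs_rq       **cfs_rq; /* one cfs_rq per CPU */
            /* ... */
    };

    /* Hypothetical helper showing the common lookup: group state local
     * to one runqueue is always reached by indexing with cpu_of(rq). */
    static inline struct cfs_rq *tg_cfs_rq_of(struct task_group *tg, int cpu)
    {
            return tg->cfs_rq[cpu]; /* cpu == cpu_of(rq) at the call sites */
    }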

sched.h
    1105  static inline int cpu_of(struct rq *rq)  in cpu_of() (function definition)
    1203  return per_cpu(clock_task_mult, cpu_of(rq));  in rq_clock_task_mult()
    2030  int cpu = cpu_of(rq);  in sched_update_tick_dependency()
    2094  if (!cpu_active(cpu_of(rq)))  in hrtick_enabled()
    2450  cpu_of(rq)));  in cpufreq_update_util()
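
The 1105 hit is the definition itself. In mainline it reduces to reading the CPU number stored in the runqueue on SMP builds and hard-coding 0 on uniprocessor builds, which is why every caller above can treat it as a cheap accessor:

    static inline int cpu_of(struct rq *rq)
    {
    #ifdef CONFIG_SMP
            return rq->cpu;  /* recorded once at runqueue init */
    #else
            return 0;        /* a UP kernel has exactly one runqueue */
    #endif
    }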

stats.h
    153   psi_memstall_tick(rq->curr, cpu_of(rq));  in psi_task_tick()

core.c
    272   irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;  in update_rq_clock_task()
    297   steal = paravirt_steal_clock(cpu_of(rq));  in update_rq_clock_task()
    332   delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;  in update_rq_clock()
    368   WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());  in hrtick()
    421   smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);  in hrtick_start()
    637   cpu = cpu_of(rq);  in resched_curr()
    780   int cpu = cpu_of(rq);  in nohz_csd_func()
    2047  stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);  in __set_cpus_allowed_ptr_locked()
    2789  if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))  in sched_ttwu_pending()
    2790  set_task_cpu(p, cpu_of(rq));  in sched_ttwu_pending()
    [all …]
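
The 332 hit is the heart of runqueue timekeeping: update_rq_clock() reads the per-CPU scheduler clock for the runqueue's own CPU and folds the advance into rq->clock. Roughly, with the lockdep assertion and clock-update debug bookkeeping omitted:

    void update_rq_clock(struct rq *rq)
    {
            s64 delta;

            if (rq->clock_update_flags & RQCF_ACT_SKIP)
                    return;

            /* Read this runqueue's own per-CPU clock (the 332 hit)
             * and accumulate only forward progress. */
            delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
            if (delta < 0)
                    return;

            rq->clock += delta;
            update_rq_clock_task(rq, delta);
    }

update_rq_clock_task() then strips the IRQ time (hit 272) and paravirt steal time (hit 297) out of that delta before advancing rq->clock_task, so task runtime accounting excludes time the CPU spent elsewhere.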

rt.c
    521   (rt_rq = iter->rt_rq[cpu_of(rq)]);)
    540   int cpu = cpu_of(rq);  in sched_rt_rq_enqueue()
    558   int cpu = cpu_of(rq_of_rt_rq(rt_rq));  in sched_rt_rq_dequeue()
    2422  if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))  in switched_to_rt()

deadline.c
    1245  int cpu = cpu_of(rq);  in update_curr_dl()
    2332  src_dl_b = dl_bw_of(cpu_of(rq));  in set_cpus_allowed_dl()
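
The 2332 hit fetches the deadline-bandwidth accounting for the runqueue's CPU. In mainline, dl_bw_of() is roughly a root-domain lookup, sketched here without its RCU lockdep assertion:

    static inline struct dl_bw *dl_bw_of(int i)
    {
            return &cpu_rq(i)->rd->dl_bw; /* per root-domain bandwidth state */
    }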