Search results for refs:cpu_of (results 1 – 11 of 11, sorted by relevance)
/kernel/linux/linux-5.10/kernel/sched/
pelt.h
- 100: delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq))); in update_rq_clock_pelt()
- 101: delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq))); in update_rq_clock_pelt()

pelt.c
- 443: running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq))); in update_irq_load_avg()
- 444: running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq))); in update_irq_load_avg()

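Both PELT call sites apply the same pattern: a wall-clock delta is scaled twice, once by the CPU's microarchitectural capacity (arch_scale_cpu_capacity()) and once by its current frequency capacity (arch_scale_freq_capacity()), so load tracking stays invariant across big.LITTLE cores and DVFS states. A minimal user-space sketch of that fixed-point scaling, assuming the kernel's SCHED_CAPACITY_SHIFT of 10; the sample capacity values are made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10                            /* kernel fixed-point shift */
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT) /* 1024 == full capacity */

/* Mirrors the kernel's cap_scale() macro: multiply, then shift back down. */
static inline uint64_t cap_scale(uint64_t v, unsigned long scale)
{
	return (v * scale) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
	uint64_t delta = 4000;        /* ns of wall-clock time (illustrative) */
	unsigned long cpu_cap = 512;  /* LITTLE core: half a big core's capacity */
	unsigned long freq_cap = 768; /* currently at 75% of max frequency */

	/* Same double scaling as update_rq_clock_pelt()/update_irq_load_avg(). */
	delta = cap_scale(delta, cpu_cap);
	delta = cap_scale(delta, freq_cap);

	printf("scaled delta = %llu ns\n", (unsigned long long)delta); /* 1500 */
	return 0;
}
```

A 4000 ns delta on a half-capacity core running at 75% frequency thus contributes only 1500 ns of "full-speed" time to the PELT clock.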
fair.c
- 333: int cpu = cpu_of(rq); in list_add_leaf_cfs_rq()
- 830: long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
- 4090: if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) in util_est_update()
- 4266: int cpu = cpu_of(rq); in update_misfit_status()
- 4284: task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq))); in update_misfit_status()
- 4286: task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq))); in update_misfit_status()
- 4953: struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
- 4971: struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
- 5011: se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
- 5063: se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
- [all …]

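Several of the fair.c hits use cpu_of() as an index into a task group's per-CPU arrays (tg->cfs_rq[] and tg->se[]): each group owns one cfs_rq and one group scheduling entity per CPU, and the runqueue's CPU id selects the matching pair. A reduced sketch of that layout; the struct members are trimmed to a bare minimum and tg_throttle_down_sketch() is a hypothetical stand-in for the real throttling logic:

```c
#define NR_CPUS 8

struct cfs_rq       { int throttled; /* real fields elided */ };
struct sched_entity { int on_rq;     /* real fields elided */ };

/* Echoes struct task_group: one cfs_rq and one group entity per CPU. */
struct task_group {
	struct cfs_rq       *cfs_rq[NR_CPUS];
	struct sched_entity *se[NR_CPUS];
};

struct rq { int cpu; };
static inline int cpu_of(struct rq *rq) { return rq->cpu; }

/* Shape of tg_throttle_down(): cpu_of(rq) selects this CPU's slice
 * of the group's per-CPU arrays. */
static void tg_throttle_down_sketch(struct task_group *tg, struct rq *rq)
{
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
	struct sched_entity *se = tg->se[cpu_of(rq)];

	cfs_rq->throttled = 1;  /* placeholder for the real dequeue work */
	se->on_rq = 0;
}
```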
rt.c
- 281: !cpu_isolated(cpu_of(rq)); in need_pull_rt_task()
- 527: (rt_rq = iter->rt_rq[cpu_of(rq)]);)
- 546: int cpu = cpu_of(rq); in sched_rt_rq_enqueue()
- 564: int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
- 2477: cpu_isolated(cpu_of(rq))) in switched_from_rt()
- 2520: if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
- 2648: task_cpu(next_task) != cpu_of(busiest_rq)) in rt_active_load_balance_cpu_stop()
- 2656: if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task))) in rt_active_load_balance_cpu_stop()

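Two things stand out here. First, the cpu_isolated() checks appear to be a vendor extension in this 5.10 tree; core isolation is not part of mainline's rt.c. Second, rt_active_load_balance_cpu_stop() compares original CPU capacities before migrating an RT task, as sketched below; capacity_orig_of() is declared only as a hypothetical extern for illustration:

```c
#include <stdbool.h>

/* Hypothetical lookup mirroring the kernel's capacity_orig_of():
 * a CPU's full capacity at its maximum frequency. */
extern unsigned long capacity_orig_of(int cpu);

/* Shape of the guard at rt.c:2656: abort the RT active load balance
 * unless the destination CPU is strictly bigger than the source. */
static bool rt_migration_worthwhile(int dst_cpu, int src_cpu)
{
	return capacity_orig_of(dst_cpu) > capacity_orig_of(src_cpu);
}
```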
walt.h
- 55: unsigned long capcurr = capacity_curr_of(cpu_of(rq)); in scale_exec_time()

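WALT (Window Assisted Load Tracking) normalizes raw execution time by the CPU's current capacity, capacity_curr_of(cpu_of(rq)), so that window statistics are comparable across CPUs and frequencies. A hedged sketch of what scale_exec_time() plausibly does, assuming the kernel's 1024-based capacity fixed point; scale_exec_time_sketch() is an illustrative name:

```c
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10   /* kernel fixed-point shift, scale = 1024 */

/* Normalize a raw execution delta by the CPU's *current* capacity
 * (microarch capacity scaled by current frequency), so that WALT
 * windows measure "full-speed equivalent" work. */
static inline uint64_t scale_exec_time_sketch(uint64_t delta_ns,
					      unsigned long capcurr)
{
	return (delta_ns * capcurr) >> SCHED_CAPACITY_SHIFT;
}
```

With capcurr = 512 (half of full scale), 10 ms of wall-clock execution accounts as 5 ms of full-capacity work.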
stats.h
- 153: psi_memstall_tick(rq->curr, cpu_of(rq)); in psi_task_tick()

sched.h
- 1204: static inline int cpu_of(struct rq *rq) in cpu_of() (function definition)
- 2116: int cpu = cpu_of(rq); in sched_update_tick_dependency()
- 2180: if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
- 2561: cpu_of(rq))); in cpufreq_update_util()

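The sched.h hit at line 1204 is the definition itself; in linux-5.10 it is a trivial accessor:

```c
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}
```

Every other hit in this list is a caller: cpu_of() is the canonical way to turn a struct rq pointer back into its CPU id (always 0 on uniprocessor builds, where there is only one runqueue).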
core.c
- 265: irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
- 290: steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
- 325: delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
- 360: WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
- 413: smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
- 626: cpu = cpu_of(rq); in resched_curr()
- 751: int cpu = cpu_of(rq); in nohz_csd_func()
- 2027: stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); in __set_cpus_allowed_ptr()
- 2717: if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
- 2718: set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
- [all …]

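The core.c clock paths all use cpu_of(rq) to read per-CPU time sources for the runqueue being updated, not for the CPU the code happens to run on. A compact sketch of the update_rq_clock() shape; sched_clock_cpu() is declared as an extern stand-in for the kernel's per-CPU scheduler clock:

```c
#include <stdint.h>

struct rq {
	int cpu;
	uint64_t clock;   /* ns, last snapshot of this CPU's sched clock */
};

static inline int cpu_of(struct rq *rq) { return rq->cpu; }

/* Stand-in for the kernel's per-CPU monotonic nanosecond clock. */
extern uint64_t sched_clock_cpu(int cpu);

/* Shape of update_rq_clock(): advance rq->clock by the time elapsed
 * on this runqueue's own CPU, never some other CPU's clock. */
static void update_rq_clock_sketch(struct rq *rq)
{
	int64_t delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;

	if (delta < 0)
		return;        /* clock appeared to go backwards: ignore */
	rq->clock += delta;
}
```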
deadline.c
- 1245: int cpu = cpu_of(rq); in update_curr_dl()
- 2340: src_dl_b = dl_bw_of(cpu_of(rq)); in set_cpus_allowed_dl()

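update_curr_dl() charges the time a deadline task actually ran against its reservation budget; cpu_of(rq) feeds the per-CPU accounting and, at line 2340, selects the root-domain bandwidth structure via dl_bw_of(). A minimal sketch of the budget-charging step only; dl_charge() is a hypothetical reduction of the real logic:

```c
#include <stdint.h>

struct sched_dl_entity {
	int64_t runtime;    /* remaining budget in this period, ns */
	uint64_t deadline;  /* absolute deadline, ns */
};

/* Shape of the accounting in update_curr_dl(): subtract executed time
 * from the reservation's budget; depletion means the task must be
 * throttled until its next replenishment. */
static int dl_charge(struct sched_dl_entity *dl_se, uint64_t delta_exec)
{
	dl_se->runtime -= (int64_t)delta_exec;
	return dl_se->runtime <= 0;   /* nonzero: caller throttles the task */
}
```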
walt.c
- 1794: cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask); in walt_sched_init_rq()

/kernel/linux/linux-5.10/kernel/sched/rtg/
rtg.c
- 113: int cpu = cpu_of(rq); in transfer_busy_time()