
Searched refs:cpu_rq (Results 1 – 13 of 13) sorted by relevance

/kernel/sched/
membarrier.c
141 if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) & in membarrier_global_expedited()
150 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
225 p = rcu_dereference(cpu_rq(cpu_id)->curr); in membarrier_private_expedited()
238 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
335 struct rq *rq = cpu_rq(cpu); in sync_runqueues_membarrier_state()
cpuacct.c
112 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_read()
129 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_read()
148 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_write()
156 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_write()
334 lockdep_assert_held(&cpu_rq(cpu)->lock); in cpuacct_charge()
fair.c
1634 struct rq *rq = cpu_rq(cpu); in update_numa_stats()
1666 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1681 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1696 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1755 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2102 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2532 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
5386 cfs_rq->throttled_clock_pelt = rq_clock_task_mult(cpu_rq(cpu)); in sync_throttle()
5676 unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN); in cpu_overutilized()
5677 unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX); in cpu_overutilized()
[all …]
cpupri.c
55 struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr); in drop_nopreempt_cpus()
346 return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL; in cpupri_check_rt()
core.c
654 struct rq *rq = cpu_rq(cpu); in resched_cpu()
735 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu()
1620 init_uclamp_rq(cpu_rq(cpu)); in init_uclamp()
1857 rq = cpu_rq(new_cpu); in move_queued_task()
2216 dst_rq = cpu_rq(cpu); in __migrate_swap_task()
2253 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
2254 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
2582 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
2598 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
2800 struct rq *rq = cpu_rq(cpu); in send_call_function_single_ipi()
[all …]
deadline.c
74 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
79 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus()
98 struct root_domain *rd = cpu_rq(i)->rd; in __dl_bw_capacity()
126 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
621 later_rq = cpu_rq(cpu); in dl_task_offline_migration()
1686 rq = cpu_rq(cpu); in select_task_rq_dl()
1717 cpu_rq(target)->dl.earliest_dl.curr) || in select_task_rq_dl()
1718 (cpu_rq(target)->dl.dl_nr_running == 0))) in select_task_rq_dl()
2058 later_rq = cpu_rq(cpu); in find_lock_later_rq()
2237 src_rq = cpu_rq(cpu); in pull_dl_task()
[all …]
rt.c
169 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry()
665 return &cpu_rq(cpu)->rt; in sched_rt_period_rt_rq()
1518 rq = cpu_rq(cpu); in select_task_rq_rt()
1523 this_cpu_rq = cpu_rq(this_cpu); in select_task_rq_rt()
1588 p->prio < cpu_rq(target)->rt.highest_prio.curr)) in select_task_rq_rt()
1903 lowest_rq = cpu_rq(cpu); in find_lock_lowest_rq()
2262 src_rq = cpu_rq(cpu); in pull_rt_task()
2812 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints()
2916 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
sched.h
1130 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) macro
1132 #define task_rq(p) cpu_rq(task_cpu(p))
1133 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1465 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
2356 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2374 struct rq *rq = cpu_rq(i); in __dl_update()
2625 return cpu_rq(cpu)->cpu_capacity_orig; in capacity_orig_of()
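
For orientation, the sched.h hits above are the definition site: cpu_rq() simply indexes the per-CPU runqueues array, and most call sites in this listing follow one of two access patterns, either taking the runqueue lock before touching rq fields (cpuacct.c) or dereferencing rq->curr under RCU (membarrier.c, cpupri.c). Below is a minimal sketch of both patterns, assuming it lives inside kernel/sched/ where the private sched.h is visible; the helper inspect_cpu_rq() is hypothetical and not part of the tree.

    /* Hypothetical helper illustrating the two cpu_rq() access patterns
     * seen in the hits above; for illustration only. */
    #include "sched.h"

    static void inspect_cpu_rq(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);    /* &per_cpu(runqueues, cpu) */
            struct task_struct *curr;

            /* Pattern 1: lock the runqueue before reading or updating its
             * fields, as cpuacct_cpuusage_read()/_write() do above. */
            raw_spin_lock_irq(&rq->lock);
            /* ... access rq-protected state here ... */
            raw_spin_unlock_irq(&rq->lock);

            /* Pattern 2: peek at the task currently running on that CPU
             * under RCU, as membarrier_global_expedited() and
             * drop_nopreempt_cpus() do above; the result may go stale as
             * soon as the read section ends. */
            rcu_read_lock();
            curr = rcu_dereference(rq->curr);
            (void)curr;
            rcu_read_unlock();
    }
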
cpufreq_schedutil.c
207 struct rq *rq = cpu_rq(cpu); in schedutil_cpu_util()
291 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util()
443 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) in ignore_dl_rate_limit()
stats.c
29 rq = cpu_rq(cpu); in show_schedstat()
debug.c
561 struct rq *rq = cpu_rq(cpu); in print_cfs_rq()
582 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; in print_cfs_rq()
677 dl_bw = &cpu_rq(cpu)->rd->dl_bw; in print_dl_rq()
689 struct rq *rq = cpu_rq(cpu); in print_cpu()
topology.c
355 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
668 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
2045 rq = cpu_rq(i); in build_sched_domains()
2249 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2286 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
cputime.c
985 rq = cpu_rq(cpu); in kcpustat_field()
1072 rq = cpu_rq(cpu); in kcpustat_cpu_fetch()