Searched refs:cfs_rq (Results 1 – 3 of 3) sorted by relevance
kernel/sched_fair.c

   90  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
   92          return cfs_rq->rq;
  102  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
  104          return p->se.cfs_rq;
  108  static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
  110          return se->cfs_rq;
  114  static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
  122  static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
  124          return cfs_rq->tg->cfs_rq[this_cpu];
  128  #define for_each_leaf_cfs_rq(rq, cfs_rq) \
  [all …]
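Taken together, these helpers encode the navigation paths between the core CFS objects: a task embeds a sched_entity, each entity points at the cfs_rq it is queued on, and each cfs_rq points back at the CPU run queue it belongs to. Below is a minimal user-space sketch of that pointer graph; the stripped-down struct definitions (and the cpu field on struct rq) are assumptions for illustration, keeping only the fields the accessors above actually touch.

    #include <stdio.h>

    /* Stripped-down stand-ins for the kernel types (illustrative only). */
    struct rq { int cpu; };                     /* per-CPU run queue */

    struct cfs_rq {
            struct rq *rq;                      /* back-pointer to the CPU run queue */
    };

    struct sched_entity {
            struct cfs_rq *cfs_rq;              /* queue this entity is enqueued on */
    };

    struct task_struct {
            struct sched_entity se;             /* every task embeds one entity */
    };

    /* Same shape as the accessors in the sched_fair.c hits above. */
    static inline struct rq *rq_of(struct cfs_rq *cfs_rq) { return cfs_rq->rq; }
    static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) { return p->se.cfs_rq; }
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) { return se->cfs_rq; }

    int main(void)
    {
            struct rq rq0 = { .cpu = 0 };
            struct cfs_rq cfs0 = { .rq = &rq0 };
            struct task_struct p = { .se = { .cfs_rq = &cfs0 } };

            /* task -> cfs_rq -> rq round trip */
            printf("task is queued on cpu %d\n", rq_of(task_cfs_rq(&p))->cpu);
            return 0;
    }

cpu_cfs_rq() adds one more hop, through the owning task group's per-CPU cfs_rq array, which is the array the sched.c hits below define.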
kernel/sched_debug.c

  161  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  171          struct task_group *tg = cfs_rq->tg;
  178          uid_t uid = cfs_rq->tg->uid;
  185                          SPLIT_NS(cfs_rq->exec_clock));
  188          if (cfs_rq->rb_leftmost)
  189                  MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
  190          last = __pick_last_entity(cfs_rq);
  193          min_vruntime = cfs_rq->min_vruntime;
  208          SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
  209          SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
  [all …]
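print_cfs_rq() derives the queue's vruntime spread from the red-black tree that keeps runnable entities sorted by vruntime: the leftmost node (rb_leftmost, via __pick_next_entity()) is the next task to run, and __pick_last_entity() returns the rightmost, furthest-advanced one. A rough user-space sketch of the same bookkeeping follows, substituting a sorted array for the rbtree; struct toy_entity and the sample vruntime values are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_entity { unsigned long long vruntime; };

    static int cmp_vruntime(const void *a, const void *b)
    {
            const struct toy_entity *ea = a, *eb = b;
            return (ea->vruntime > eb->vruntime) - (ea->vruntime < eb->vruntime);
    }

    int main(void)
    {
            struct toy_entity q[] = { { 300 }, { 120 }, { 250 } };
            size_t n = sizeof(q) / sizeof(q[0]);

            /* Stand-in for the vruntime-ordered rbtree. */
            qsort(q, n, sizeof(q[0]), cmp_vruntime);

            /* Leftmost = next to run; rightmost = furthest ahead.
             * Format mirrors the SEQ_printf lines in the hits above. */
            printf(" .%-30s: %llu\n", "MIN_vruntime", q[0].vruntime);
            printf(" .%-30s: %llu\n", "max_vruntime", q[n - 1].vruntime);
            printf(" .%-30s: %zu\n", "nr_running", n);
            return 0;
    }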
kernel/sched.c

  262  struct cfs_rq;
  280          struct cfs_rq **cfs_rq;                    /* member of struct task_group */
  318  static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
  382          p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; /* in set_task_rq() */
  403  struct cfs_rq {
  562          struct cfs_rq cfs;                         /* member of struct rq */
 1492          rq_weight = tg->cfs_rq[cpu]->rq_weight;    /* in update_group_shares_cpu() */
 1509          tg->cfs_rq[cpu]->shares = shares;          /* in update_group_shares_cpu() */
 1534          weight = tg->cfs_rq[i]->load.weight;       /* in tg_shares_up() */
 1538          tg->cfs_rq[i]->rq_weight = weight;         /* in tg_shares_up() */
  [all …]
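sched.c owns the data model the other two files consume: every task_group carries a per-CPU array of cfs_rq pointers (line 280), set_task_rq() points a task's entity at the right per-CPU queue (line 382), and the tg_shares_up()/update_group_shares_cpu() pair redistributes the group's shares across CPUs in proportion to the load.weight each per-CPU queue carries (lines 1492–1538). A simplified sketch of that proportional split is below; toy_task_group and toy_shares_up are hypothetical names, and the MIN_SHARES/MAX_SHARES clamping and sched-domain walk the kernel performs are omitted.

    #include <stdio.h>

    #define NR_CPUS 4

    /* Toy model of a task group: just the fields the split needs. */
    struct toy_task_group {
            unsigned long shares;               /* group-wide share */
            unsigned long rq_weight[NR_CPUS];   /* load.weight of each per-CPU cfs_rq */
            unsigned long cpu_shares[NR_CPUS];  /* result of the split */
    };

    /* Distribute tg->shares across CPUs proportionally to queue weight. */
    static void toy_shares_up(struct toy_task_group *tg)
    {
            unsigned long total = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    total += tg->rq_weight[cpu];
            if (!total)
                    total = 1;  /* avoid div-by-zero for an idle group */

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    tg->cpu_shares[cpu] = tg->shares * tg->rq_weight[cpu] / total;
    }

    int main(void)
    {
            struct toy_task_group tg = {
                    .shares = 1024,
                    .rq_weight = { 2048, 1024, 1024, 0 },  /* cpu3 idle for this group */
            };
            int cpu;

            toy_shares_up(&tg);
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu%d shares: %lu\n", cpu, tg.cpu_shares[cpu]);
            return 0;
    }

With these sample weights, cpu0 ends up with half of the group's 1024 shares and the idle cpu3 with none, which is the imbalance the rq_weight and shares fields above exist to track.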