Lines Matching refs:rq_of in kernel/sched/fair.c (the two definitions at 228 and 369 are sketched after the listing)
228 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() function
278 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { in list_add_leaf_cfs_rq()
280 &rq_of(cfs_rq)->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
283 &rq_of(cfs_rq)->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
369 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() function
689 u64 now = rq_of(cfs_rq)->clock_task; in update_curr()
721 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock); in update_stats_wait_start()
741 rq_of(cfs_rq)->clock - se->statistics.wait_start)); in update_stats_wait_end()
744 rq_of(cfs_rq)->clock - se->statistics.wait_start); in update_stats_wait_end()
748 rq_of(cfs_rq)->clock - se->statistics.wait_start); in update_stats_wait_end()
774 se->exec_start = rq_of(cfs_rq)->clock_task; in update_stats_curr_start()
1010 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
1013 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks); in account_entity_enqueue()
1023 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
1096 se = tg->se[cpu_of(rq_of(cfs_rq))]; in update_cfs_shares()
1515 se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task; in enqueue_entity_load_avg()
1610 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start; in enqueue_sleeper()
1627 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start; in enqueue_sleeper()
1809 se->statistics.sleep_start = rq_of(cfs_rq)->clock; in dequeue_entity()
1811 se->statistics.block_start = rq_of(cfs_rq)->clock; in dequeue_entity()
1851 resched_task(rq_of(cfs_rq)->curr); in check_preempt_tick()
1875 resched_task(rq_of(cfs_rq)->curr); in check_preempt_tick()
1900 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
1995 resched_task(rq_of(cfs_rq)->curr); in entity_tick()
2002 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
2086 return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time; in cfs_rq_clock_task()
2142 struct rq *rq = rq_of(cfs_rq); in expire_cfs_rq_runtime()
2184 resched_task(rq_of(cfs_rq)->curr); in __account_cfs_rq_runtime()
2257 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
2262 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
2296 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
2302 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in unthrottle_cfs_rq()
2348 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
2710 return rq_of(cfs_rq)->clock_task; in cfs_rq_clock_task()
4105 struct rq *rq = rq_of(cfs_rq); in __update_blocked_averages_cpu()
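
The matches at 228 and 369 are the two definitions of rq_of() itself: one for builds with CONFIG_FAIR_GROUP_SCHED and one for builds without it. Below is a minimal sketch of how those definitions read in a fair.c of roughly this vintage (the listing still uses resched_task() and __update_blocked_averages_cpu(), so an early-3.x kernel); comments are paraphrased and the surrounding context may differ slightly.

#ifdef CONFIG_FAIR_GROUP_SCHED
/* With group scheduling, each cfs_rq carries a back-pointer to the
 * CPU runqueue it is attached to, since one CPU hosts many cfs_rq
 * instances (one per task group). */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}
#else
/* Without group scheduling there is exactly one cfs_rq per CPU and it
 * is embedded in struct rq, so container_of() recovers the runqueue. */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

This split is why nearly every call site above reaches per-CPU state such as ->clock, ->clock_task, ->load and ->curr through rq_of(cfs_rq) instead of caching a struct rq pointer: the same code works whether the cfs_rq is a per-group runqueue or the one embedded in struct rq.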