
Lines Matching refs:curr (kernel/sched/fair.c)

601 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime() local
606 if (curr) { in update_min_vruntime()
607 if (curr->on_rq) in update_min_vruntime()
608 vruntime = curr->vruntime; in update_min_vruntime()
610 curr = NULL; in update_min_vruntime()
616 if (!curr) in update_min_vruntime()
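The update_min_vruntime() matches above only use cfs_rq->curr as a vruntime source while it is still on the runqueue; otherwise curr is dropped and the rbtree's leftmost entity decides. A minimal standalone sketch of that guard, with simplified types, a plain leftmost pointer standing in for the kernel's rbtree lookup, and the wraparound-safe min/max helpers simplified:

    #include <stdbool.h>
    #include <stdint.h>

    struct sched_entity { uint64_t vruntime; bool on_rq; };

    struct cfs_rq {
        struct sched_entity *curr;      /* entity currently running, or NULL */
        struct sched_entity *leftmost;  /* first entity in the rbtree, or NULL */
        uint64_t min_vruntime;
    };

    void update_min_vruntime(struct cfs_rq *cfs_rq)
    {
        struct sched_entity *curr = cfs_rq->curr;
        uint64_t vruntime = cfs_rq->min_vruntime;

        if (curr) {
            if (curr->on_rq)
                vruntime = curr->vruntime;
            else
                curr = NULL;            /* curr already dequeued: ignore it */
        }

        if (cfs_rq->leftmost) {
            if (!curr || cfs_rq->leftmost->vruntime < vruntime)
                vruntime = cfs_rq->leftmost->vruntime;
        }

        /* min_vruntime only ever moves forward */
        if ((int64_t)(vruntime - cfs_rq->min_vruntime) > 0)
            cfs_rq->min_vruntime = vruntime;
    }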
896 struct sched_entity *curr = cfs_rq->curr; in update_curr() local
900 if (unlikely(!curr)) in update_curr()
903 delta_exec = now - curr->exec_start; in update_curr()
907 curr->exec_start = now; in update_curr()
912 stats = __schedstats_from_se(curr); in update_curr()
917 curr->sum_exec_runtime += delta_exec; in update_curr()
920 curr->vruntime += calc_delta_fair(delta_exec, curr); in update_curr()
923 if (entity_is_task(curr)) { in update_curr()
924 struct task_struct *curtask = task_of(curr); in update_curr()
926 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); in update_curr()
936 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
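Taken together, the update_curr() matches show the core accounting step: charge the wall-clock time since exec_start to the running entity and advance its vruntime by a weight-scaled amount. A minimal sketch of that pattern (schedstats, cgroup bandwidth accounting and the tracepoint are omitted; load_weight is a simplified stand-in for se->load.weight; a plain NICE_0_LOAD/weight ratio replaces the kernel's fixed-point calc_delta_fair()):

    #include <stdint.h>

    #define NICE_0_LOAD 1024ULL

    struct sched_entity {
        uint64_t exec_start;        /* timestamp of the last accounting pass */
        uint64_t sum_exec_runtime;  /* total wall-clock runtime */
        uint64_t vruntime;          /* weighted virtual runtime */
        uint64_t load_weight;       /* stand-in for se->load.weight, assumed non-zero */
    };

    struct cfs_rq { struct sched_entity *curr; };

    /* Scale delta by NICE_0_LOAD / weight: heavier entities accrue vruntime more slowly. */
    static uint64_t calc_delta_fair(uint64_t delta, const struct sched_entity *se)
    {
        if (se->load_weight == NICE_0_LOAD)
            return delta;
        return delta * NICE_0_LOAD / se->load_weight;
    }

    void update_curr(struct cfs_rq *cfs_rq, uint64_t now)
    {
        struct sched_entity *curr = cfs_rq->curr;
        int64_t delta_exec;

        if (!curr)
            return;

        delta_exec = (int64_t)(now - curr->exec_start);
        if (delta_exec <= 0)            /* clock went backwards or no progress */
            return;
        curr->exec_start = now;

        curr->sum_exec_runtime += (uint64_t)delta_exec;
        curr->vruntime += calc_delta_fair((uint64_t)delta_exec, curr);
    }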
1012 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1030 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1933 cur = rcu_dereference(dst_rq->curr); in task_numa_compare()
2722 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
3131 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
3133 struct callback_head *work = &curr->numa_work; in task_tick_numa()
3139 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) in task_tick_numa()
3148 now = curr->se.sum_exec_runtime; in task_tick_numa()
3149 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; in task_tick_numa()
3151 if (now > curr->node_stamp + period) { in task_tick_numa()
3152 if (!curr->node_stamp) in task_tick_numa()
3153 curr->numa_scan_period = task_scan_start(curr); in task_tick_numa()
3154 curr->node_stamp += period; in task_tick_numa()
3156 if (!time_before(jiffies, curr->mm->numa_next_scan)) in task_tick_numa()
3157 task_work_add(curr, work, TWA_RESUME); in task_tick_numa()
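The task_tick_numa() matches show how NUMA balancing's scanner is rate-limited from the scheduler tick: the task's accumulated runtime is compared against node_stamp plus its per-task scan period before the scan work is queued. A simplified sketch of that gating (the mm / PF_EXITING / PF_KTHREAD checks, the jiffies-based numa_next_scan test, and task_work_add() are replaced by a plain callback; the struct is illustrative, not the kernel's task_struct):

    #include <stdint.h>

    #define NSEC_PER_MSEC 1000000ULL

    struct numa_task {
        uint64_t sum_exec_runtime;                     /* curr->se.sum_exec_runtime */
        uint64_t node_stamp;                           /* runtime at which the next scan is due */
        unsigned int numa_scan_period;                 /* scan period, in milliseconds */
        void (*queue_scan_work)(struct numa_task *);   /* stand-in for task_work_add() */
    };

    void task_tick_numa(struct numa_task *curr, unsigned int scan_period_start_ms)
    {
        uint64_t now = curr->sum_exec_runtime;
        uint64_t period = (uint64_t)curr->numa_scan_period * NSEC_PER_MSEC;

        if (now > curr->node_stamp + period) {
            if (!curr->node_stamp)                     /* first pass: seed the scan period */
                curr->numa_scan_period = scan_period_start_ms;
            curr->node_stamp += period;

            if (curr->queue_scan_work)
                curr->queue_scan_work(curr);
        }
    }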
3196 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
3323 if (cfs_rq->curr == se) in reweight_entity()
3945 is_idle = is_idle_task(rcu_dereference(rq->curr)); in migrate_se_pelt_lag()
4778 bool curr = cfs_rq->curr == se; in enqueue_entity() local
4784 if (renorm && curr) in enqueue_entity()
4795 if (renorm && !curr) in enqueue_entity()
4821 if (!curr) in enqueue_entity()
4908 if (se != cfs_rq->curr) in dequeue_entity()
4944 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
4956 ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency); in check_preempt_tick()
4958 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; in check_preempt_tick()
4960 delta_exec, cfs_rq, curr, sysctl_sched_min_granularity); in check_preempt_tick()
4969 clear_buddies(cfs_rq, curr); in check_preempt_tick()
4982 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
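check_preempt_tick() decides at each tick whether curr has exhausted its slice: it compares the runtime used since curr was last picked against an ideal runtime, and also checks whether curr's vruntime has pulled too far ahead of the leftmost waiter's. A condensed sketch that returns a preempt decision instead of calling resched_curr() (sched_slice() is not recomputed here, so ideal_runtime is a parameter; buddy clearing is left out):

    #include <stdbool.h>
    #include <stdint.h>

    struct sched_entity {
        uint64_t sum_exec_runtime;
        uint64_t prev_sum_exec_runtime;  /* snapshot taken when curr was picked */
        uint64_t vruntime;
    };

    bool check_preempt_tick(const struct sched_entity *curr,
                            const struct sched_entity *leftmost,   /* first waiter, may be NULL */
                            uint64_t ideal_runtime,                /* min(sched_slice(), sched_latency) */
                            uint64_t min_granularity)
    {
        uint64_t delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;

        if (delta_exec > ideal_runtime)
            return true;                 /* slice used up */

        if (delta_exec < min_granularity)
            return false;                /* ran too briefly to preempt yet */

        if (leftmost) {
            int64_t delta = (int64_t)(curr->vruntime - leftmost->vruntime);
            if (delta > (int64_t)ideal_runtime)
                return true;             /* curr's vruntime is far ahead of the waiter's */
        }
        return false;
    }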
5008 cfs_rq->curr = se; in set_next_entity()
5030 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
5040 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
5045 trace_android_rvh_pick_next_entity(cfs_rq, curr, &se); in pick_next_entity()
5053 if (!left || (curr && entity_before(curr, left))) in pick_next_entity()
5054 left = curr; in pick_next_entity()
5065 if (se == curr) { in pick_next_entity()
5069 if (!second || (curr && entity_before(curr, second))) in pick_next_entity()
5070 second = curr; in pick_next_entity()
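pick_next_entity() has to treat cfs_rq->curr as a candidate alongside the rbtree's leftmost entity, because the running entity is kept out of the tree while it runs. A stripped-down sketch of that comparison (the skip/next/last buddy handling and the vendor hook are omitted):

    #include <stdbool.h>
    #include <stdint.h>

    struct sched_entity { uint64_t vruntime; };

    /* True if a should run before b: smaller vruntime wins. */
    static bool entity_before(const struct sched_entity *a, const struct sched_entity *b)
    {
        return (int64_t)(a->vruntime - b->vruntime) < 0;
    }

    /*
     * Pick the rbtree's leftmost waiter, unless the currently running entity
     * (which is not in the tree) still has the smaller vruntime.
     */
    struct sched_entity *pick_next_entity(struct sched_entity *leftmost,
                                          struct sched_entity *curr)
    {
        struct sched_entity *left = leftmost;

        if (!left || (curr && entity_before(curr, left)))
            left = curr;

        return left;    /* may be NULL when the queue is empty and curr is gone */
    }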
5116 cfs_rq->curr = NULL; in put_prev_entity()
5120 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
5130 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
5131 update_cfs_group(curr); in entity_tick()
5151 check_preempt_tick(cfs_rq, curr); in entity_tick()
5152 trace_android_rvh_entity_tick(cfs_rq, curr); in entity_tick()
5288 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
5528 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
5759 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
6054 struct task_struct *curr = rq->curr; in hrtick_update() local
6056 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class) in hrtick_update()
6059 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
6060 hrtick_start_fair(rq, curr); in hrtick_update()
7676 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
7678 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
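wakeup_preempt_entity() compares the vruntime difference between curr and a waking entity against a wakeup granularity and returns a three-way result that check_preempt_wakeup() acts on. A sketch of that comparison, with the granularity passed in rather than derived from sysctl_sched_wakeup_granularity via wakeup_gran():

    #include <stdint.h>

    struct sched_entity { uint64_t vruntime; };

    /*
     * Should 'se' preempt 'curr'?
     *   -1: curr is already behind se, nothing to do
     *    0: se is ahead, but within the wakeup granularity
     *    1: se is ahead of curr by more than the granularity -> preempt
     */
    int wakeup_preempt_entity(const struct sched_entity *curr,
                              const struct sched_entity *se, int64_t gran)
    {
        int64_t vdiff = (int64_t)(curr->vruntime - se->vruntime);

        if (vdiff <= 0)
            return -1;

        if (vdiff > gran)
            return 1;

        return 0;
    }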
7723 struct task_struct *curr = rq->curr; in check_preempt_wakeup() local
7724 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup()
7725 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup()
7734 trace_android_rvh_check_preempt_wakeup_ignore(curr, &ignore); in check_preempt_wakeup()
7762 if (test_tsk_need_resched(curr)) in check_preempt_wakeup()
7766 if (unlikely(task_has_idle_policy(curr)) && in check_preempt_wakeup()
7823 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
7842 struct sched_entity *curr = cfs_rq->curr; in pick_task_fair() local
7845 if (curr) { in pick_task_fair()
7846 if (curr->on_rq) in pick_task_fair()
7849 curr = NULL; in pick_task_fair()
7855 se = pick_next_entity(cfs_rq, curr); in pick_task_fair()
7889 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair() local
7897 if (curr) { in pick_next_task_fair()
7898 if (curr->on_rq) in pick_next_task_fair()
7901 curr = NULL; in pick_next_task_fair()
7919 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
8038 struct task_struct *curr = rq->curr; in yield_task_fair() local
8039 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair()
8040 struct sched_entity *se = &curr->se; in yield_task_fair()
8050 if (curr->policy != SCHED_BATCH) { in yield_task_fair()
8792 curr_class = rq->curr->sched_class; in __update_blocked_others()
9615 if (rq->curr != rq->idle && rq->curr != p) in idle_cpu_without()
10833 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { in load_balance()
11876 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) in task_tick_core() argument
11896 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
11975 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} in task_tick_core() argument
11986 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
11989 struct sched_entity *se = &curr->se; in task_tick_fair()
11997 task_tick_numa(rq, curr); in task_tick_fair()
11999 update_misfit_status(curr, rq); in task_tick_fair()
12000 update_overutilized_status(task_rq(curr)); in task_tick_fair()
12002 task_tick_core(rq, curr); in task_tick_fair()
12013 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
12021 curr = cfs_rq->curr; in task_fork_fair()
12022 if (curr) { in task_fork_fair()
12024 se->vruntime = curr->vruntime; in task_fork_fair()
12028 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
12033 swap(curr->vruntime, se->vruntime); in task_fork_fair()
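Finally, the task_fork_fair() lines show how a forked child's vruntime is seeded from the parent running on the cfs_rq (cfs_rq->curr) and, when sysctl_sched_child_runs_first is set, swapped so the child sorts before the parent. A small sketch of that placement step (update_curr(), place_entity() and the later renormalization against min_vruntime are omitted):

    #include <stdbool.h>
    #include <stdint.h>

    struct sched_entity { uint64_t vruntime; };

    static bool entity_before(const struct sched_entity *a, const struct sched_entity *b)
    {
        return (int64_t)(a->vruntime - b->vruntime) < 0;
    }

    static void swap_u64(uint64_t *a, uint64_t *b)
    {
        uint64_t t = *a; *a = *b; *b = t;
    }

    /* Seed the child's vruntime from the parent currently running on this cfs_rq. */
    void task_fork_fair(struct sched_entity *child,
                        struct sched_entity *curr,     /* parent, i.e. cfs_rq->curr, may be NULL */
                        bool child_runs_first)         /* sysctl_sched_child_runs_first */
    {
        if (curr)
            child->vruntime = curr->vruntime;

        /* Make sure the child ends up with the smaller vruntime if requested. */
        if (child_runs_first && curr && entity_before(curr, child))
            swap_u64(&curr->vruntime, &child->vruntime);
    }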