
Lines matching refs:idle in the CFS scheduler (kernel/sched/fair.c):

4407 cfs_b->idle = 0; in assign_cfs_rq_runtime()
4612 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
4679 if (cfs_b->idle && !throttled) in do_sched_cfs_period_timer()
4686 cfs_b->idle = 1; in do_sched_cfs_period_timer()
4720 cfs_b->idle = 0; in do_sched_cfs_period_timer()
4932 int idle = 0; in sched_cfs_period_timer() local
4971 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); in sched_cfs_period_timer()
4973 if (idle) in sched_cfs_period_timer()
4977 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; in sched_cfs_period_timer()
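
The cfs_b->idle flag above tracks whether any bandwidth runtime was handed out during the last period: assign_cfs_rq_runtime() clears it (line 4407), do_sched_cfs_period_timer() reports "idle" only once a full period has passed with the flag still set and nothing throttled (lines 4679-4720), and sched_cfs_period_timer() then lets the hrtimer lapse (line 4977). A minimal user-space sketch of that decision, with cfs_bandwidth_model and period_timer_tick as hypothetical stand-ins for the kernel structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct cfs_bandwidth_model {
        bool idle;      /* no runtime handed out during the last period */
        bool throttled; /* some cfs_rq is still throttled */
    };

    /*
     * Models the shape of do_sched_cfs_period_timer(): once a whole
     * period passes with the idle flag still set and nothing throttled,
     * report idle so the caller can let the period timer stop; otherwise
     * mark the period idle and keep the timer running for a re-check.
     */
    static int period_timer_tick(struct cfs_bandwidth_model *b)
    {
        if (b->idle && !b->throttled)
            return 1;   /* fully idle: timer may stop */

        b->idle = true; /* assume idle until runtime is claimed again */
        return 0;
    }

    int main(void)
    {
        struct cfs_bandwidth_model b = { .idle = false, .throttled = false };

        /* First expiry: activity was seen, so the timer restarts. */
        printf("%s\n", period_timer_tick(&b) ? "NORESTART" : "RESTART");
        /* Second expiry with no runtime claimed in between: timer stops. */
        printf("%s\n", period_timer_tick(&b) ? "NORESTART" : "RESTART");
        return 0;
    }
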
5721 struct cpuidle_state *idle = idle_get_state(rq); in find_idlest_group_cpu() local
5722 if (idle && idle->exit_latency < min_exit_latency) { in find_idlest_group_cpu()
5728 min_exit_latency = idle->exit_latency; in find_idlest_group_cpu()
5731 } else if ((!idle || idle->exit_latency == min_exit_latency) && in find_idlest_group_cpu()
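
find_idlest_group_cpu() prefers, among idle CPUs, the one in the shallowest cpuidle state, since a smaller exit_latency means it can start running a task soonest (lines 5721-5731). A runnable sketch of that selection loop; cpu_snapshot and pick_shallowest_idle are hypothetical names, and the real code's tie-break on idle duration and its load fallback for busy CPUs are elided:

    #include <limits.h>
    #include <stdio.h>

    /* Hypothetical per-CPU snapshot; exit latency < 0 means "not idle". */
    struct cpu_snapshot {
        int id;
        int exit_latency_us;
    };

    /*
     * Models the selection loop in find_idlest_group_cpu(): among idle
     * CPUs, pick the one in the shallowest idle state, i.e. with the
     * smallest cpuidle exit latency, since it wakes up fastest.
     */
    static int pick_shallowest_idle(const struct cpu_snapshot *cpus, int n)
    {
        int min_exit_latency = INT_MAX;
        int best = -1;

        for (int i = 0; i < n; i++) {
            if (cpus[i].exit_latency_us < 0)
                continue; /* busy CPU: the real code falls back to load */
            if (cpus[i].exit_latency_us < min_exit_latency) {
                min_exit_latency = cpus[i].exit_latency_us;
                best = cpus[i].id;
            }
        }
        return best;
    }

    int main(void)
    {
        struct cpu_snapshot cpus[] = {
            { 0, -1 }, { 1, 200 }, { 2, 10 }, { 3, 10 },
        };
        printf("idlest cpu: %d\n", pick_shallowest_idle(cpus, 4)); /* 2 */
        return 0;
    }
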
5887 bool idle = true; in select_idle_core() local
5892 idle = false; in select_idle_core()
5895 if (idle) in select_idle_core()
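
select_idle_core() treats a physical core as idle only when every SMT sibling on it is idle: the flag starts true and any busy sibling clears it (lines 5887-5895). A small model of that inner test; find_idle_core and the fixed topology are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CORES    2
    #define NR_SIBLINGS 2

    /*
     * Models the inner test of select_idle_core(): a physical core only
     * counts as idle when all of its SMT siblings are idle, so a single
     * busy hyperthread disqualifies the whole core.
     */
    static int find_idle_core(bool busy[NR_CORES][NR_SIBLINGS])
    {
        for (int core = 0; core < NR_CORES; core++) {
            bool idle = true;

            for (int s = 0; s < NR_SIBLINGS; s++) {
                if (busy[core][s])
                    idle = false;
            }
            if (idle)
                return core;
        }
        return -1;
    }

    int main(void)
    {
        bool busy[NR_CORES][NR_SIBLINGS] = {
            { false, true  }, /* core 0: one busy sibling */
            { false, false }, /* core 1: fully idle */
        };
        printf("idle core: %d\n", find_idle_core(busy)); /* 1 */
        return 0;
    }
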
6359 struct cpuidle_state *idle; in find_energy_efficient_cpu() local
6446 idle = idle_get_state(cpu_rq(cpu)); in find_energy_efficient_cpu()
6447 if (idle && idle->exit_latency > min_exit_lat && in find_energy_efficient_cpu()
6451 if (idle) in find_energy_efficient_cpu()
6452 min_exit_lat = idle->exit_latency; in find_energy_efficient_cpu()
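
find_energy_efficient_cpu() applies the same shallowest-idle preference from the other direction: a candidate already in a deeper idle state than the best idle CPU seen so far (exit_latency greater than min_exit_lat) is skipped, and min_exit_lat tracks the shallowest state encountered (lines 6446-6452). A sketch of just that filter, with the surrounding energy and capacity conditions elided; candidate and prefer_shallow_idle are hypothetical:

    #include <limits.h>
    #include <stdio.h>

    /* Hypothetical candidate; exit latency < 0 means the CPU is busy. */
    struct candidate {
        int id;
        int exit_latency_us;
    };

    /*
     * Models just the idle-state filter: a CPU in a deeper idle state
     * than the best idle candidate seen so far is skipped, and
     * min_exit_lat tracks the shallowest state encountered.
     */
    static int prefer_shallow_idle(const struct candidate *c, int n)
    {
        int min_exit_lat = INT_MAX;
        int best = -1;

        for (int i = 0; i < n; i++) {
            if (c[i].exit_latency_us >= 0 && c[i].exit_latency_us > min_exit_lat)
                continue; /* deeper idle state than the current best */
            if (c[i].exit_latency_us >= 0)
                min_exit_lat = c[i].exit_latency_us;
            best = c[i].id;
        }
        return best;
    }

    int main(void)
    {
        struct candidate c[] = { { 0, 50 }, { 1, 400 }, { 2, 10 } };
        printf("preferred cpu: %d\n", prefer_shallow_idle(c, 3)); /* 2 */
        return 0;
    }
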
6808 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
6825 goto idle; in pick_next_task_fair()
6864 goto idle; in pick_next_task_fair()
6933 idle: in pick_next_task_fair()
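
The goto idle jumps in pick_next_task_fair() (lines 6825, 6864) land on the idle: label at line 6933, where the real code attempts a newly-idle balance before conceding the CPU to the idle task, retrying the pick if anything was pulled. A toy model of that control flow, with all names hypothetical:

    #include <stdio.h>

    static int nr_running; /* runnable CFS tasks on this model runqueue */

    /* Stands in for a newly-idle balance; returns tasks pulled. */
    static int newidle_balance_model(void)
    {
        return 0; /* pretend nothing could be pulled from other CPUs */
    }

    static const char *pick_next_task_model(void)
    {
        if (!nr_running)
            goto idle;

        return "cfs task";

    idle:
        /*
         * With no runnable CFS task, try to pull work from other CPUs
         * before conceding to the idle task; a successful pull makes
         * the real code restart the pick.
         */
        if (newidle_balance_model() > 0)
            return pick_next_task_model();
        return "idle task";
    }

    int main(void)
    {
        printf("picked: %s\n", pick_next_task_model());
        return 0;
    }
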
7173 enum cpu_idle_type idle; member
7260 if (env->idle == CPU_IDLE) in migrate_degrades_locality()
7318 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) in can_migrate_task()
7354 schedstat_inc(env->sd->lb_hot_gained[env->idle]); in can_migrate_task()
7400 schedstat_inc(env->sd->lb_gained[env->idle]); in detach_one_task()
7431 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) in detach_tasks()
7471 if (env->idle == CPU_NEWLY_IDLE) in detach_tasks()
7492 schedstat_add(env->sd->lb_gained[env->idle], detached); in detach_tasks()
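
From here on, idle refers to the env->idle member of struct lb_env (line 7173), an enum cpu_idle_type recording the context of a load-balance attempt: CPU_IDLE for the periodic balance of an idle CPU, CPU_NOT_IDLE for a busy one, and CPU_NEWLY_IDLE for a CPU that has just run out of work. A sketch of one representative check, the detach_tasks() bail-out at line 7431 that stops an idle CPU from stealing the last task off a runqueue; lb_env_model and should_detach are hypothetical, and the enum ordering is illustrative:

    #include <stdio.h>

    /* The three balancing contexts; ordering here is illustrative only. */
    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

    /* Hypothetical slice of struct lb_env: just the fields used below. */
    struct lb_env_model {
        enum cpu_idle_type idle;
        int src_nr_running;
    };

    /*
     * Models the bail-out in detach_tasks(): an idle CPU (periodic-idle
     * or newly idle) refuses to steal when the busiest runqueue has at
     * most one task, since that would only move the work around.
     */
    static int should_detach(const struct lb_env_model *env)
    {
        if (env->idle != CPU_NOT_IDLE && env->src_nr_running <= 1)
            return 0;
        return 1;
    }

    int main(void)
    {
        struct lb_env_model env = { .idle = CPU_NEWLY_IDLE, .src_nr_running = 1 };

        printf("detach: %d\n", should_detach(&env)); /* 0: leave it alone */
        env.idle = CPU_NOT_IDLE;
        printf("detach: %d\n", should_detach(&env)); /* 1: busy CPU may pull */
        return 0;
    }
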
8249 if (env->idle == CPU_NOT_IDLE) in update_sd_pick_busiest()
8315 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) in update_sd_lb_stats()
8328 if (env->idle != CPU_NEWLY_IDLE || in update_sd_lb_stats()
8430 if (env->idle == CPU_NOT_IDLE) in check_asym_packing()
8605 (env->idle == CPU_NEWLY_IDLE || in calculate_imbalance()
8681 if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && in find_busiest_group()
8703 if (env->idle == CPU_IDLE) { in find_busiest_group()
8846 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && in asym_active_balance()
8864 if ((env->idle != CPU_NOT_IDLE) && in voluntary_active_balance()
8908 if (env->idle == CPU_NEWLY_IDLE) in should_we_balance()
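
should_we_balance() at line 8908 always lets a newly idle CPU attempt a balance, since it has nothing else to do; for periodic balancing, only one designated CPU per group runs the balance so the work is not duplicated. A minimal model; the designated-CPU rule is simplified here (the real code prefers the first idle CPU of the group, falling back to the group's first CPU):

    #include <stdbool.h>
    #include <stdio.h>

    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

    /*
     * Models the gate in should_we_balance(): a newly idle CPU may
     * always attempt a balance, while periodic balancing is left to a
     * single designated CPU per group to avoid duplicated effort.
     */
    static bool should_we_balance_model(enum cpu_idle_type idle,
                                        int this_cpu, int designated_cpu)
    {
        if (idle == CPU_NEWLY_IDLE)
            return true;

        return this_cpu == designated_cpu;
    }

    int main(void)
    {
        printf("%d\n", should_we_balance_model(CPU_NEWLY_IDLE, 3, 0)); /* 1 */
        printf("%d\n", should_we_balance_model(CPU_IDLE, 3, 0));       /* 0 */
        return 0;
    }
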
8935 struct sched_domain *sd, enum cpu_idle_type idle, in load_balance() argument
8950 .idle = idle, in load_balance()
8959 schedstat_inc(sd->lb_count[idle]); in load_balance()
8969 schedstat_inc(sd->lb_nobusyg[idle]); in load_balance()
8975 schedstat_inc(sd->lb_nobusyq[idle]); in load_balance()
8981 schedstat_add(sd->lb_imbalance[idle], env.imbalance); in load_balance()
9097 schedstat_inc(sd->lb_failed[idle]); in load_balance()
9104 if (idle != CPU_NEWLY_IDLE) in load_balance()
9184 schedstat_inc(sd->lb_balanced[idle]); in load_balance()
9197 if (env.idle == CPU_NEWLY_IDLE) in load_balance()
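
The schedstat_inc/schedstat_add calls in load_balance() (lines 8959-9184) all index per-domain counters by the balancing context, so schedstats can break attempts down into idle, busy and newly-idle cases. A sketch of that bookkeeping shape; sd_stats_model and record_attempt are hypothetical, and the real counters and their exact increment sites differ:

    #include <stdio.h>

    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE,
                         CPU_MAX_IDLE_TYPES };

    /* Hypothetical slice of the per-domain schedstats indexed above. */
    struct sd_stats_model {
        unsigned long lb_count[CPU_MAX_IDLE_TYPES];
        unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
        unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
    };

    /* Every counter is indexed by the balancing context. */
    static void record_attempt(struct sd_stats_model *sd,
                               enum cpu_idle_type idle,
                               int imbalanced, int pulled)
    {
        sd->lb_count[idle]++;        /* every attempt is counted */
        if (!imbalanced)
            sd->lb_balanced[idle]++; /* domain was already balanced */
        else if (!pulled)
            sd->lb_failed[idle]++;   /* wanted to move load, could not */
    }

    int main(void)
    {
        struct sd_stats_model sd = { { 0 } };

        record_attempt(&sd, CPU_NEWLY_IDLE, 1, 0);
        record_attempt(&sd, CPU_IDLE, 1, 1);
        printf("newly-idle failures: %lu\n", sd.lb_failed[CPU_NEWLY_IDLE]);
        return 0;
    }
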
9293 .idle = CPU_IDLE, in active_load_balance_cpu_stop()
9345 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) in rebalance_domains() argument
9385 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); in rebalance_domains()
9394 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
9400 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; in rebalance_domains()
9403 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); in rebalance_domains()
9440 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) in rebalance_domains()
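
rebalance_domains() recomputes the balance interval from the idle state (lines 9385, 9403) and, after a successful pull, re-derives idle itself (line 9400), because gaining a task may have turned an idle CPU busy and busy CPUs balance less aggressively. A sketch of that re-check; the interval values and helper names are made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE };

    static bool cpu_is_idle = true; /* hypothetical idle_cpu() result */

    /* Pretend the balance pulled one task onto this CPU. */
    static int load_balance_model(void)
    {
        cpu_is_idle = false;
        return 1;
    }

    /* Idle CPUs rebalance more aggressively; the values are made up. */
    static int balance_interval_ms(enum cpu_idle_type idle)
    {
        return idle == CPU_IDLE ? 4 : 64;
    }

    int main(void)
    {
        enum cpu_idle_type idle = CPU_IDLE;

        printf("interval before: %d ms\n", balance_interval_ms(idle));
        if (load_balance_model()) {
            /*
             * Models the re-check in rebalance_domains(): a successful
             * pull may have given this CPU work, so the idle state, and
             * with it the interval, is recomputed before the next,
             * larger domain is balanced.
             */
            idle = cpu_is_idle ? CPU_IDLE : CPU_NOT_IDLE;
        }
        printf("interval after:  %d ms\n", balance_interval_ms(idle));
        return 0;
    }
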
9731 enum cpu_idle_type idle) in _nohz_idle_balance() argument
9801 if (idle != CPU_NEWLY_IDLE) { in _nohz_idle_balance()
9835 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance() argument
9843 if (idle != CPU_IDLE) { in nohz_idle_balance()
9853 _nohz_idle_balance(this_rq, flags, idle); in nohz_idle_balance()
9893 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance() argument
10026 enum cpu_idle_type idle = this_rq->idle_balance ? in run_rebalance_domains()
10037 if (nohz_idle_balance(this_rq, idle)) in run_rebalance_domains()
10042 rebalance_domains(this_rq, idle); in run_rebalance_domains()
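
Finally, run_rebalance_domains() derives the balancing context from this_rq->idle_balance (line 10026), runs nohz_idle_balance() first, balancing on behalf of idle CPUs whose tick is stopped, and skips the local rebalance_domains() call entirely when the nohz pass handled it (lines 10037-10042); nohz_idle_balance() itself bails unless this CPU is idle (line 9843). A model of that ordering; the _model names and the pending flags are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE };

    static bool this_cpu_idle = true; /* models this_rq->idle_balance */
    static bool nohz_kick_pending;    /* a nohz balance was requested */

    /* Models nohz_idle_balance(): true if it covered the balancing. */
    static bool nohz_idle_balance_model(enum cpu_idle_type idle)
    {
        /* Line 9843: only a CPU that is itself idle runs the nohz pass. */
        if (idle != CPU_IDLE || !nohz_kick_pending)
            return false;
        printf("balancing on behalf of tickless idle CPUs\n");
        return true;
    }

    static void run_rebalance_model(void)
    {
        enum cpu_idle_type idle = this_cpu_idle ? CPU_IDLE : CPU_NOT_IDLE;

        /*
         * The nohz pass runs first, on behalf of idle CPUs whose tick
         * is stopped; when it handles everything, the local rebalance
         * is skipped.
         */
        if (nohz_idle_balance_model(idle))
            return;

        printf("rebalance_domains for this CPU (%s)\n",
               idle == CPU_IDLE ? "CPU_IDLE" : "CPU_NOT_IDLE");
    }

    int main(void)
    {
        run_rebalance_model();
        return 0;
    }
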