Searched refs:busiest (Results 1 – 2 of 2) sorted by relevance
kernel/sched/fair.c

8967   struct sched_group *busiest;   /* Busiest group in this sd */               member
8988   .busiest = NULL,                                                            in init_sd_lb_stats()
9451   struct sg_lb_stats *busiest = &sds->busiest_stat;                           in update_sd_pick_busiest() local
9469   if (sgs->group_type > busiest->group_type)                                  in update_sd_pick_busiest()
9472   if (sgs->group_type < busiest->group_type)                                  in update_sd_pick_busiest()
9483   if (sgs->avg_load <= busiest->avg_load)                                     in update_sd_pick_busiest()
9496   if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))  in update_sd_pick_busiest()
9505   if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)          in update_sd_pick_busiest()
9520   if (sgs->avg_load <= busiest->avg_load)                                     in update_sd_pick_busiest()
9532   if (sgs->idle_cpus > busiest->idle_cpus)                                    in update_sd_pick_busiest()
[all …]
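The matches above are the comparison cascade update_sd_pick_busiest() walks when ranking scheduling groups: group_type outranks everything, avg_load breaks ties within a type (keeping the incumbent on "<="), and idle_cpus arbitrates otherwise-equal groups. Below is a minimal standalone sketch of two steps of that cascade; it is not kernel code, and struct group_stats with its fields is a hypothetical stand-in for the kernel's struct sg_lb_stats.

#include <stdbool.h>
#include <stdio.h>

struct group_stats {
	int group_type;         /* higher value = more troubled class of group */
	unsigned long avg_load; /* average load within the group */
};

/* Return true if candidate should replace the current busiest pick. */
static bool pick_busiest(const struct group_stats *candidate,
			 const struct group_stats *busiest)
{
	/* A worse (higher) group_type wins outright. */
	if (candidate->group_type > busiest->group_type)
		return true;
	if (candidate->group_type < busiest->group_type)
		return false;

	/*
	 * Same class: fall back to average load, keeping the incumbent
	 * on ties -- the same effect as the "<=" tests in the matches above.
	 */
	return candidate->avg_load > busiest->avg_load;
}

int main(void)
{
	struct group_stats busiest = { .group_type = 1, .avg_load = 400 };
	struct group_stats cand    = { .group_type = 1, .avg_load = 700 };

	if (pick_busiest(&cand, &busiest))
		busiest = cand;

	printf("picked: type=%d avg_load=%lu\n",
	       busiest.group_type, busiest.avg_load);
	return 0;
}

The design point the cascade encodes is that a categorical difference (group_type) must never be overridden by a quantitative one (avg_load); the quantitative keys only ever break ties within a category.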
kernel/sched/sched.h

2641   static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  in _double_lock_balance() argument
2643   __acquires(busiest->lock)                                                       in _double_lock_balance()
2647   double_rq_lock(this_rq, busiest);                                               in _double_lock_balance()
2660   static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  in _double_lock_balance() argument
2662   __acquires(busiest->lock)                                                       in _double_lock_balance()
2665   if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||                               in _double_lock_balance()
2666   likely(raw_spin_rq_trylock(busiest))) {                                         in _double_lock_balance()
2667   double_rq_clock_clear_update(this_rq, busiest);                                 in _double_lock_balance()
2671   if (rq_order_less(this_rq, busiest)) {                                          in _double_lock_balance()
2672   raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);                         in _double_lock_balance()
[all …]
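The two definitions above are alternate build variants of _double_lock_balance(): one simply takes both run-queue locks together via double_rq_lock(), while the other first trylocks busiest and, on contention, falls back to acquiring the locks in a global order (rq_order_less()) so two CPUs can never deadlock ABBA-style. The standalone sketch below mimics that protocol with pthreads as a stand-in for the kernel's raw run-queue spinlocks; struct rq here, the mutexes, and ordering by address are illustrative assumptions, not kernel API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int id;
};

/* Global lock order, analogous to rq_order_less(): lower address first. */
static int rq_order_less(struct rq *a, struct rq *b)
{
	return (uintptr_t)a < (uintptr_t)b;
}

/*
 * Caller already holds this_rq->lock; acquire busiest->lock as well.
 * Returns 1 if this_rq->lock had to be dropped on the way (so the
 * caller must revalidate any state it derived under the lock),
 * 0 otherwise.
 */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (pthread_mutex_trylock(&busiest->lock) == 0)
		return 0;	/* fast path: second lock was uncontended */

	if (rq_order_less(this_rq, busiest)) {
		/* We hold the lower-ordered lock: blocking is safe. */
		pthread_mutex_lock(&busiest->lock);
		return 0;
	}

	/* Wrong order: back off and reacquire both in canonical order. */
	pthread_mutex_unlock(&this_rq->lock);
	pthread_mutex_lock(&busiest->lock);
	pthread_mutex_lock(&this_rq->lock);
	return 1;
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	pthread_mutex_lock(&a.lock);
	if (double_lock_balance(&a, &b))
		; /* a real caller would recheck its cached state here */
	printf("holding rq%d and rq%d\n", a.id, b.id);
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

The trylock-first shape keeps the common uncontended case cheap; the ordered fallback is what guarantees progress, since both CPUs in a conflict will converge on taking the lower-ordered lock first.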