Lines Matching refs:rq (cross-reference of struct rq uses in kernel/sched/core.c)
127 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
248 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
250 rq->core->core_task_seq++; in sched_core_enqueue()
255 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
258 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
260 rq->core->core_task_seq++; in sched_core_dequeue()
263 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
272 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
273 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
274 resched_curr(rq); in sched_core_dequeue()
280 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie) in sched_core_find() argument
284 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
289 return idle_sched_class.pick_task(rq); in sched_core_find()
447 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } in sched_core_enqueue() argument
449 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } in sched_core_dequeue() argument
545 void raw_spin_rq_lock_nested(struct rq *rq, int subclass) in raw_spin_rq_lock_nested() argument
552 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
559 lock = __rq_lockp(rq); in raw_spin_rq_lock_nested()
561 if (likely(lock == __rq_lockp(rq))) { in raw_spin_rq_lock_nested()
571 bool raw_spin_rq_trylock(struct rq *rq) in raw_spin_rq_trylock() argument
579 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
585 lock = __rq_lockp(rq); in raw_spin_rq_trylock()
587 if (!ret || (likely(lock == __rq_lockp(rq)))) { in raw_spin_rq_trylock()
595 void raw_spin_rq_unlock(struct rq *rq) in raw_spin_rq_unlock() argument
597 raw_spin_unlock(rq_lockp(rq)); in raw_spin_rq_unlock()
605 void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
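
double_rq_lock()/double_rq_unlock() take both run-queue locks in a deadlock-safe order. A minimal caller sketch, modelled on yield_to() at 8910 below (p, p_rq and the surrounding IRQ handling are placeholders; not compilable on its own):

        struct rq *rq = this_rq();
        struct rq *p_rq = task_rq(p);

        /* interrupts are disabled around this in yield_to() */
        double_rq_lock(rq, p_rq);
        /* p may have migrated while we waited; yield_to() rechecks task_rq(p) */

        /* ... operate on both run queues ... */

        double_rq_unlock(rq, p_rq);
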
624 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
625 __acquires(rq->lock) in __task_rq_lock()
627 struct rq *rq; in __task_rq_lock() local
632 rq = task_rq(p); in __task_rq_lock()
633 raw_spin_rq_lock(rq); in __task_rq_lock()
634 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
635 rq_pin_lock(rq, rf); in __task_rq_lock()
636 return rq; in __task_rq_lock()
638 raw_spin_rq_unlock(rq); in __task_rq_lock()
649 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
651 __acquires(rq->lock) in task_rq_lock()
653 struct rq *rq; in task_rq_lock() local
657 rq = task_rq(p); in task_rq_lock()
658 raw_spin_rq_lock(rq); in task_rq_lock()
676 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
677 rq_pin_lock(rq, rf); in task_rq_lock()
678 return rq; in task_rq_lock()
680 raw_spin_rq_unlock(rq); in task_rq_lock()
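
Both lock helpers loop until the task's run queue is stable (rq == task_rq(p) and the task is not mid-migration) before pinning it with rq_pin_lock(). A minimal sketch of the caller pattern used throughout this file (compare 1476, 5593 and 9063 below; p and rf are placeholders):

        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* takes p->pi_lock, then the rq lock */
        update_rq_clock(rq);            /* most callers refresh rq->clock first */

        /* ... read or modify p's scheduling state under the rq lock ... */

        task_rq_unlock(rq, p, &rf);
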
693 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
702 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
722 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
724 psi_account_irqtime(rq->curr, irq_delta); in update_rq_clock_task()
728 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
729 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
734 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
739 rq->clock_task += delta; in update_rq_clock_task()
743 update_irq_load_avg(rq, irq_delta + steal); in update_rq_clock_task()
745 update_rq_clock_task_mult(rq, delta); in update_rq_clock_task()
748 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
752 lockdep_assert_rq_held(rq); in update_rq_clock()
754 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
759 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
760 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
763 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
766 rq->clock += delta; in update_rq_clock()
767 update_rq_clock_task(rq, delta); in update_rq_clock()
776 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
778 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
779 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
788 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
791 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
793 rq_lock(rq, &rf); in hrtick()
794 update_rq_clock(rq); in hrtick()
795 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
796 rq_unlock(rq, &rf); in hrtick()
803 static void __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
805 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
806 ktime_t time = rq->hrtick_time; in __hrtick_restart()
816 struct rq *rq = arg; in __hrtick_start() local
819 rq_lock(rq, &rf); in __hrtick_start()
820 __hrtick_restart(rq); in __hrtick_start()
821 rq_unlock(rq, &rf); in __hrtick_start()
829 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
831 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
839 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); in hrtick_start()
841 if (rq == this_rq()) in hrtick_start()
842 __hrtick_restart(rq); in hrtick_start()
844 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
853 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
860 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
866 static void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
869 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
871 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
872 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
875 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
879 static inline void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
1043 void resched_curr(struct rq *rq) in resched_curr() argument
1045 struct task_struct *curr = rq->curr; in resched_curr()
1048 lockdep_assert_rq_held(rq); in resched_curr()
1053 cpu = cpu_of(rq); in resched_curr()
1070 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
1073 raw_spin_rq_lock_irqsave(rq, flags); in resched_cpu()
1075 resched_curr(rq); in resched_cpu()
1076 raw_spin_rq_unlock_irqrestore(rq, flags); in resched_cpu()
1141 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
1146 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
1185 struct rq *rq = info; in nohz_csd_func() local
1186 int cpu = cpu_of(rq); in nohz_csd_func()
1195 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1196 if (rq->idle_balance && !need_resched()) { in nohz_csd_func()
1197 rq->nohz_idle_balance = flags; in nohz_csd_func()
1205 bool sched_can_stop_tick(struct rq *rq) in sched_can_stop_tick() argument
1210 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1217 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1218 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1228 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1237 if (rq->nr_running > 1) in sched_can_stop_tick()
1403 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_value() argument
1412 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1419 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_reset() argument
1423 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1426 uclamp_rq_set(rq, clamp_id, clamp_value); in uclamp_idle_reset()
1430 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_max_value() argument
1433 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1447 return uclamp_idle_value(rq, clamp_id, clamp_value); in uclamp_rq_max_value()
1470 struct rq *rq; in uclamp_update_util_min_rt_default() local
1476 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1478 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1559 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
1562 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1566 lockdep_assert_rq_held(rq); in uclamp_rq_inc_id()
1575 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1584 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) in uclamp_rq_inc_id()
1585 uclamp_rq_set(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1597 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
1600 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1606 lockdep_assert_rq_held(rq); in uclamp_rq_dec_id()
1651 rq_clamp = uclamp_rq_get(rq, clamp_id); in uclamp_rq_dec_id()
1658 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1659 uclamp_rq_set(rq, clamp_id, bkt_clamp); in uclamp_rq_dec_id()
1663 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) in uclamp_rq_inc() argument
1680 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1683 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1684 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1687 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1704 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1707 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_reinc_id() argument
1713 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1714 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1720 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1721 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1729 struct rq *rq; in uclamp_update_active() local
1739 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1748 uclamp_rq_reinc_id(rq, p, clamp_id); in uclamp_update_active()
1750 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
2025 static void __init init_uclamp_rq(struct rq *rq) in init_uclamp_rq() argument
2028 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
2036 rq->uclamp_flags = UCLAMP_FLAG_IDLE; in init_uclamp_rq()
2065 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } in uclamp_rq_inc() argument
2066 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
2103 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
2106 update_rq_clock(rq); in enqueue_task()
2109 sched_info_enqueue(rq, p); in enqueue_task()
2113 uclamp_rq_inc(rq, p); in enqueue_task()
2114 trace_android_rvh_enqueue_task(rq, p, flags); in enqueue_task()
2115 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2116 trace_android_rvh_after_enqueue_task(rq, p, flags); in enqueue_task()
2118 if (sched_core_enabled(rq)) in enqueue_task()
2119 sched_core_enqueue(rq, p); in enqueue_task()
2122 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
2124 if (sched_core_enabled(rq)) in dequeue_task()
2125 sched_core_dequeue(rq, p, flags); in dequeue_task()
2128 update_rq_clock(rq); in dequeue_task()
2131 sched_info_dequeue(rq, p); in dequeue_task()
2135 uclamp_rq_dec(rq, p); in dequeue_task()
2136 trace_android_rvh_dequeue_task(rq, p, flags); in dequeue_task()
2137 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2138 trace_android_rvh_after_dequeue_task(rq, p, flags); in dequeue_task()
2141 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
2146 enqueue_task(rq, p, flags); in activate_task()
2152 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
2156 dequeue_task(rq, p, flags); in deactivate_task()
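
Beyond wakeup and sleep (activate_task()/deactivate_task()), this file repeatedly uses the same dequeue/requeue bracket when changing attributes of a task that may be queued or running: see rt_mutex_setprio() at 7163, set_user_nice() at 7256, __sched_setscheduler() at 7833, sched_setnuma() at 9341 and sched_move_task() at 10479 below. A minimal sketch of that bracket, assuming rq is already locked and clock-updated as above:

        bool queued = task_on_rq_queued(p);
        bool running = task_current(rq, p);

        if (queued)
                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
        if (running)
                put_prev_task(rq, p);

        /* ... change p's priority, policy, cgroup or allowed CPUs ... */

        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
        if (running)
                set_next_task(rq, p);
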
2224 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
2230 prev_class->switched_from(rq, p); in check_class_changed()
2232 p->sched_class->switched_to(rq, p); in check_class_changed()
2234 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2237 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
2239 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
2240 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
2241 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) in check_preempt_curr()
2242 resched_curr(rq); in check_preempt_curr()
2248 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
2249 rq_clock_skip_update(rq); in check_preempt_curr()
2302 struct rq *rq; in wait_task_inactive() local
2311 rq = task_rq(p); in wait_task_inactive()
2324 while (task_on_cpu(rq, p)) { in wait_task_inactive()
2335 rq = task_rq_lock(p, &rf); in wait_task_inactive()
2337 running = task_on_cpu(rq, p); in wait_task_inactive()
2349 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
2405 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) in migrate_disable_switch() argument
2416 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE); in migrate_disable_switch()
2466 static inline bool rq_has_pinned_tasks(struct rq *rq) in rq_has_pinned_tasks() argument
2468 return rq->nr_pinned; in rq_has_pinned_tasks()
2528 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
2533 lockdep_assert_rq_held(rq); in move_queued_task()
2540 trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached); in move_queued_task()
2544 deactivate_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
2548 rq_unlock(rq, rf); in move_queued_task()
2549 rq = cpu_rq(new_cpu); in move_queued_task()
2551 rq_lock(rq, rf); in move_queued_task()
2553 activate_task(rq, p, 0); in move_queued_task()
2554 check_preempt_curr(rq, p, 0); in move_queued_task()
2556 return rq; in move_queued_task()
2586 struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
2591 return rq; in __migrate_task()
2593 update_rq_clock(rq); in __migrate_task()
2594 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2596 return rq; in __migrate_task()
2610 struct rq *rq = this_rq(); in migration_cpu_stop() local
2627 rq_lock(rq, &rf); in migration_cpu_stop()
2640 if (task_rq(p) == rq) { in migration_cpu_stop()
2653 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2692 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2701 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2711 struct rq *lowest_rq = NULL, *rq = this_rq(); in push_cpu_stop() local
2715 raw_spin_rq_lock(rq); in push_cpu_stop()
2717 if (task_rq(p) != rq) in push_cpu_stop()
2728 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2734 if (task_rq(p) == rq) { in push_cpu_stop()
2735 deactivate_task(rq, p, 0); in push_cpu_stop()
2741 double_unlock_balance(rq, lowest_rq); in push_cpu_stop()
2744 rq->push_busy = false; in push_cpu_stop()
2745 raw_spin_rq_unlock(rq); in push_cpu_stop()
2772 struct rq *rq = task_rq(p); in __do_set_cpus_allowed() local
2793 running = task_current(rq, p); in __do_set_cpus_allowed()
2800 lockdep_assert_rq_held(rq); in __do_set_cpus_allowed()
2801 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2804 put_prev_task(rq, p); in __do_set_cpus_allowed()
2809 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2811 set_next_task(rq, p); in __do_set_cpus_allowed()
2952 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2963 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2964 rq->push_busy = true; in affine_move_task()
2979 task_rq_unlock(rq, p, rf); in affine_move_task()
2981 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
2982 p, &rq->push_work); in affine_move_task()
3033 task_rq_unlock(rq, p, rf); in affine_move_task()
3037 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3051 task_rq_unlock(rq, p, rf); in affine_move_task()
3053 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, in affine_move_task()
3064 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
3071 task_rq_unlock(rq, p, rf); in affine_move_task()
3100 struct rq *rq, in __set_cpus_allowed_ptr_locked() argument
3102 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
3112 update_rq_clock(rq); in __set_cpus_allowed_ptr_locked()
3172 ret = affine_move_task(rq, p, rf, dest_cpu, flags); in __set_cpus_allowed_ptr_locked()
3179 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
3197 struct rq *rq; in __set_cpus_allowed_ptr() local
3199 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
3200 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); in __set_cpus_allowed_ptr()
3222 struct rq *rq; in restrict_cpus_allowed_ptr() local
3231 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
3257 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); in restrict_cpus_allowed_ptr()
3260 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
3401 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
3436 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
3720 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } in migrate_disable_switch() argument
3722 static inline bool rq_has_pinned_tasks(struct rq *rq) in rq_has_pinned_tasks() argument
3732 struct rq *rq; in ttwu_stat() local
3737 rq = this_rq(); in ttwu_stat()
3740 if (cpu == rq->cpu) { in ttwu_stat()
3741 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3748 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3761 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3771 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_wakeup() argument
3774 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
3784 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
3785 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
3786 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
3789 if (rq->idle_stamp) { in ttwu_do_wakeup()
3790 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
3791 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
3793 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
3795 if (rq->avg_idle > max) in ttwu_do_wakeup()
3796 rq->avg_idle = max; in ttwu_do_wakeup()
3798 rq->wake_stamp = jiffies; in ttwu_do_wakeup()
3799 rq->wake_avg_idle = rq->avg_idle / 2; in ttwu_do_wakeup()
3801 rq->idle_stamp = 0; in ttwu_do_wakeup()
3807 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
3815 lockdep_assert_rq_held(rq); in ttwu_do_activate()
3818 rq->nr_uninterruptible--; in ttwu_do_activate()
3830 activate_task(rq, p, en_flags); in ttwu_do_activate()
3831 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
3862 struct rq *rq; in ttwu_runnable() local
3865 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3868 update_rq_clock(rq); in ttwu_runnable()
3869 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_runnable()
3872 __task_rq_unlock(rq, &rf); in ttwu_runnable()
3881 struct rq *rq = this_rq(); in sched_ttwu_pending() local
3893 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3895 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
3896 update_rq_clock(rq); in sched_ttwu_pending()
3902 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
3903 set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
3905 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3908 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
3913 struct rq *rq = cpu_rq(cpu); in send_call_function_single_ipi() local
3915 if (!set_nr_if_polling(rq->idle)) in send_call_function_single_ipi()
3929 struct rq *rq = cpu_rq(cpu); in __ttwu_queue_wakelist() local
3933 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3939 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
3944 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
3947 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
3948 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3949 resched_curr(rq); in wake_up_if_idle()
3951 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
4033 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
4039 rq_lock(rq, &rf); in ttwu_queue()
4040 update_rq_clock(rq); in ttwu_queue()
4041 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
4042 rq_unlock(rq, &rf); in ttwu_queue()
4433 struct rq *rq = NULL; in task_call_func() local
4440 rq = __task_rq_lock(p, &rf); in task_call_func()
4454 if (rq) in task_call_func()
4455 rq_unlock(rq, &rf); in task_call_func()
4872 struct rq *rq; in wake_up_new_task() local
4891 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4892 update_rq_clock(rq); in wake_up_new_task()
4896 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
4898 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
4905 rq_unpin_lock(rq, &rf); in wake_up_new_task()
4906 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4907 rq_repin_lock(rq, &rf); in wake_up_new_task()
4910 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
5034 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) in do_balance_callbacks() argument
5036 void (*func)(struct rq *rq); in do_balance_callbacks()
5039 lockdep_assert_rq_held(rq); in do_balance_callbacks()
5042 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
5047 func(rq); in do_balance_callbacks()
5051 static void balance_push(struct rq *rq);
5071 __splice_balance_callbacks(struct rq *rq, bool split) in __splice_balance_callbacks() argument
5073 struct balance_callback *head = rq->balance_callback; in __splice_balance_callbacks()
5078 lockdep_assert_rq_held(rq); in __splice_balance_callbacks()
5090 rq->balance_callback = NULL; in __splice_balance_callbacks()
5095 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) in splice_balance_callbacks() argument
5097 return __splice_balance_callbacks(rq, true); in splice_balance_callbacks()
5100 void __balance_callbacks(struct rq *rq) in __balance_callbacks() argument
5102 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); in __balance_callbacks()
5106 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) in balance_callbacks() argument
5111 raw_spin_rq_lock_irqsave(rq, flags); in balance_callbacks()
5112 do_balance_callbacks(rq, head); in balance_callbacks()
5113 raw_spin_rq_unlock_irqrestore(rq, flags); in balance_callbacks()
5119 static inline void __balance_callbacks(struct rq *rq) in __balance_callbacks() argument
5123 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) in splice_balance_callbacks() argument
5128 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) in balance_callbacks() argument
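
Callbacks queued on rq->balance_callback are flushed in one of two ways later in this file: in place while still holding the rq lock (__schedule() at 6734, rt_mutex_setprio() at 7212), or spliced off and run after the task/rq locks are dropped (__sched_setscheduler() at 7865/7875). A sketch of the two alternative idioms, with placeholder locals and not meant to run back to back:

        struct balance_callback *head;

        /* (a) as in __schedule(): run the queued callbacks while still
         * holding rq->__lock, just before dropping it */
        rq_unpin_lock(rq, &rf);
        __balance_callbacks(rq);
        raw_spin_rq_unlock_irq(rq);

        /* (b) as in __sched_setscheduler(): detach the pending list, drop
         * the locks, then run it; balance_callbacks() re-takes the rq lock
         * around do_balance_callbacks() */
        head = splice_balance_callbacks(rq);
        task_rq_unlock(rq, p, &rf);
        balance_callbacks(rq, head);
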
5135 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
5143 rq_unpin_lock(rq, rf); in prepare_lock_switch()
5144 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
5147 rq_lockp(rq)->owner = next; in prepare_lock_switch()
5151 static inline void finish_lock_switch(struct rq *rq) in finish_lock_switch() argument
5158 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
5159 __balance_callbacks(rq); in finish_lock_switch()
5160 raw_spin_rq_unlock_irq(rq); in finish_lock_switch()
5205 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
5209 sched_info_switch(rq, prev, next); in prepare_task_switch()
5237 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
5238 __releases(rq->lock) in finish_task_switch()
5240 struct rq *rq = this_rq(); in finish_task_switch() local
5241 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
5260 rq->prev_mm = NULL; in finish_task_switch()
5278 finish_lock_switch(rq); in finish_task_switch()
5319 return rq; in finish_task_switch()
5327 __releases(rq->lock) in schedule_tail()
5350 static __always_inline struct rq *
5351 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
5354 prepare_task_switch(rq, prev, next); in context_switch()
5379 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5393 rq->prev_mm = prev->active_mm; in context_switch()
5398 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in context_switch()
5400 prepare_lock_switch(rq, next, rf); in context_switch()
5574 struct rq *rq; in task_sched_runtime() local
5593 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5599 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
5601 update_rq_clock(rq); in task_sched_runtime()
5602 p->sched_class->update_curr(rq); in task_sched_runtime()
5605 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5611 static u64 cpu_resched_latency(struct rq *rq) in cpu_resched_latency() argument
5614 u64 resched_latency, now = rq_clock(rq); in cpu_resched_latency()
5626 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5627 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5628 rq->ticks_without_resched = 0; in cpu_resched_latency()
5632 rq->ticks_without_resched++; in cpu_resched_latency()
5633 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5656 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } in cpu_resched_latency() argument
5666 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
5667 struct task_struct *curr = rq->curr; in scheduler_tick()
5677 rq_lock(rq, &rf); in scheduler_tick()
5679 update_rq_clock(rq); in scheduler_tick()
5680 trace_android_rvh_tick_entry(rq); in scheduler_tick()
5682 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); in scheduler_tick()
5683 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); in scheduler_tick()
5684 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
5686 resched_latency = cpu_resched_latency(rq); in scheduler_tick()
5687 calc_global_load_tick(rq); in scheduler_tick()
5688 sched_core_tick(rq); in scheduler_tick()
5690 rq_unlock(rq, &rf); in scheduler_tick()
5698 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
5699 trigger_load_balance(rq); in scheduler_tick()
5702 trace_android_vh_scheduler_tick(rq); in scheduler_tick()
5747 struct rq *rq = cpu_rq(cpu); in sched_tick_remote() local
5763 rq_lock_irq(rq, &rf); in sched_tick_remote()
5764 curr = rq->curr; in sched_tick_remote()
5768 update_rq_clock(rq); in sched_tick_remote()
5775 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5778 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5780 calc_load_nohz_remote(rq); in sched_tick_remote()
5782 rq_unlock_irq(rq, &rf); in sched_tick_remote()
5998 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, in put_prev_task_balance() argument
6012 if (class->balance(rq, prev, rf)) in put_prev_task_balance()
6017 put_prev_task(rq, prev); in put_prev_task_balance()
6024 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in __pick_next_task() argument
6036 rq->nr_running == rq->cfs.h_nr_running)) { in __pick_next_task()
6038 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
6044 put_prev_task(rq, prev); in __pick_next_task()
6045 p = pick_next_task_idle(rq); in __pick_next_task()
6052 put_prev_task_balance(rq, prev, rf); in __pick_next_task()
6055 p = class->pick_next_task(rq); in __pick_next_task()
6082 static inline struct task_struct *pick_task(struct rq *rq) in pick_task() argument
6088 p = class->pick_task(rq); in pick_task()
6096 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6098 static void queue_core_balance(struct rq *rq);
6101 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
6106 bool core_clock_updated = (rq == rq->core); in pick_next_task()
6109 struct rq *rq_i; in pick_next_task()
6112 if (!sched_core_enabled(rq)) in pick_next_task()
6113 return __pick_next_task(rq, prev, rf); in pick_next_task()
6115 cpu = cpu_of(rq); in pick_next_task()
6124 rq->core_pick = NULL; in pick_next_task()
6125 return __pick_next_task(rq, prev, rf); in pick_next_task()
6137 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
6138 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
6139 rq->core_pick) { in pick_next_task()
6140 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
6142 next = rq->core_pick; in pick_next_task()
6144 put_prev_task(rq, prev); in pick_next_task()
6145 set_next_task(rq, next); in pick_next_task()
6148 rq->core_pick = NULL; in pick_next_task()
6152 put_prev_task_balance(rq, prev, rf); in pick_next_task()
6155 need_sync = !!rq->core->core_cookie; in pick_next_task()
6158 rq->core->core_cookie = 0UL; in pick_next_task()
6159 if (rq->core->core_forceidle_count) { in pick_next_task()
6161 update_rq_clock(rq->core); in pick_next_task()
6164 sched_core_account_forceidle(rq); in pick_next_task()
6166 rq->core->core_forceidle_start = 0; in pick_next_task()
6167 rq->core->core_forceidle_count = 0; in pick_next_task()
6168 rq->core->core_forceidle_occupation = 0; in pick_next_task()
6183 rq->core->core_task_seq++; in pick_next_task()
6190 next = pick_task(rq); in pick_next_task()
6192 rq->core_pick = NULL; in pick_next_task()
6198 task_vruntime_update(rq, next, false); in pick_next_task()
6217 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) in pick_next_task()
6225 cookie = rq->core->core_cookie = max->core_cookie; in pick_next_task()
6247 rq->core->core_forceidle_count++; in pick_next_task()
6249 rq->core->core_forceidle_seq++; in pick_next_task()
6256 if (schedstat_enabled() && rq->core->core_forceidle_count) { in pick_next_task()
6257 rq->core->core_forceidle_start = rq_clock(rq->core); in pick_next_task()
6258 rq->core->core_forceidle_occupation = occ; in pick_next_task()
6261 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
6262 next = rq->core_pick; in pick_next_task()
6263 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
6297 if (!(fi_before && rq->core->core_forceidle_count)) in pick_next_task()
6298 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); in pick_next_task()
6319 set_next_task(rq, next); in pick_next_task()
6321 if (rq->core->core_forceidle_count && next == rq->idle) in pick_next_task()
6322 queue_core_balance(rq); in pick_next_task()
6329 struct rq *dst = cpu_rq(this), *src = cpu_rq(that); in try_steal_cookie()
6396 static void sched_core_balance(struct rq *rq) in sched_core_balance() argument
6399 int cpu = cpu_of(rq); in sched_core_balance()
6403 raw_spin_rq_unlock_irq(rq); in sched_core_balance()
6411 raw_spin_rq_lock_irq(rq); in sched_core_balance()
6418 static void queue_core_balance(struct rq *rq) in queue_core_balance() argument
6420 if (!sched_core_enabled(rq)) in queue_core_balance()
6423 if (!rq->core->core_cookie) in queue_core_balance()
6426 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
6429 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
6435 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; in sched_core_cpu_starting() local
6441 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6451 rq = cpu_rq(t); in sched_core_cpu_starting()
6452 if (rq->core == rq) { in sched_core_cpu_starting()
6453 core_rq = rq; in sched_core_cpu_starting()
6463 rq = cpu_rq(t); in sched_core_cpu_starting()
6466 rq->core = core_rq; in sched_core_cpu_starting()
6468 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6478 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; in sched_core_cpu_deactivate() local
6486 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6491 if (rq->core != rq) in sched_core_cpu_deactivate()
6506 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6507 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6508 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6509 core_rq->core_forceidle_count = rq->core_forceidle_count; in sched_core_cpu_deactivate()
6510 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6511 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; in sched_core_cpu_deactivate()
6522 rq = cpu_rq(t); in sched_core_cpu_deactivate()
6523 rq->core = core_rq; in sched_core_cpu_deactivate()
6532 struct rq *rq = cpu_rq(cpu); in sched_core_cpu_dying() local
6534 if (rq->core != rq) in sched_core_cpu_dying()
6535 rq->core = rq; in sched_core_cpu_dying()
6545 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
6547 return __pick_next_task(rq, prev, rf); in pick_next_task()
6615 struct rq *rq; in __schedule() local
6619 rq = cpu_rq(cpu); in __schedule()
6620 prev = rq->curr; in __schedule()
6625 hrtick_clear(rq); in __schedule()
6645 rq_lock(rq, &rf); in __schedule()
6649 rq->clock_update_flags <<= 1; in __schedule()
6650 update_rq_clock(rq); in __schedule()
6669 rq->nr_uninterruptible++; in __schedule()
6682 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); in __schedule()
6685 atomic_inc(&rq->nr_iowait); in __schedule()
6692 next = pick_next_task(rq, prev, &rf); in __schedule()
6696 rq->last_seen_need_resched_ns = 0; in __schedule()
6699 trace_android_rvh_schedule(sched_mode, prev, next, rq); in __schedule()
6701 rq->nr_switches++; in __schedule()
6706 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6723 migrate_disable_switch(rq, prev); in __schedule()
6729 rq = context_switch(rq, prev, next, &rf); in __schedule()
6731 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in __schedule()
6733 rq_unpin_lock(rq, &rf); in __schedule()
6734 __balance_callbacks(rq); in __schedule()
6735 raw_spin_rq_unlock_irq(rq); in __schedule()
7103 struct rq *rq; in rt_mutex_setprio() local
7117 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
7118 update_rq_clock(rq); in rt_mutex_setprio()
7149 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7150 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7163 running = task_current(rq, p); in rt_mutex_setprio()
7165 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
7167 put_prev_task(rq, p); in rt_mutex_setprio()
7202 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
7204 set_next_task(rq, p); in rt_mutex_setprio()
7206 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
7211 rq_unpin_lock(rq, &rf); in rt_mutex_setprio()
7212 __balance_callbacks(rq); in rt_mutex_setprio()
7213 raw_spin_rq_unlock(rq); in rt_mutex_setprio()
7229 struct rq *rq; in set_user_nice() local
7238 rq = task_rq_lock(p, &rf); in set_user_nice()
7239 update_rq_clock(rq); in set_user_nice()
7256 running = task_current(rq, p); in set_user_nice()
7258 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
7260 put_prev_task(rq, p); in set_user_nice()
7268 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
7270 set_next_task(rq, p); in set_user_nice()
7276 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
7279 task_rq_unlock(rq, p, &rf); in set_user_nice()
7369 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
7371 if (rq->curr != rq->idle) in idle_cpu()
7374 if (rq->nr_running) in idle_cpu()
7378 if (rq->ttwu_pending) in idle_cpu()
7440 struct rq *rq = cpu_rq(cpu); in effective_cpu_util() local
7450 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { in effective_cpu_util()
7459 irq = cpu_util_irq(rq); in effective_cpu_util()
7475 util = util_cfs + cpu_util_rt(rq); in effective_cpu_util()
7477 util = uclamp_rq_util_with(rq, util, p); in effective_cpu_util()
7479 dl_util = cpu_util_dl(rq); in effective_cpu_util()
7523 util += cpu_bw_dl(rq); in effective_cpu_util()
7666 struct rq *rq; in __sched_setscheduler() local
7733 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
7734 update_rq_clock(rq); in __sched_setscheduler()
7739 if (p == rq->stop) { in __sched_setscheduler()
7780 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
7788 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
7799 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7833 running = task_current(rq, p); in __sched_setscheduler()
7835 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
7837 put_prev_task(rq, p); in __sched_setscheduler()
7856 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
7859 set_next_task(rq, p); in __sched_setscheduler()
7861 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
7865 head = splice_balance_callbacks(rq); in __sched_setscheduler()
7866 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7875 balance_callbacks(rq, head); in __sched_setscheduler()
7881 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
8530 struct rq *rq; in do_sched_yield() local
8537 rq = this_rq_lock_irq(&rf); in do_sched_yield()
8539 schedstat_inc(rq->yld_count); in do_sched_yield()
8540 current->sched_class->yield_task(rq); in do_sched_yield()
8542 trace_android_rvh_do_sched_yield(rq); in do_sched_yield()
8545 rq_unlock_irq(rq, &rf); in do_sched_yield()
8892 struct rq *rq, *p_rq; in yield_to() local
8897 rq = this_rq(); in yield_to()
8905 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
8910 double_rq_lock(rq, p_rq); in yield_to()
8912 double_rq_unlock(rq, p_rq); in yield_to()
8925 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
8927 schedstat_inc(rq->yld_count); in yield_to()
8932 if (preempt && rq != p_rq) in yield_to()
8937 double_rq_unlock(rq, p_rq); in yield_to()
9047 struct rq *rq; in sched_rr_get_interval() local
9063 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
9066 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
9067 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
9211 struct rq *rq = cpu_rq(cpu); in init_idle() local
9217 raw_spin_rq_lock(rq); in init_idle()
9251 rq->idle = idle; in init_idle()
9252 rcu_assign_pointer(rq->curr, idle); in init_idle()
9257 raw_spin_rq_unlock(rq); in init_idle()
9337 struct rq *rq; in sched_setnuma() local
9339 rq = task_rq_lock(p, &rf); in sched_setnuma()
9341 running = task_current(rq, p); in sched_setnuma()
9344 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
9346 put_prev_task(rq, p); in sched_setnuma()
9351 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
9353 set_next_task(rq, p); in sched_setnuma()
9354 task_rq_unlock(rq, p, &rf); in sched_setnuma()
9378 struct task_struct *pick_migrate_task(struct rq *rq) in pick_migrate_task() argument
9384 next = class->pick_next_task(rq); in pick_migrate_task()
9386 next->sched_class->put_prev_task(rq, next); in pick_migrate_task()
9399 struct rq *rq = this_rq(); in __balance_push_cpu_stop() local
9404 rq_lock(rq, &rf); in __balance_push_cpu_stop()
9406 update_rq_clock(rq); in __balance_push_cpu_stop()
9408 if (task_rq(p) == rq && task_on_rq_queued(p)) { in __balance_push_cpu_stop()
9409 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
9410 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
9413 rq_unlock(rq, &rf); in __balance_push_cpu_stop()
9429 static void balance_push(struct rq *rq) in balance_push() argument
9431 struct task_struct *push_task = rq->curr; in balance_push()
9433 lockdep_assert_rq_held(rq); in balance_push()
9438 rq->balance_callback = &balance_push_callback; in balance_push()
9444 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
9465 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
9466 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
9467 raw_spin_rq_unlock(rq); in balance_push()
9468 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
9469 raw_spin_rq_lock(rq); in balance_push()
9480 raw_spin_rq_unlock(rq); in balance_push()
9481 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
9489 raw_spin_rq_lock(rq); in balance_push()
9494 struct rq *rq = cpu_rq(cpu); in balance_push_set() local
9497 rq_lock_irqsave(rq, &rf); in balance_push_set()
9499 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
9500 rq->balance_callback = &balance_push_callback; in balance_push_set()
9501 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
9502 rq->balance_callback = NULL; in balance_push_set()
9504 rq_unlock_irqrestore(rq, &rf); in balance_push_set()
9515 struct rq *rq = this_rq(); in balance_hotplug_wait() local
9517 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
9518 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
9524 static inline void balance_push(struct rq *rq) in balance_push() argument
9538 void set_rq_online(struct rq *rq) in set_rq_online() argument
9540 if (!rq->online) { in set_rq_online()
9543 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
9544 rq->online = 1; in set_rq_online()
9548 class->rq_online(rq); in set_rq_online()
9553 void set_rq_offline(struct rq *rq) in set_rq_offline() argument
9555 if (rq->online) { in set_rq_offline()
9560 class->rq_offline(rq); in set_rq_offline()
9563 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
9564 rq->online = 0; in set_rq_offline()
9620 struct rq *rq = cpu_rq(cpu); in sched_cpu_activate() local
9653 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
9654 if (rq->rd) { in sched_cpu_activate()
9655 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
9656 set_rq_online(rq); in sched_cpu_activate()
9658 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
9665 struct rq *rq = cpu_rq(cpu); in sched_cpu_deactivate() local
9673 nohz_balance_exit_idle(rq); in sched_cpu_deactivate()
9697 rq_lock_irqsave(rq, &rf); in sched_cpu_deactivate()
9698 if (rq->rd) { in sched_cpu_deactivate()
9699 update_rq_clock(rq); in sched_cpu_deactivate()
9700 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_deactivate()
9701 set_rq_offline(rq); in sched_cpu_deactivate()
9703 rq_unlock_irqrestore(rq, &rf); in sched_cpu_deactivate()
9732 struct rq *rq = cpu_rq(cpu); in sched_rq_cpu_starting() local
9734 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
9775 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
9777 long delta = calc_load_fold_active(rq, 1); in calc_load_migrate()
9783 static void dump_rq_tasks(struct rq *rq, const char *loglvl) in dump_rq_tasks() argument
9786 int cpu = cpu_of(rq); in dump_rq_tasks()
9788 lockdep_assert_rq_held(rq); in dump_rq_tasks()
9790 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
9804 struct rq *rq = cpu_rq(cpu); in sched_cpu_dying() local
9810 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
9811 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
9813 dump_rq_tasks(rq, KERN_WARNING); in sched_cpu_dying()
9815 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
9819 calc_load_migrate(rq); in sched_cpu_dying()
9821 hrtick_clear(rq); in sched_cpu_dying()
9952 struct rq *rq; in sched_init() local
9954 rq = cpu_rq(i); in sched_init()
9955 raw_spin_lock_init(&rq->__lock); in sched_init()
9956 rq->nr_running = 0; in sched_init()
9957 rq->calc_load_active = 0; in sched_init()
9958 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
9959 init_cfs_rq(&rq->cfs); in sched_init()
9960 init_rt_rq(&rq->rt); in sched_init()
9961 init_dl_rq(&rq->dl); in sched_init()
9963 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
9964 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
9984 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
9987 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
9989 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
9992 rq->sd = NULL; in sched_init()
9993 rq->rd = NULL; in sched_init()
9994 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
9995 rq->balance_callback = &balance_push_callback; in sched_init()
9996 rq->active_balance = 0; in sched_init()
9997 rq->next_balance = jiffies; in sched_init()
9998 rq->push_cpu = 0; in sched_init()
9999 rq->cpu = i; in sched_init()
10000 rq->online = 0; in sched_init()
10001 rq->idle_stamp = 0; in sched_init()
10002 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
10003 rq->wake_stamp = jiffies; in sched_init()
10004 rq->wake_avg_idle = rq->avg_idle; in sched_init()
10005 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
10007 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
10009 rq_attach_root(rq, &def_root_domain); in sched_init()
10011 rq->last_blocked_load_update_tick = jiffies; in sched_init()
10012 atomic_set(&rq->nohz_flags, 0); in sched_init()
10014 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
10017 rcuwait_init(&rq->hotplug_wait); in sched_init()
10020 hrtick_rq_init(rq); in sched_init()
10021 atomic_set(&rq->nr_iowait, 0); in sched_init()
10024 rq->core = rq; in sched_init()
10025 rq->core_pick = NULL; in sched_init()
10026 rq->core_enabled = 0; in sched_init()
10027 rq->core_tree = RB_ROOT; in sched_init()
10028 rq->core_forceidle_count = 0; in sched_init()
10029 rq->core_forceidle_occupation = 0; in sched_init()
10030 rq->core_forceidle_start = 0; in sched_init()
10032 rq->core_cookie = 0UL; in sched_init()
10474 struct rq *rq; in sched_move_task() local
10476 rq = task_rq_lock(tsk, &rf); in sched_move_task()
10477 update_rq_clock(rq); in sched_move_task()
10479 running = task_current(rq, tsk); in sched_move_task()
10483 dequeue_task(rq, tsk, queue_flags); in sched_move_task()
10485 put_prev_task(rq, tsk); in sched_move_task()
10490 enqueue_task(rq, tsk, queue_flags); in sched_move_task()
10492 set_next_task(rq, tsk); in sched_move_task()
10498 resched_curr(rq); in sched_move_task()
10501 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
10884 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
10887 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
10893 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()
11521 void call_trace_sched_update_nr_running(struct rq *rq, int count) in call_trace_sched_update_nr_running() argument
11523 trace_sched_update_nr_running_tp(rq, count); in call_trace_sched_update_nr_running()