Lines matching +full:throttle +full:-period +full:-us (full-text search hits, apparently over the kernel's real-time scheduler, kernel/sched/rt.c; each hit is shown as <file line> <source line> in <enclosing function>())
1 // SPDX-License-Identifier: GPL-2.0
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
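These hits come from the scheduling class behind the SCHED_FIFO and SCHED_RR policies. As a minimal userspace sketch (not part of the matched file), a thread opts into this class with sched_setscheduler(); the priority value and the CAP_SYS_NICE/RLIMIT_RTPRIO privilege requirement below are illustrative assumptions.

/* Minimal sketch: move the calling thread into the RT class (SCHED_FIFO).
 * Assumes the caller has CAP_SYS_NICE or a suitable RLIMIT_RTPRIO. */
#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };  /* 1..99 on Linux */

        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
                perror("sched_setscheduler");
                return 1;
        }
        printf("running under SCHED_FIFO, priority %d\n", sp.sched_priority);
        return 0;
}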
35 raw_spin_lock(&rt_b->rt_runtime_lock); in sched_rt_period_timer()
37 overrun = hrtimer_forward_now(timer, rt_b->rt_period); in sched_rt_period_timer()
41 raw_spin_unlock(&rt_b->rt_runtime_lock); in sched_rt_period_timer()
43 raw_spin_lock(&rt_b->rt_runtime_lock); in sched_rt_period_timer()
46 rt_b->rt_period_active = 0; in sched_rt_period_timer()
47 raw_spin_unlock(&rt_b->rt_runtime_lock); in sched_rt_period_timer()
52 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) in init_rt_bandwidth() argument
54 rt_b->rt_period = ns_to_ktime(period); in init_rt_bandwidth()
55 rt_b->rt_runtime = runtime; in init_rt_bandwidth()
57 raw_spin_lock_init(&rt_b->rt_runtime_lock); in init_rt_bandwidth()
59 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, in init_rt_bandwidth()
61 rt_b->rt_period_timer.function = sched_rt_period_timer; in init_rt_bandwidth()
66 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) in start_rt_bandwidth()
69 raw_spin_lock(&rt_b->rt_runtime_lock); in start_rt_bandwidth()
70 if (!rt_b->rt_period_active) { in start_rt_bandwidth()
71 rt_b->rt_period_active = 1; in start_rt_bandwidth()
75 * not reset the period. If a deadline task was running in start_rt_bandwidth()
77 * throttle when they start up. Kick the timer right away in start_rt_bandwidth()
78 * to update the period. in start_rt_bandwidth()
80 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0)); in start_rt_bandwidth()
81 hrtimer_start_expires(&rt_b->rt_period_timer, in start_rt_bandwidth()
84 raw_spin_unlock(&rt_b->rt_runtime_lock); in start_rt_bandwidth()
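init_rt_bandwidth()/start_rt_bandwidth() drive the replenishment timer from an rt_period/rt_runtime pair; at the global level these correspond to the sched_rt_period_us and sched_rt_runtime_us sysctls. Below is a minimal sketch for inspecting them from userspace; the procfs paths are the standard ones, and the usual defaults (1,000,000 us period, 950,000 us runtime) are assumptions, not values taken from these hits.

/* Sketch: read the global RT bandwidth knobs that feed rt_period/rt_runtime. */
#include <stdio.h>

static long read_knob(const char *path)
{
        long val = -1;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

int main(void)
{
        printf("sched_rt_period_us  = %ld\n",
               read_knob("/proc/sys/kernel/sched_rt_period_us"));
        printf("sched_rt_runtime_us = %ld\n",
               read_knob("/proc/sys/kernel/sched_rt_runtime_us"));
        return 0;
}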
92 array = &rt_rq->active; in init_rt_rq()
94 INIT_LIST_HEAD(array->queue + i); in init_rt_rq()
95 __clear_bit(i, array->bitmap); in init_rt_rq()
98 __set_bit(MAX_RT_PRIO, array->bitmap); in init_rt_rq()
101 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_rt_rq()
102 rt_rq->highest_prio.next = MAX_RT_PRIO; in init_rt_rq()
103 rt_rq->rt_nr_migratory = 0; in init_rt_rq()
104 rt_rq->overloaded = 0; in init_rt_rq()
105 plist_head_init(&rt_rq->pushable_tasks); in init_rt_rq()
108 rt_rq->rt_queued = 0; in init_rt_rq()
110 rt_rq->rt_time = 0; in init_rt_rq()
111 rt_rq->rt_throttled = 0; in init_rt_rq()
112 rt_rq->rt_runtime = 0; in init_rt_rq()
113 raw_spin_lock_init(&rt_rq->rt_runtime_lock); in init_rt_rq()
119 hrtimer_cancel(&rt_b->rt_period_timer); in destroy_rt_bandwidth()
122 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
134 return rt_rq->rq; in rq_of_rt_rq()
139 return rt_se->rt_rq; in rt_rq_of_se()
144 struct rt_rq *rt_rq = rt_se->rt_rq; in rq_of_rt_se()
146 return rt_rq->rq; in rq_of_rt_se()
153 if (tg->rt_se) in free_rt_sched_group()
154 destroy_rt_bandwidth(&tg->rt_bandwidth); in free_rt_sched_group()
157 if (tg->rt_rq) in free_rt_sched_group()
158 kfree(tg->rt_rq[i]); in free_rt_sched_group()
159 if (tg->rt_se) in free_rt_sched_group()
160 kfree(tg->rt_se[i]); in free_rt_sched_group()
163 kfree(tg->rt_rq); in free_rt_sched_group()
164 kfree(tg->rt_se); in free_rt_sched_group()
173 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_tg_rt_entry()
174 rt_rq->rt_nr_boosted = 0; in init_tg_rt_entry()
175 rt_rq->rq = rq; in init_tg_rt_entry()
176 rt_rq->tg = tg; in init_tg_rt_entry()
178 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
179 tg->rt_se[cpu] = rt_se; in init_tg_rt_entry()
185 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
187 rt_se->rt_rq = parent->my_q; in init_tg_rt_entry()
189 rt_se->my_q = rt_rq; in init_tg_rt_entry()
190 rt_se->parent = parent; in init_tg_rt_entry()
191 INIT_LIST_HEAD(&rt_se->run_list); in init_tg_rt_entry()
200 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
201 if (!tg->rt_rq) in alloc_rt_sched_group()
203 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL); in alloc_rt_sched_group()
204 if (!tg->rt_se) in alloc_rt_sched_group()
207 init_rt_bandwidth(&tg->rt_bandwidth, in alloc_rt_sched_group()
222 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
223 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
259 return &rq->rt; in rt_rq_of_se()
280 return rq->rt.highest_prio.curr > prev->prio && in need_pull_rt_task()
286 return atomic_read(&rq->rd->rto_count); in rt_overloaded()
291 if (!rq->online) in rt_set_overload()
294 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload()
305 atomic_inc(&rq->rd->rto_count); in rt_set_overload()
310 if (!rq->online) in rt_clear_overload()
314 atomic_dec(&rq->rd->rto_count); in rt_clear_overload()
315 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload()
320 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { in update_rt_migration()
321 if (!rt_rq->overloaded) { in update_rt_migration()
323 rt_rq->overloaded = 1; in update_rt_migration()
325 } else if (rt_rq->overloaded) { in update_rt_migration()
327 rt_rq->overloaded = 0; in update_rt_migration()
339 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in inc_rt_migration()
341 rt_rq->rt_nr_total++; in inc_rt_migration()
342 if (p->nr_cpus_allowed > 1) in inc_rt_migration()
343 rt_rq->rt_nr_migratory++; in inc_rt_migration()
356 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in dec_rt_migration()
358 rt_rq->rt_nr_total--; in dec_rt_migration()
359 if (p->nr_cpus_allowed > 1) in dec_rt_migration()
360 rt_rq->rt_nr_migratory--; in dec_rt_migration()
367 return !plist_head_empty(&rq->rt.pushable_tasks); in has_pushable_tasks()
381 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); in rt_queue_push_tasks()
386 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); in rt_queue_pull_task()
391 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
392 plist_node_init(&p->pushable_tasks, p->prio); in enqueue_pushable_task()
393 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
396 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
397 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
402 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
406 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
408 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
410 rq->rt.highest_prio.next = MAX_RT_PRIO; in dequeue_pushable_task()
452 return rt_se->on_rq; in on_rt_rq()
461 * is higher than the capacity of a @cpu. For non-heterogeneous system this
498 if (!rt_rq->tg) in sched_rt_runtime()
501 return rt_rq->rt_runtime; in sched_rt_runtime()
506 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
514 tg = list_entry_rcu(tg->list.next, in next_task_group()
516 } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); in next_task_group()
518 if (&tg->list == &task_groups) in next_task_group()
527 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
530 for (; rt_se; rt_se = rt_se->parent)
534 return rt_se->my_q; in group_rt_rq()
542 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
548 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
550 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
556 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
566 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
569 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
579 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
588 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
591 return p->prio != p->normal_prio; in rt_se_boosted()
597 return this_rq()->rd->span; in sched_rt_period_mask()
609 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
614 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
621 return rt_rq->rt_runtime; in sched_rt_runtime()
632 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
646 if (!rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
655 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
660 return rt_rq->rt_throttled; in rt_rq_throttled()
671 return &cpu_rq(cpu)->rt; in sched_rt_period_rt_rq()
685 return (hrtimer_active(&rt_b->rt_period_timer) || in sched_rt_bandwidth_account()
686 rt_rq->rt_time < rt_b->rt_runtime); in sched_rt_bandwidth_account()
696 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime()
700 weight = cpumask_weight(rd->span); in do_balance_runtime()
702 raw_spin_lock(&rt_b->rt_runtime_lock); in do_balance_runtime()
703 rt_period = ktime_to_ns(rt_b->rt_period); in do_balance_runtime()
704 for_each_cpu(i, rd->span) { in do_balance_runtime()
711 raw_spin_lock(&iter->rt_runtime_lock); in do_balance_runtime()
717 if (iter->rt_runtime == RUNTIME_INF) in do_balance_runtime()
722 * spare time, but no more than our period. in do_balance_runtime()
724 diff = iter->rt_runtime - iter->rt_time; in do_balance_runtime()
727 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
728 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
729 iter->rt_runtime -= diff; in do_balance_runtime()
730 rt_rq->rt_runtime += diff; in do_balance_runtime()
731 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
732 raw_spin_unlock(&iter->rt_runtime_lock); in do_balance_runtime()
737 raw_spin_unlock(&iter->rt_runtime_lock); in do_balance_runtime()
739 raw_spin_unlock(&rt_b->rt_runtime_lock); in do_balance_runtime()
747 struct root_domain *rd = rq->rd; in __disable_runtime()
759 raw_spin_lock(&rt_b->rt_runtime_lock); in __disable_runtime()
760 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
766 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
767 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
769 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
776 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
781 for_each_cpu(i, rd->span) { in __disable_runtime()
788 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
791 raw_spin_lock(&iter->rt_runtime_lock); in __disable_runtime()
793 diff = min_t(s64, iter->rt_runtime, want); in __disable_runtime()
794 iter->rt_runtime -= diff; in __disable_runtime()
795 want -= diff; in __disable_runtime()
797 iter->rt_runtime -= want; in __disable_runtime()
798 want -= want; in __disable_runtime()
800 raw_spin_unlock(&iter->rt_runtime_lock); in __disable_runtime()
806 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
808 * We cannot be left wanting - that would mean some runtime in __disable_runtime()
815 * runtime - in which case borrowing doesn't make sense. in __disable_runtime()
817 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
818 rt_rq->rt_throttled = 0; in __disable_runtime()
819 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
820 raw_spin_unlock(&rt_b->rt_runtime_lock); in __disable_runtime()
841 raw_spin_lock(&rt_b->rt_runtime_lock); in __enable_runtime()
842 raw_spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
843 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
844 rt_rq->rt_time = 0; in __enable_runtime()
845 rt_rq->rt_throttled = 0; in __enable_runtime()
846 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
847 raw_spin_unlock(&rt_b->rt_runtime_lock); in __enable_runtime()
856 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
857 raw_spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
859 raw_spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
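balance_runtime()/do_balance_runtime() let a throttled rt_rq borrow unused runtime from sibling CPUs in the same root domain, gated by the RT_RUNTIME_SHARE scheduler feature seen further below. A hedged sketch of turning that borrowing off at run time; it assumes CONFIG_SCHED_DEBUG and a debugfs mount at the usual location.

/* Sketch: disable cross-CPU borrowing of RT runtime via the scheduler
 * feature file (path and availability are assumptions of this example). */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/sched_features", "w");

        if (!f) {
                perror("sched_features");
                return 1;
        }
        fputs("NO_RT_RUNTIME_SHARE", f);  /* each rt_rq keeps its own budget */
        fclose(f);
        return 0;
}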
875 * CPUs or non-isolated CPUs, whether they are isolcpus or in do_sched_rt_period_timer()
890 * When span == cpu_online_mask, taking each rq->lock in do_sched_rt_period_timer()
891 * can be time-consuming. Try to avoid it when possible. in do_sched_rt_period_timer()
893 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
894 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) in do_sched_rt_period_timer()
895 rt_rq->rt_runtime = rt_b->rt_runtime; in do_sched_rt_period_timer()
896 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; in do_sched_rt_period_timer()
897 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
901 raw_spin_lock(&rq->lock); in do_sched_rt_period_timer()
904 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
907 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
908 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
910 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
911 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
912 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
913 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
923 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
926 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
928 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
929 } else if (rt_rq->rt_nr_running) { in do_sched_rt_period_timer()
934 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
939 raw_spin_unlock(&rq->lock); in do_sched_rt_period_timer()
942 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) in do_sched_rt_period_timer()
954 return rt_rq->highest_prio.curr; in rt_se_prio()
957 return rt_task_of(rt_se)->prio; in rt_se_prio()
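rt_se_prio() and the highest_prio bookkeeping use the kernel-internal prio scale, where lower numbers mean higher priority and MAX_RT_PRIO acts as the "no RT task queued" sentinel. The sketch below shows the usual mapping from a userspace rt_priority to that scale; the constant is mirrored here only for illustration.

/* Sketch of the priority convention used throughout this file: a userspace
 * rt_priority of 1..99 maps to a kernel prio of 98..0. */
#include <stdio.h>

#define MAX_RT_PRIO 100  /* mirrors the kernel constant, for illustration */

static int kernel_prio(int rt_priority)
{
        return MAX_RT_PRIO - 1 - rt_priority;
}

int main(void)
{
        printf("rt_priority 1  -> prio %d\n", kernel_prio(1));   /* 98 */
        printf("rt_priority 99 -> prio %d\n", kernel_prio(99));  /* 0  */
        return 0;
}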
962 raw_spin_lock(&rt_b->rt_runtime_lock); in try_start_rt_bandwidth()
963 if (!rt_b->rt_period_active) { in try_start_rt_bandwidth()
964 rt_b->rt_period_active = 1; in try_start_rt_bandwidth()
965 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period); in try_start_rt_bandwidth()
966 hrtimer_start_expires(&rt_b->rt_period_timer, in try_start_rt_bandwidth()
969 raw_spin_unlock(&rt_b->rt_runtime_lock); in try_start_rt_bandwidth()
976 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
987 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
991 * Don't actually throttle groups that have no runtime assigned in sched_rt_runtime_exceeded()
994 if (likely(rt_b->rt_runtime)) { in sched_rt_runtime_exceeded()
995 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
1000 * replenishment is a joke, since it will replenish us in sched_rt_runtime_exceeded()
1003 rt_rq->rt_time = 0; in sched_rt_runtime_exceeded()
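sched_rt_runtime_exceeded() throttles an rt_rq once its accumulated rt_time passes rt_runtime, and the period timer later clears the throttle and replenishes the budget. A trivial back-of-the-envelope sketch of the resulting window, assuming the usual global defaults rather than anything in these hits.

/* Sketch: size of the per-period throttle window with typical defaults. */
#include <stdio.h>

int main(void)
{
        long period_us  = 1000000;  /* sched_rt_period_us  (assumed default) */
        long runtime_us = 950000;   /* sched_rt_runtime_us (assumed default) */

        /* A CPU-bound RT task is throttled once rt_time reaches rt_runtime,
         * leaving the rest of the period to non-RT tasks. */
        printf("throttled for %ld us of every %ld us period\n",
               period_us - runtime_us, period_us);
        return 0;
}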
1021 struct task_struct *curr = rq->curr; in update_curr_rt()
1022 struct sched_rt_entity *rt_se = &curr->rt; in update_curr_rt()
1026 if (curr->sched_class != &rt_sched_class) in update_curr_rt()
1030 delta_exec = now - curr->se.exec_start; in update_curr_rt()
1034 schedstat_set(curr->se.statistics.exec_max, in update_curr_rt()
1035 max(curr->se.statistics.exec_max, delta_exec)); in update_curr_rt()
1037 curr->se.sum_exec_runtime += delta_exec; in update_curr_rt()
1040 curr->se.exec_start = now; in update_curr_rt()
1051 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1052 rt_rq->rt_time += delta_exec; in update_curr_rt()
1056 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1068 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1070 if (!rt_rq->rt_queued) in dequeue_top_rt_rq()
1073 BUG_ON(!rq->nr_running); in dequeue_top_rt_rq()
1076 rt_rq->rt_queued = 0; in dequeue_top_rt_rq()
1085 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1087 if (rt_rq->rt_queued) in enqueue_top_rt_rq()
1093 if (rt_rq->rt_nr_running) { in enqueue_top_rt_rq()
1094 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1095 rt_rq->rt_queued = 1; in enqueue_top_rt_rq()
1113 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1116 if (rq->online && prio < prev_prio) in inc_rt_prio_smp()
1117 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); in inc_rt_prio_smp()
1129 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1132 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1133 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1149 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1152 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1160 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1162 if (rt_rq->rt_nr_running) { in dec_rt_prio()
1171 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1173 rt_rq->highest_prio.curr = in dec_rt_prio()
1174 sched_find_first_bit(array->bitmap); in dec_rt_prio()
1178 rt_rq->highest_prio.curr = MAX_RT_PRIO; in dec_rt_prio()
1196 rt_rq->rt_nr_boosted++; in inc_rt_group()
1198 if (rt_rq->tg) in inc_rt_group()
1199 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
1206 rt_rq->rt_nr_boosted--; in dec_rt_group()
1208 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_group()
1230 return group_rq->rt_nr_running; in rt_se_nr_running()
1242 return group_rq->rr_nr_running; in rt_se_rr_nr_running()
1246 return (tsk->policy == SCHED_RR) ? 1 : 0; in rt_se_rr_nr_running()
1255 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); in inc_rt_tasks()
1256 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); in inc_rt_tasks()
1267 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
1268 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); in dec_rt_tasks()
1269 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); in dec_rt_tasks()
1277 * Change rt_se->run_list location unless SAVE && !MOVE
1291 list_del_init(&rt_se->run_list); in __delist_rt_entity()
1293 if (list_empty(array->queue + rt_se_prio(rt_se))) in __delist_rt_entity()
1294 __clear_bit(rt_se_prio(rt_se), array->bitmap); in __delist_rt_entity()
1296 rt_se->on_list = 0; in __delist_rt_entity()
1302 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1304 struct list_head *queue = array->queue + rt_se_prio(rt_se); in __enqueue_rt_entity()
1312 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) { in __enqueue_rt_entity()
1313 if (rt_se->on_list) in __enqueue_rt_entity()
1319 WARN_ON_ONCE(rt_se->on_list); in __enqueue_rt_entity()
1321 list_add(&rt_se->run_list, queue); in __enqueue_rt_entity()
1323 list_add_tail(&rt_se->run_list, queue); in __enqueue_rt_entity()
1325 __set_bit(rt_se_prio(rt_se), array->bitmap); in __enqueue_rt_entity()
1326 rt_se->on_list = 1; in __enqueue_rt_entity()
1328 rt_se->on_rq = 1; in __enqueue_rt_entity()
1336 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1339 WARN_ON_ONCE(!rt_se->on_list); in __dequeue_rt_entity()
1342 rt_se->on_rq = 0; in __dequeue_rt_entity()
1349 * entries, we must remove entries top - down.
1357 rt_se->back = back; in dequeue_rt_stack()
1361 rt_nr_running = rt_rq_of_se(back)->rt_nr_running; in dequeue_rt_stack()
1363 for (rt_se = back; rt_se; rt_se = rt_se->back) { in dequeue_rt_stack()
1378 enqueue_top_rt_rq(&rq->rt); in enqueue_rt_entity()
1390 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
1393 enqueue_top_rt_rq(&rq->rt); in dequeue_rt_entity()
1402 struct sched_rt_entity *rt_se = &p->rt; in enqueue_task_rt()
1405 rt_se->timeout = 0; in enqueue_task_rt()
1410 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1416 struct sched_rt_entity *rt_se = &p->rt; in dequeue_task_rt()
1433 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1434 struct list_head *queue = array->queue + rt_se_prio(rt_se); in requeue_rt_entity()
1437 list_move(&rt_se->run_list, queue); in requeue_rt_entity()
1439 list_move_tail(&rt_se->run_list, queue); in requeue_rt_entity()
1445 struct sched_rt_entity *rt_se = &p->rt; in requeue_task_rt()
1456 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1476 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
1495 * post-schedule router will push the preempted task away in select_task_rq_rt()
1497 * This test is optimistic, if we get it wrong the load-balancer in select_task_rq_rt()
1501 * requirement of the task - which is only important on heterogeneous in select_task_rq_rt()
1506 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio); in select_task_rq_rt()
1518 if (!test && target != -1 && !rt_task_fits_capacity(p, target)) in select_task_rq_rt()
1525 if (target != -1 && ( in select_task_rq_rt()
1529 p->prio < cpu_rq(target)->rt.highest_prio.curr)) in select_task_rq_rt()
1546 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1547 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) in check_preempt_equal_prio()
1554 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1555 cpupri_find(&rq->rd->cpupri, p, NULL)) in check_preempt_equal_prio()
1569 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { in balance_rt()
1572 * picked for load-balance and preemption/IRQs are still in balance_rt()
1590 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1599 * - the newly woken task is of equal priority to the current task in check_preempt_curr_rt()
1600 * - the newly woken task is non-migratable while current is migratable in check_preempt_curr_rt()
1601 * - current will be preempted on the next reschedule in check_preempt_curr_rt()
1605 * to move current somewhere else, making room for our non-migratable in check_preempt_curr_rt()
1608 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1615 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
1628 if (rq->curr->sched_class != &rt_sched_class) in set_next_task_rt()
1636 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
1641 idx = sched_find_first_bit(array->bitmap); in pick_next_rt_entity()
1644 queue = array->queue + idx; in pick_next_rt_entity()
1647 next = list_entry(queue->next, struct sched_rt_entity, run_list); in pick_next_rt_entity()
1655 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt()
1689 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1701 cpumask_test_cpu(cpu, p->cpus_ptr)) in pick_rt_task()
1713 struct plist_head *head = &rq->rt.pushable_tasks; in pick_highest_pushable_task()
1731 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; in find_cas_cpu()
1736 int cpu = -1; in find_cas_cpu()
1737 int target_cpu = -1; in find_cas_cpu()
1750 return -1; in find_cas_cpu()
1761 return -1; in find_cas_cpu()
1764 sg = sd->groups; in find_cas_cpu()
1770 if (cpumask_test_cpu(rd->max_cap_orig_cpu, in find_cas_cpu()
1787 if (task->state == TASK_RUNNING || boosted) { in find_cas_cpu()
1821 } while (sg = sg->next, sg != sd->groups); in find_cas_cpu()
1846 if (!cpumask_test_cpu(cpu, task->cpus_ptr)) in find_cas_cpu()
1850 if (target_cpu != -1 && in find_cas_cpu()
1876 if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) { in find_cas_cpu()
1906 return -1; in find_lowest_rq()
1908 if (task->nr_cpus_allowed == 1) in find_lowest_rq()
1909 return -1; /* No other targets possible */ in find_lowest_rq()
1917 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, in find_lowest_rq()
1922 ret = cpupri_find(&task_rq(task)->rd->cpupri, in find_lowest_rq()
1927 return -1; /* No targets found */ in find_lowest_rq()
1931 if (cas_cpu != -1) in find_lowest_rq()
1941 * it is most likely cache-hot in that location. in find_lowest_rq()
1951 this_cpu = -1; /* Skip this_cpu opt if not among lowest */ in find_lowest_rq()
1955 if (sd->flags & SD_WAKE_AFFINE) { in find_lowest_rq()
1962 if (this_cpu != -1 && in find_lowest_rq()
1983 if (this_cpu != -1) in find_lowest_rq()
1990 return -1; in find_lowest_rq()
2000 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
2003 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
2005 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_task()
2023 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_lowest_rq()
2028 if (lowest_rq->rt.highest_prio.curr <= task->prio) { in find_lock_lowest_rq()
2047 !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) { in find_lock_lowest_rq()
2055 if (lowest_rq->rt.highest_prio.curr > task->prio) in find_lock_lowest_rq()
2077 if (!rq->rt.overloaded) in push_rt_task()
2085 if (WARN_ON(next_task == rq->curr)) in push_rt_task()
2093 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
2106 * find_lock_lowest_rq releases rq->lock in push_rt_task()
2110 * run-queue and is also still the next task eligible for in push_rt_task()
2117 * eligible task, but we failed to find a run-queue in push_rt_task()
2119 * other CPUs will pull from us when ready. in push_rt_task()
2137 set_task_cpu(next_task, lowest_rq->cpu); in push_rt_task()
2166 * up that may be able to run one of its non-running queued RT tasks.
2207 * When starting the IPI RT pushing, the rto_cpu is set to -1, in rto_next_cpu()
2221 /* When rto_cpu is -1 this acts like cpumask_first() */ in rto_next_cpu()
2222 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); in rto_next_cpu()
2224 rd->rto_cpu = cpu; in rto_next_cpu()
2229 rd->rto_cpu = -1; in rto_next_cpu()
2237 next = atomic_read_acquire(&rd->rto_loop_next); in rto_next_cpu()
2239 if (rd->rto_loop == next) in rto_next_cpu()
2242 rd->rto_loop = next; in rto_next_cpu()
2245 return -1; in rto_next_cpu()
2260 int cpu = -1; in tell_cpu_to_push()
2263 atomic_inc(&rq->rd->rto_loop_next); in tell_cpu_to_push()
2266 if (!rto_start_trylock(&rq->rd->rto_loop_start)) in tell_cpu_to_push()
2269 raw_spin_lock(&rq->rd->rto_lock); in tell_cpu_to_push()
2277 if (rq->rd->rto_cpu < 0) in tell_cpu_to_push()
2278 cpu = rto_next_cpu(rq->rd); in tell_cpu_to_push()
2280 raw_spin_unlock(&rq->rd->rto_lock); in tell_cpu_to_push()
2282 rto_start_unlock(&rq->rd->rto_loop_start); in tell_cpu_to_push()
2286 sched_get_rd(rq->rd); in tell_cpu_to_push()
2287 irq_work_queue_on(&rq->rd->rto_push_work, cpu); in tell_cpu_to_push()
2306 raw_spin_lock(&rq->lock); in rto_push_irq_work_func()
2308 raw_spin_unlock(&rq->lock); in rto_push_irq_work_func()
2311 raw_spin_lock(&rd->rto_lock); in rto_push_irq_work_func()
2316 raw_spin_unlock(&rd->rto_lock); in rto_push_irq_work_func()
2324 irq_work_queue_on(&rd->rto_push_work, cpu); in rto_push_irq_work_func()
2330 int this_cpu = this_rq->cpu, cpu; in pull_rt_task()
2347 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) in pull_rt_task()
2357 for_each_cpu(cpu, this_rq->rd->rto_mask) { in pull_rt_task()
2364 * Don't bother taking the src_rq->lock if the next highest in pull_rt_task()
2365 * task is known to be lower-priority than our current task. in pull_rt_task()
2370 if (src_rq->rt.highest_prio.next >= in pull_rt_task()
2371 this_rq->rt.highest_prio.curr) in pull_rt_task()
2389 * the to-be-scheduled task? in pull_rt_task()
2391 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2392 WARN_ON(p == src_rq->curr); in pull_rt_task()
2403 if (p->prio < src_rq->curr->prio) in pull_rt_task()
2433 !test_tsk_need_resched(rq->curr) && in task_woken_rt()
2434 p->nr_cpus_allowed > 1 && in task_woken_rt()
2435 (dl_task(rq->curr) || rt_task(rq->curr)) && in task_woken_rt()
2436 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2437 rq->curr->prio <= p->prio); in task_woken_rt()
2443 /* Assumes rq->lock is held */
2446 if (rq->rt.overloaded) in rq_online_rt()
2451 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in rq_online_rt()
2454 /* Assumes rq->lock is held */
2457 if (rq->rt.overloaded) in rq_offline_rt()
2462 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); in rq_offline_rt()
2478 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running || in switched_from_rt()
2519 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
2522 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2529 * us to initiate a push or pull.
2537 if (rq->curr == p) { in prio_changed_rt()
2543 if (oldprio < p->prio) in prio_changed_rt()
2550 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2554 if (oldprio < p->prio) in prio_changed_rt()
2563 if (p->prio < rq->curr->prio) in prio_changed_rt()
2580 if (p->rt.watchdog_stamp != jiffies) { in watchdog()
2581 p->rt.timeout++; in watchdog()
2582 p->rt.watchdog_stamp = jiffies; in watchdog()
2586 if (p->rt.timeout > next) { in watchdog()
2587 posix_cputimers_rt_watchdog(&p->posix_cputimers, in watchdog()
2588 p->se.sum_exec_runtime); in watchdog()
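watchdog() counts scheduler ticks of RT execution in p->rt.timeout (reset on enqueue, see enqueue_task_rt above) and arms the POSIX CPU-timer machinery once the task's RLIMIT_RTTIME, expressed in microseconds, is reached. A minimal sketch of setting that limit from userspace; the concrete numbers are illustrative.

/* Sketch: cap an RT task's CPU time between blocking operations with
 * RLIMIT_RTTIME (microseconds), the limit the watchdog() path enforces. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl = {
                .rlim_cur = 500000,   /* SIGXCPU after 500 ms of RT CPU time */
                .rlim_max = 1000000,  /* SIGKILL after 1 s */
        };

        if (setrlimit(RLIMIT_RTTIME, &rl) == -1) {
                perror("setrlimit(RLIMIT_RTTIME)");
                return 1;
        }
        return 0;
}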
2606 struct sched_rt_entity *rt_se = &p->rt; in task_tick_rt()
2617 if (p->policy != SCHED_RR) in task_tick_rt()
2620 if (--p->rt.time_slice) in task_tick_rt()
2623 p->rt.time_slice = sched_rr_timeslice; in task_tick_rt()
2630 if (rt_se->run_list.prev != rt_se->run_list.next) { in task_tick_rt()
2642 struct task_struct *next_task = busiest_rq->rt_push_task; in rt_active_load_balance_cpu_stop()
2646 raw_spin_lock_irqsave(&busiest_rq->lock, flags); in rt_active_load_balance_cpu_stop()
2647 busiest_rq->rt_active_balance = 0; in rt_active_load_balance_cpu_stop()
2662 set_task_cpu(next_task, lowest_rq->cpu); in rt_active_load_balance_cpu_stop()
2670 raw_spin_unlock_irqrestore(&busiest_rq->lock, flags); in rt_active_load_balance_cpu_stop()
2688 if (p->nr_cpus_allowed == 1) in check_for_migration_rt()
2693 if (cpu_orig_cap == rq->rd->max_cpu_capacity) in check_for_migration_rt()
2708 raw_spin_lock(&rq->lock); in check_for_migration_rt()
2709 if (!rq->active_balance && !rq->rt_active_balance) { in check_for_migration_rt()
2710 rq->rt_active_balance = 1; in check_for_migration_rt()
2711 rq->rt_push_task = p; in check_for_migration_rt()
2715 raw_spin_unlock(&rq->lock); in check_for_migration_rt()
2720 rq, &rq->rt_active_balance_work); in check_for_migration_rt()
2730 if (task->policy == SCHED_RR) in get_rr_interval_rt()
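get_rr_interval_rt() backs the sched_rr_get_interval() system call, which reports the round-robin timeslice for SCHED_RR tasks (and 0 for SCHED_FIFO). A short userspace sketch of querying it; pid 0 means the calling process.

/* Sketch: query the SCHED_RR timeslice that get_rr_interval_rt() reports. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        if (sched_rr_get_interval(0, &ts) == -1) {
                perror("sched_rr_get_interval");
                return 1;
        }
        printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}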
2796 css_task_iter_start(&tg->css, 0, &it); in tg_has_rt_tasks()
2815 u64 period, runtime; in tg_rt_schedulable() local
2817 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
2818 runtime = tg->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
2820 if (tg == d->tg) { in tg_rt_schedulable()
2821 period = d->rt_period; in tg_rt_schedulable()
2822 runtime = d->rt_runtime; in tg_rt_schedulable()
2826 * Cannot have more runtime than the period. in tg_rt_schedulable()
2828 if (runtime > period && runtime != RUNTIME_INF) in tg_rt_schedulable()
2829 return -EINVAL; in tg_rt_schedulable()
2835 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) in tg_rt_schedulable()
2836 return -EBUSY; in tg_rt_schedulable()
2838 total = to_ratio(period, runtime); in tg_rt_schedulable()
2844 return -EINVAL; in tg_rt_schedulable()
2849 list_for_each_entry_rcu(child, &tg->children, siblings) { in tg_rt_schedulable()
2850 period = ktime_to_ns(child->rt_bandwidth.rt_period); in tg_rt_schedulable()
2851 runtime = child->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
2853 if (child == d->tg) { in tg_rt_schedulable()
2854 period = d->rt_period; in tg_rt_schedulable()
2855 runtime = d->rt_runtime; in tg_rt_schedulable()
2858 sum += to_ratio(period, runtime); in tg_rt_schedulable()
2862 return -EINVAL; in tg_rt_schedulable()
2867 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) in __rt_schedulable() argument
2873 .rt_period = period, in __rt_schedulable()
2894 return -EINVAL; in tg_set_rt_bandwidth()
2896 /* No period doesn't make any sense. */ in tg_set_rt_bandwidth()
2898 return -EINVAL; in tg_set_rt_bandwidth()
2904 return -EINVAL; in tg_set_rt_bandwidth()
2911 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
2912 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); in tg_set_rt_bandwidth()
2913 tg->rt_bandwidth.rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2916 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth()
2918 raw_spin_lock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2919 rt_rq->rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2920 raw_spin_unlock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2922 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
2933 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_set_rt_runtime()
2938 return -EINVAL; in sched_group_set_rt_runtime()
2947 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) in sched_group_rt_runtime()
2948 return -1; in sched_group_rt_runtime()
2950 rt_runtime_us = tg->rt_bandwidth.rt_runtime; in sched_group_rt_runtime()
2960 return -EINVAL; in sched_group_set_rt_period()
2963 rt_runtime = tg->rt_bandwidth.rt_runtime; in sched_group_set_rt_period()
2972 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_rt_period()
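sched_group_set_rt_runtime()/sched_group_set_rt_period() are what the cgroup v1 cpu controller files cpu.rt_runtime_us and cpu.rt_period_us end up calling when CONFIG_RT_GROUP_SCHED is enabled. A hedged sketch of writing them; the mount point and group name are assumptions for illustration.

/* Sketch: give a cgroup v1 cpu group 300 ms of RT time per 1 s period.
 * Requires CONFIG_RT_GROUP_SCHED; paths below are illustrative. */
#include <stdio.h>

static int write_knob(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        write_knob("/sys/fs/cgroup/cpu/rtgroup/cpu.rt_period_us", "1000000");
        write_knob("/sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us", "300000");
        return 0;
}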
2991 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) in sched_rt_can_attach()
3005 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints()
3007 raw_spin_lock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
3008 rt_rq->rt_runtime = global_rt_runtime(); in sched_rt_global_constraints()
3009 raw_spin_unlock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
3020 return -EINVAL; in sched_rt_global_validate()
3026 return -EINVAL; in sched_rt_global_validate()