Lines matching refs:dl in kernel/sched/deadline.c (the SCHED_DEADLINE scheduling class); each entry shows the source line number, the matching line, and the enclosing function.
59 return container_of(dl_se, struct task_struct, dl); in dl_task_of()
64 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
72 return &rq->dl; in dl_rq_of_se()
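The three accessors above (lines 59, 64, 72) are all instances of the container_of() pattern: recover a pointer to the enclosing structure from a pointer to one of its members. A minimal user-space sketch, with simplified stand-ins for the kernel types:

/* container_of(): subtract the member's offset from the member pointer.
 * The structs below are illustrative stubs, not the kernel's definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_dl_entity { unsigned long long deadline; };
struct task_struct    { int pid; struct sched_dl_entity dl; };

/* Mirrors dl_task_of() above. */
static struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

int main(void)
{
        struct task_struct t = { .pid = 42 };

        /* Recovers &t from &t.dl. */
        printf("pid = %d\n", dl_task_of(&t.dl)->pid);
        return 0;
}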
179 rq->dl.extra_bw += bw; in __dl_update()
185 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
206 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); in __dl_update() local
208 dl->extra_bw += bw; in __dl_update()
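extra_bw, dl_bw and the other bandwidth fields are utilizations stored in fixed point: a runtime/period ratio scaled by 2^BW_SHIFT (BW_SHIFT is 20 in the kernel), which is why __dl_update() can adjust them with plain additions. A small sketch of that arithmetic, mirroring the kernel's to_ratio():

#include <stdio.h>

#define BW_SHIFT 20
#define BW_UNIT  (1ULL << BW_SHIFT)

/* bandwidth = runtime / period, scaled to BW_SHIFT fixed point. */
static unsigned long long to_ratio(unsigned long long period,
                                   unsigned long long runtime)
{
        if (period == 0)
                return 0;
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        /* A 10ms runtime every 30ms period reserves one third of a CPU. */
        unsigned long long bw = to_ratio(30000000ULL, 10000000ULL);

        printf("bw = %llu (%.3f of BW_UNIT)\n", bw, (double)bw / BW_UNIT);
        return 0;
}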
315 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
321 if (p->dl.dl_non_contending) { in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
323 p->dl.dl_non_contending = 0; in dl_change_utilization()
331 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
335 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
394 struct sched_dl_entity *dl_se = &p->dl; in task_non_contending()
433 sub_rq_bw(&p->dl, &rq->dl); in task_non_contending()
435 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
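task_non_contending() keeps a blocked task's bandwidth accounted until its "0-lag time", the instant at which the remaining runtime, consumed at the reserved rate, would run out; the inactive_timer handled in inactive_task_timer() below fires at that point to drop the bandwidth. A hedged paraphrase of the computation (the kernel does it with div64_long on the real sched_dl_entity fields):

#include <stdio.h>

/* 0-lag time = deadline - (runtime * period) / budget. */
static unsigned long long zerolag_time(unsigned long long deadline,
                                       long long runtime,
                                       unsigned long long dl_runtime,
                                       unsigned long long dl_period)
{
        return deadline - (unsigned long long)(runtime * (long long)dl_period /
                                               (long long)dl_runtime);
}

int main(void)
{
        /* 4ms left of a 10ms/30ms reservation: 0-lag is 12ms before the deadline. */
        printf("0-lag at %llu ns\n",
               zerolag_time(60000000ULL, 4000000LL, 10000000ULL, 30000000ULL));
        return 0;
}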
487 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
601 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl); in __pushable_less()
615 &rq->dl.pushable_dl_tasks_root, in enqueue_pushable_dl_task()
618 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
623 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task()
632 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; in dequeue_pushable_dl_task()
639 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); in has_pushable_dl_tasks()
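The pushable-tasks rbtree above, and the earliest-deadline tests further down (__pushable_less(), dl_task_is_earliest_deadline()), order absolute deadlines with a wraparound-safe signed comparison. A standalone sketch of the kernel's dl_time_before():

#include <stdint.h>
#include <stdio.h>

/* True if deadline a is earlier than b, even across u64 wraparound. */
static int dl_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", dl_time_before(10, 20));            /* 1 */
        printf("%d\n", dl_time_before(UINT64_MAX - 5, 5)); /* 1: wrapped */
        return 0;
}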
702 if (p->dl.dl_non_contending || p->dl.dl_throttled) { in dl_task_offline_migration()
709 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
710 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
712 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
713 add_running_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
715 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
716 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
726 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
731 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
1054 struct sched_dl_entity *dl_se = &p->dl; in start_dl_timer()
1285 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ in grub_reclaim()
1287 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
1297 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min) in grub_reclaim()
1300 u_act = BW_UNIT - u_inact - rq->dl.extra_bw; in grub_reclaim()
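The grub_reclaim() lines above (1285-1300) implement GRUB reclaiming: consumed runtime is charged at the active-utilization rate u_act rather than at full speed, clamped so a task never reclaims beyond its own guaranteed share. A user-space paraphrase under simplified types (BW_SHIFT and RATIO_SHIFT match the kernel's 20 and 8; struct dl_rq is stubbed):

#include <stdio.h>

#define BW_SHIFT    20
#define BW_UNIT     (1ULL << BW_SHIFT)
#define RATIO_SHIFT 8

struct dl_rq_sketch {
        unsigned long long this_bw;    /* Utot: bandwidth of all tasks */
        unsigned long long running_bw; /* Uact: bandwidth of contending tasks */
        unsigned long long extra_bw;   /* bandwidth held back from reclaiming */
        unsigned long long bw_ratio;   /* scaling factor, RATIO_SHIFT fixed point */
};

static unsigned long long grub_reclaim(unsigned long long delta,
                                       const struct dl_rq_sketch *dl,
                                       unsigned long long task_bw)
{
        unsigned long long u_inact = dl->this_bw - dl->running_bw; /* Utot - Uact */
        unsigned long long u_act_min = (task_bw * dl->bw_ratio) >> RATIO_SHIFT;
        unsigned long long u_act;

        if (u_inact + dl->extra_bw > BW_UNIT - u_act_min)
                u_act = u_act_min;
        else
                u_act = BW_UNIT - u_inact - dl->extra_bw;

        /* Charge delta scaled by u_act instead of the full delta. */
        return (delta * u_act) >> BW_SHIFT;
}

int main(void)
{
        struct dl_rq_sketch dl = {
                .this_bw    = BW_UNIT / 2, /* Utot = 0.50 */
                .running_bw = BW_UNIT / 4, /* Uact = 0.25 */
                .extra_bw   = BW_UNIT / 2,
                .bw_ratio   = 1 << RATIO_SHIFT,
        };

        /* 1ms of measured runtime is charged at the reclaimed rate. */
        printf("charged: %llu ns\n",
               grub_reclaim(1000000ULL, &dl, BW_UNIT / 4));
        return 0;
}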
1312 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
1356 &curr->dl); in update_curr_dl()
1380 if (!is_leftmost(curr, &rq->dl)) in update_curr_dl()
1428 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1429 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1434 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1443 sub_running_bw(dl_se, &rq->dl); in inactive_task_timer()
1674 if (is_dl_boosted(&p->dl)) { in enqueue_task_dl()
1687 if (p->dl.dl_throttled) { in enqueue_task_dl()
1693 hrtimer_try_to_cancel(&p->dl.dl_timer); in enqueue_task_dl()
1694 p->dl.dl_throttled = 0; in enqueue_task_dl()
1706 p->dl.dl_throttled = 0; in enqueue_task_dl()
1720 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) in enqueue_task_dl()
1721 dl_check_constrained_dl(&p->dl); in enqueue_task_dl()
1724 add_rq_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1725 add_running_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1740 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { in enqueue_task_dl()
1742 task_contending(&p->dl, flags); in enqueue_task_dl()
1748 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); in enqueue_task_dl()
1750 enqueue_dl_entity(&p->dl, flags); in enqueue_task_dl()
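The dl_throttled handling in enqueue_task_dl() is the visible half of the constant bandwidth server: when the budget runs out the entity is throttled, and a later replenishment pushes the deadline forward one period and refills the runtime. A loose paraphrase of that replenishment loop (not the kernel's exact replenish_dl_entity(), which also handles priority boosting and clock drift):

#include <stdio.h>

struct dl_entity_sketch {
        long long runtime;             /* remaining budget, ns (may go negative) */
        unsigned long long deadline;   /* absolute deadline, ns */
        unsigned long long dl_runtime; /* budget per period, ns */
        unsigned long long dl_period;  /* period, ns */
        int dl_throttled;
};

static void replenish(struct dl_entity_sketch *se)
{
        /* Push the deadline forward until the budget is positive again. */
        while (se->runtime <= 0) {
                se->deadline += se->dl_period;
                se->runtime  += (long long)se->dl_runtime;
        }
        se->dl_throttled = 0;
}

int main(void)
{
        struct dl_entity_sketch se = {
                .runtime = -2000000, .deadline = 30000000,
                .dl_runtime = 10000000, .dl_period = 30000000,
                .dl_throttled = 1,
        };

        replenish(&se);
        printf("runtime=%lld deadline=%llu throttled=%d\n",
               se.runtime, se.deadline, se.dl_throttled);
        return 0;
}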
1758 update_stats_dequeue_dl(&rq->dl, &p->dl, flags); in __dequeue_task_dl()
1759 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
1769 sub_running_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1770 sub_rq_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1804 rq->curr->dl.dl_yielded = 1; in yield_task_dl()
1821 return (!rq->dl.dl_nr_running || in dl_task_is_earliest_deadline()
1822 dl_time_before(p->dl.deadline, in dl_task_is_earliest_deadline()
1823 rq->dl.earliest_dl.curr)); in dl_task_is_earliest_deadline()
1854 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1892 if (p->dl.dl_non_contending) { in migrate_task_rq_dl()
1894 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1895 p->dl.dl_non_contending = 0; in migrate_task_rq_dl()
1903 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in migrate_task_rq_dl()
1906 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1933 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
1956 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1966 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1975 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1985 struct sched_dl_entity *dl_se = &p->dl; in set_next_task_dl()
1986 struct dl_rq *dl_rq = &rq->dl; in set_next_task_dl()
1989 if (on_dl_rq(&p->dl)) in set_next_task_dl()
2020 struct dl_rq *dl_rq = &rq->dl; in pick_task_dl()
2046 struct sched_dl_entity *dl_se = &p->dl; in put_prev_task_dl()
2047 struct dl_rq *dl_rq = &rq->dl; in put_prev_task_dl()
2049 if (on_dl_rq(&p->dl)) in put_prev_task_dl()
2055 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2077 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2078 is_leftmost(p, &rq->dl)) in task_tick_dl()
2115 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); in pick_earliest_pushable_dl_task()
2282 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); in pick_next_pushable_dl_task()
2305 if (!rq->dl.overloaded) in push_dl_task()
2319 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && in push_dl_task()
2411 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2412 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
2413 src_rq->dl.earliest_dl.next)) in pull_dl_task()
2424 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
2434 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
2443 if (dl_time_before(p->dl.deadline, in pull_dl_task()
2444 src_rq->curr->dl.deadline)) in pull_dl_task()
2453 dmin = p->dl.deadline; in pull_dl_task()
2487 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2519 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2529 if (rq->dl.overloaded) in rq_online_dl()
2533 if (rq->dl.dl_nr_running > 0) in rq_online_dl()
2534 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
2540 if (rq->dl.overloaded) in rq_offline_dl()
2573 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2601 if (task_on_rq_queued(p) && p->dl.dl_runtime) in switched_from_dl()
2617 if (p->dl.dl_non_contending) in switched_from_dl()
2618 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2619 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2627 if (p->dl.dl_non_contending) in switched_from_dl()
2628 p->dl.dl_non_contending = 0; in switched_from_dl()
2635 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2647 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in switched_to_dl()
2658 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2665 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2692 if (!rq->dl.overloaded) in prio_changed_dl()
2700 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2713 DEFINE_SCHED_CLASS(dl) = {
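DEFINE_SCHED_CLASS(dl) instantiates the deadline class's method table: a const struct of function pointers that the core scheduler dispatches through, tying together the hooks listed in this section (enqueue_task_dl, pick_task_dl, put_prev_task_dl, ...). A simplified user-space stand-in for the pattern, not the kernel's struct sched_class:

#include <stdio.h>

struct rq_sketch { int cpu; };

struct sched_class_sketch {
        void (*enqueue_task)(struct rq_sketch *rq);
        void (*dequeue_task)(struct rq_sketch *rq);
};

static void enqueue_task_dl(struct rq_sketch *rq)
{
        printf("enqueue on cpu %d\n", rq->cpu);
}

static void dequeue_task_dl(struct rq_sketch *rq)
{
        printf("dequeue on cpu %d\n", rq->cpu);
}

/* The core would dispatch through this table for deadline-policy tasks. */
static const struct sched_class_sketch dl_sched_class = {
        .enqueue_task = enqueue_task_dl,
        .dequeue_task = dequeue_task_dl,
};

int main(void)
{
        struct rq_sketch rq = { .cpu = 0 };

        dl_sched_class.enqueue_task(&rq);
        dl_sched_class.dequeue_task(&rq);
        return 0;
}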
2828 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); in sched_dl_do_global()
2854 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2868 if (hrtimer_active(&p->dl.inactive_timer)) in sched_dl_overflow()
2869 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2873 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2881 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
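sched_dl_overflow() is the admission test: a new or changed reservation is accepted only if the summed bandwidth stays within budget, subtracting the task's old bandwidth first as lines 2869 and 2881 show. A simplified paraphrase that ignores CPU-capacity scaling (the 95% default mirrors the kernel's sched_rt_runtime_us / sched_rt_period_us):

#include <stdio.h>

#define BW_SHIFT 20
#define BW_UNIT  (1ULL << BW_SHIFT)

struct dl_bw_sketch {
        unsigned long long bw;       /* allowed bandwidth per CPU */
        unsigned long long total_bw; /* bandwidth already admitted */
};

/* Nonzero if replacing old_bw with new_bw would exceed the budget. */
static int dl_overflow(const struct dl_bw_sketch *dl_b, int cpus,
                       unsigned long long old_bw, unsigned long long new_bw)
{
        return dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

int main(void)
{
        struct dl_bw_sketch dl_b = {
                .bw       = BW_UNIT * 95 / 100, /* 95% per CPU */
                .total_bw = BW_UNIT * 60 / 100, /* 60% already in use */
        };

        printf("admit 0.30? %s\n",
               dl_overflow(&dl_b, 1, 0, BW_UNIT * 30 / 100) ? "no" : "yes");
        printf("admit 0.40? %s\n",
               dl_overflow(&dl_b, 1, 0, BW_UNIT * 40 / 100) ? "no" : "yes");
        return 0;
}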
2908 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
2920 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
2990 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
3011 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
3101 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); in print_dl_stats()