Lines Matching refs:p

69 struct task_struct *p = dl_task_of(dl_se); in dl_rq_of_se() local
70 struct rq *rq = task_rq(p); in dl_rq_of_se()
311 static void dl_change_utilization(struct task_struct *p, u64 new_bw) in dl_change_utilization() argument
315 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
317 if (task_on_rq_queued(p)) in dl_change_utilization()
320 rq = task_rq(p); in dl_change_utilization()
321 if (p->dl.dl_non_contending) { in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
323 p->dl.dl_non_contending = 0; in dl_change_utilization()
331 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
332 put_task_struct(p); in dl_change_utilization()
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
392 static void task_non_contending(struct task_struct *p) in task_non_contending() argument
394 struct sched_dl_entity *dl_se = &p->dl; in task_non_contending()
427 if (dl_task(p)) in task_non_contending()
429 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { in task_non_contending()
430 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
432 if (READ_ONCE(p->__state) == TASK_DEAD) in task_non_contending()
433 sub_rq_bw(&p->dl, &rq->dl); in task_non_contending()
435 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
437 __dl_clear_params(p); in task_non_contending()
444 get_task_struct(p); in task_non_contending()
485 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) in is_leftmost() argument
487 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
578 struct task_struct *p = dl_task_of(dl_se); in inc_dl_migration() local
580 if (p->nr_cpus_allowed > 1) in inc_dl_migration()
588 struct task_struct *p = dl_task_of(dl_se); in dec_dl_migration() local
590 if (p->nr_cpus_allowed > 1) in dec_dl_migration()
608 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
612 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); in enqueue_pushable_dl_task()
614 leftmost = rb_add_cached(&p->pushable_dl_tasks, in enqueue_pushable_dl_task()
618 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
621 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
627 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) in dequeue_pushable_dl_task()
630 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root); in dequeue_pushable_dl_task()
634 RB_CLEAR_NODE(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
670 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) in dl_task_offline_migration() argument
675 later_rq = find_lock_later_rq(p, rq); in dl_task_offline_migration()
683 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); in dl_task_offline_migration()
702 if (p->dl.dl_non_contending || p->dl.dl_throttled) { in dl_task_offline_migration()
709 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
710 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
712 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
713 add_running_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
715 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
716 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
726 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
731 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
734 set_task_cpu(p, later_rq->cpu); in dl_task_offline_migration()
743 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
748 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
771 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
772 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
773 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
1052 static int start_dl_timer(struct task_struct *p) in start_dl_timer() argument
1054 struct sched_dl_entity *dl_se = &p->dl; in start_dl_timer()
1056 struct rq *rq = task_rq(p); in start_dl_timer()
1090 get_task_struct(p); in start_dl_timer()
1115 struct task_struct *p = dl_task_of(dl_se); in dl_task_timer() local
1119 rq = task_rq_lock(p, &rf); in dl_task_timer()
1125 if (!dl_task(p)) in dl_task_timer()
1159 if (!task_on_rq_queued(p)) { in dl_task_timer()
1171 rq = dl_task_offline_migration(rq, p); in dl_task_timer()
1183 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); in dl_task_timer()
1185 check_preempt_curr_dl(rq, p, 0); in dl_task_timer()
1206 task_rq_unlock(rq, p, &rf); in dl_task_timer()
1212 put_task_struct(p); in dl_task_timer()
1245 struct task_struct *p = dl_task_of(dl_se); in dl_check_constrained_dl() local
1250 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p))) in dl_check_constrained_dl()
1415 struct task_struct *p = dl_task_of(dl_se); in inactive_task_timer() local
1419 rq = task_rq_lock(p, &rf); in inactive_task_timer()
1424 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { in inactive_task_timer()
1425 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1427 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) { in inactive_task_timer()
1428 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1429 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1434 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1436 __dl_clear_params(p); in inactive_task_timer()
1446 task_rq_unlock(rq, p, &rf); in inactive_task_timer()
1447 put_task_struct(p); in inactive_task_timer()
1597 struct task_struct *p = dl_task_of(dl_se); in update_stats_dequeue_dl() local
1605 state = READ_ONCE(p->__state); in update_stats_dequeue_dl()
1607 __schedstat_set(p->stats.sleep_start, in update_stats_dequeue_dl()
1611 __schedstat_set(p->stats.block_start, in update_stats_dequeue_dl()
1672 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_dl() argument
1674 if (is_dl_boosted(&p->dl)) { in enqueue_task_dl()
1687 if (p->dl.dl_throttled) { in enqueue_task_dl()
1693 hrtimer_try_to_cancel(&p->dl.dl_timer); in enqueue_task_dl()
1694 p->dl.dl_throttled = 0; in enqueue_task_dl()
1696 } else if (!dl_prio(p->normal_prio)) { in enqueue_task_dl()
1706 p->dl.dl_throttled = 0; in enqueue_task_dl()
1709 task_pid_nr(p)); in enqueue_task_dl()
1720 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) in enqueue_task_dl()
1721 dl_check_constrained_dl(&p->dl); in enqueue_task_dl()
1723 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) { in enqueue_task_dl()
1724 add_rq_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1725 add_running_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1740 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { in enqueue_task_dl()
1742 task_contending(&p->dl, flags); in enqueue_task_dl()
1748 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); in enqueue_task_dl()
1750 enqueue_dl_entity(&p->dl, flags); in enqueue_task_dl()
1752 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_dl()
1753 enqueue_pushable_dl_task(rq, p); in enqueue_task_dl()
1756 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in __dequeue_task_dl() argument
1758 update_stats_dequeue_dl(&rq->dl, &p->dl, flags); in __dequeue_task_dl()
1759 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
1760 dequeue_pushable_dl_task(rq, p); in __dequeue_task_dl()
1763 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_dl() argument
1766 __dequeue_task_dl(rq, p, flags); in dequeue_task_dl()
1768 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) { in dequeue_task_dl()
1769 sub_running_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1770 sub_rq_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1783 task_non_contending(p); in dequeue_task_dl()
1818 static inline bool dl_task_is_earliest_deadline(struct task_struct *p, in dl_task_is_earliest_deadline() argument
1822 dl_time_before(p->dl.deadline, in dl_task_is_earliest_deadline()
1829 select_task_rq_dl(struct task_struct *p, int cpu, int flags) in select_task_rq_dl() argument
1854 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1855 p->nr_cpus_allowed > 1; in select_task_rq_dl()
1862 select_rq |= !dl_task_fits_capacity(p, cpu); in select_task_rq_dl()
1865 int target = find_later_rq(p); in select_task_rq_dl()
1868 dl_task_is_earliest_deadline(p, cpu_rq(target))) in select_task_rq_dl()
1877 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) in migrate_task_rq_dl() argument
1882 if (READ_ONCE(p->__state) != TASK_WAKING) in migrate_task_rq_dl()
1885 rq = task_rq(p); in migrate_task_rq_dl()
1892 if (p->dl.dl_non_contending) { in migrate_task_rq_dl()
1894 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1895 p->dl.dl_non_contending = 0; in migrate_task_rq_dl()
1903 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in migrate_task_rq_dl()
1904 put_task_struct(p); in migrate_task_rq_dl()
1906 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1910 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) in check_preempt_equal_dl() argument
1924 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
1925 cpudl_find(&rq->rd->cpudl, p, NULL)) in check_preempt_equal_dl()
1931 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_dl() argument
1933 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
1953 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, in check_preempt_curr_dl() argument
1956 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1966 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1968 check_preempt_equal_dl(rq, p); in check_preempt_curr_dl()
1973 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1975 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1978 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1983 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) in set_next_task_dl() argument
1985 struct sched_dl_entity *dl_se = &p->dl; in set_next_task_dl()
1988 p->se.exec_start = rq_clock_task(rq); in set_next_task_dl()
1989 if (on_dl_rq(&p->dl)) in set_next_task_dl()
1993 dequeue_pushable_dl_task(rq, p); in set_next_task_dl()
1999 start_hrtick_dl(rq, p); in set_next_task_dl()
2021 struct task_struct *p; in pick_task_dl() local
2028 p = dl_task_of(dl_se); in pick_task_dl()
2030 return p; in pick_task_dl()
2035 struct task_struct *p; in pick_next_task_dl() local
2037 p = pick_task_dl(rq); in pick_next_task_dl()
2038 if (p) in pick_next_task_dl()
2039 set_next_task_dl(rq, p, true); in pick_next_task_dl()
2041 return p; in pick_next_task_dl()
2044 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) in put_prev_task_dl() argument
2046 struct sched_dl_entity *dl_se = &p->dl; in put_prev_task_dl()
2049 if (on_dl_rq(&p->dl)) in put_prev_task_dl()
2055 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2056 enqueue_pushable_dl_task(rq, p); in put_prev_task_dl()
2067 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) in task_tick_dl() argument
2077 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2078 is_leftmost(p, &rq->dl)) in task_tick_dl()
2079 start_hrtick_dl(rq, p); in task_tick_dl()
2082 static void task_fork_dl(struct task_struct *p) in task_fork_dl() argument
2095 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) in pick_dl_task() argument
2097 if (!task_on_cpu(rq, p) && in pick_dl_task()
2098 cpumask_test_cpu(cpu, &p->cpus_mask)) in pick_dl_task()
2109 struct task_struct *p = NULL; in pick_earliest_pushable_dl_task() local
2119 p = __node_2_pdl(next_node); in pick_earliest_pushable_dl_task()
2121 if (pick_dl_task(rq, p, cpu)) in pick_earliest_pushable_dl_task()
2122 return p; in pick_earliest_pushable_dl_task()
2277 struct task_struct *p; in pick_next_pushable_dl_task() local
2282 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); in pick_next_pushable_dl_task()
2284 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2285 WARN_ON_ONCE(task_current(rq, p)); in pick_next_pushable_dl_task()
2286 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
2288 WARN_ON_ONCE(!task_on_rq_queued(p)); in pick_next_pushable_dl_task()
2289 WARN_ON_ONCE(!dl_task(p)); in pick_next_pushable_dl_task()
2291 return p; in pick_next_pushable_dl_task()
2387 struct task_struct *p, *push_task; in pull_dl_task() local
2427 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
2434 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
2435 dl_task_is_earliest_deadline(p, this_rq)) { in pull_dl_task()
2436 WARN_ON(p == src_rq->curr); in pull_dl_task()
2437 WARN_ON(!task_on_rq_queued(p)); in pull_dl_task()
2443 if (dl_time_before(p->dl.deadline, in pull_dl_task()
2447 if (is_migration_disabled(p)) { in pull_dl_task()
2450 deactivate_task(src_rq, p, 0); in pull_dl_task()
2451 set_task_cpu(p, this_cpu); in pull_dl_task()
2452 activate_task(this_rq, p, 0); in pull_dl_task()
2453 dmin = p->dl.deadline; in pull_dl_task()
2480 static void task_woken_dl(struct rq *rq, struct task_struct *p) in task_woken_dl() argument
2482 if (!task_on_cpu(rq, p) && in task_woken_dl()
2484 p->nr_cpus_allowed > 1 && in task_woken_dl()
2487 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2492 static void set_cpus_allowed_dl(struct task_struct *p, in set_cpus_allowed_dl() argument
2499 WARN_ON_ONCE(!dl_task(p)); in set_cpus_allowed_dl()
2501 rq = task_rq(p); in set_cpus_allowed_dl()
2519 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2523 set_cpus_allowed_common(p, new_mask, flags); in set_cpus_allowed_dl()
2556 void dl_add_task_root_domain(struct task_struct *p) in dl_add_task_root_domain() argument
2562 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in dl_add_task_root_domain()
2563 if (!dl_task(p)) { in dl_add_task_root_domain()
2564 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in dl_add_task_root_domain()
2568 rq = __task_rq_lock(p, &rf); in dl_add_task_root_domain()
2573 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2577 task_rq_unlock(rq, p, &rf); in dl_add_task_root_domain()
2591 static void switched_from_dl(struct rq *rq, struct task_struct *p) in switched_from_dl() argument
2601 if (task_on_rq_queued(p) && p->dl.dl_runtime) in switched_from_dl()
2602 task_non_contending(p); in switched_from_dl()
2608 dec_dl_tasks_cs(p); in switched_from_dl()
2610 if (!task_on_rq_queued(p)) { in switched_from_dl()
2617 if (p->dl.dl_non_contending) in switched_from_dl()
2618 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2619 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2627 if (p->dl.dl_non_contending) in switched_from_dl()
2628 p->dl.dl_non_contending = 0; in switched_from_dl()
2635 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2645 static void switched_to_dl(struct rq *rq, struct task_struct *p) in switched_to_dl() argument
2647 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in switched_to_dl()
2648 put_task_struct(p); in switched_to_dl()
2654 inc_dl_tasks_cs(p); in switched_to_dl()
2657 if (!task_on_rq_queued(p)) { in switched_to_dl()
2658 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2663 if (rq->curr != p) { in switched_to_dl()
2665 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2669 check_preempt_curr_dl(rq, p, 0); in switched_to_dl()
2681 static void prio_changed_dl(struct rq *rq, struct task_struct *p, in prio_changed_dl() argument
2684 if (task_on_rq_queued(p) || task_current(rq, p)) { in prio_changed_dl()
2700 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2840 int sched_dl_overflow(struct task_struct *p, int policy, in sched_dl_overflow() argument
2846 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow()
2854 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2866 if (dl_policy(policy) && !task_has_dl_policy(p) && in sched_dl_overflow()
2868 if (hrtimer_active(&p->dl.inactive_timer)) in sched_dl_overflow()
2869 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2872 } else if (dl_policy(policy) && task_has_dl_policy(p) && in sched_dl_overflow()
2873 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2881 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2883 dl_change_utilization(p, new_bw); in sched_dl_overflow()
2885 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { in sched_dl_overflow()
2906 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) in __setparam_dl() argument
2908 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
2918 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) in __getparam_dl() argument
2920 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
2922 attr->sched_priority = p->rt_priority; in __getparam_dl()
2988 void __dl_clear_params(struct task_struct *p) in __dl_clear_params() argument
2990 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
3009 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) in dl_param_changed() argument
3011 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
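
The tail of the listing (sched_dl_overflow(), __setparam_dl(), __getparam_dl(), dl_param_changed()) is where the kernel consumes the userspace struct sched_attr. As a minimal, hypothetical userspace sketch (not taken from the file above; the wrapper name sched_setattr_raw and the 10ms/30ms/30ms parameters are illustrative), this is roughly how a thread requests SCHED_DEADLINE through the raw sched_setattr(2) syscall, which on the kernel side flows through sched_dl_overflow() and __setparam_dl():

/* Hypothetical example: request SCHED_DEADLINE for the calling thread.
 * Needs appropriate privileges (e.g. CAP_SYS_NICE) to succeed. */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* Kernel ABI layout of struct sched_attr, declared locally (as the
 * sched-deadline documentation example does) to avoid header dependencies. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE fields, in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int sched_setattr_raw(pid_t pid, struct sched_attr *attr, unsigned int flags)
{
	return syscall(SYS_sched_setattr, pid, attr, flags);
}

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size		= sizeof(attr);
	attr.sched_policy	= SCHED_DEADLINE;
	attr.sched_runtime	= 10ULL * 1000 * 1000;	/* 10 ms budget per period */
	attr.sched_deadline	= 30ULL * 1000 * 1000;	/* 30 ms relative deadline */
	attr.sched_period	= 30ULL * 1000 * 1000;	/* 30 ms period            */

	if (sched_setattr_raw(0, &attr, 0)) {		/* pid 0 == calling thread */
		perror("sched_setattr");
		return 1;
	}

	/* ... periodic deadline work would run here ... */
	return 0;
}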