Lines matching refs: p
303 struct task_struct *p = rt_task_of(rt_se); in rq_of_rt_se() local
305 return task_rq(p); in rq_of_rt_se()
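
The two references above are the whole of the !CONFIG_RT_GROUP_SCHED flavour of rq_of_rt_se(): with group scheduling off every rt_se is embedded in a task, so the entity maps straight back to that task's runqueue. Reconstructed for context (matching the mainline shape):

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);	/* container_of() back to the owning task */

	return task_rq(p);				/* per-CPU runqueue the task belongs to */
}
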
382 struct task_struct *p; in inc_rt_migration() local
387 p = rt_task_of(rt_se); in inc_rt_migration()
391 if (p->nr_cpus_allowed > 1) in inc_rt_migration()
399 struct task_struct *p; in dec_rt_migration() local
404 p = rt_task_of(rt_se); in dec_rt_migration()
408 if (p->nr_cpus_allowed > 1) in dec_rt_migration()
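
inc_rt_migration()/dec_rt_migration() use p only to decide whether the entity counts as migratory: group entities are skipped, and a task contributes to rt_nr_migratory only if it may run on more than one CPU. A condensed sketch of the inc side, following the mainline layout (the dec side mirrors it with decrements):

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))		/* group entities carry no task */
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;	/* account on the CPU's root rt_rq */

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)		/* only tasks that can move are "migratory" */
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);		/* may flip the rt-overload state */
}
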
438 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
440 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
441 plist_node_init(&p->pushable_tasks, p->prio); in enqueue_pushable_task()
442 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
445 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
446 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
449 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
451 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
455 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
457 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
465 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
469 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
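
The pushable-task helpers keep a priority-sorted plist of queued RT tasks that could be pushed to another CPU; the del/init/add sequence at 440-442 is the standard way to (re)position a plist node, since a node's prio must not change while it is on the list. Reconstruction of the enqueue side from the lines above (the !CONFIG_SMP stubs at 465/469 are empty):

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	/* re-position the node: remove, re-key with the current prio, re-insert */
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* cache the best prio among pushable tasks for cheap overload checks */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}
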
511 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
521 min_cap = uclamp_eff_value(p, UCLAMP_MIN); in rt_task_fits_capacity()
522 max_cap = uclamp_eff_value(p, UCLAMP_MAX); in rt_task_fits_capacity()
529 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
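
rt_task_fits_capacity() turns the task's effective uclamp values (521-522) into a capacity requirement and checks it against the CPU's original capacity. A sketch following recent mainline; the asymmetric-capacity early-out has been spelled differently across kernel versions (older trees test static_branch_unlikely(&sched_asym_cpucapacity)):

static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap, max_cap, cpu_cap;

	/* only asymmetric (big.LITTLE-style) systems benefit from this check */
	if (!sched_asym_cpucap_active())
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	/* the CPU fits if it can satisfy the clamped demand */
	return cpu_cap >= min(min_cap, max_cap);
}
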
626 struct task_struct *p; in rt_se_boosted() local
631 p = rt_task_of(rt_se); in rt_se_boosted()
632 return p->prio != p->normal_prio; in rt_se_boosted()
1355 struct task_struct *p = NULL; in update_stats_wait_start_rt() local
1361 p = rt_task_of(rt_se); in update_stats_wait_start_rt()
1367 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_start_rt()
1374 struct task_struct *p = NULL; in update_stats_enqueue_sleeper_rt() local
1380 p = rt_task_of(rt_se); in update_stats_enqueue_sleeper_rt()
1386 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); in update_stats_enqueue_sleeper_rt()
1404 struct task_struct *p = NULL; in update_stats_wait_end_rt() local
1410 p = rt_task_of(rt_se); in update_stats_wait_end_rt()
1416 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_end_rt()
1423 struct task_struct *p = NULL; in update_stats_dequeue_rt() local
1429 p = rt_task_of(rt_se); in update_stats_dequeue_rt()
1431 if ((flags & DEQUEUE_SLEEP) && p) { in update_stats_dequeue_rt()
1434 state = READ_ONCE(p->__state); in update_stats_dequeue_rt()
1436 __schedstat_set(p->stats.sleep_start, in update_stats_dequeue_rt()
1440 __schedstat_set(p->stats.block_start, in update_stats_dequeue_rt()
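
The update_stats_*_rt() helpers (1355-1440) resolve p only when the entity is a task, since group entities have no task_struct to account against. On a sleeping dequeue, the task's state picks which timestamp is armed; a fragment of update_stats_dequeue_rt() reconstructed around lines 1431-1440:

	if ((flags & DEQUEUE_SLEEP) && p) {
		unsigned int state = READ_ONCE(p->__state);

		if (state & TASK_INTERRUPTIBLE)		/* voluntary sleep */
			__schedstat_set(p->stats.sleep_start,
					rq_clock(rq_of_rt_rq(rt_rq)));

		if (state & TASK_UNINTERRUPTIBLE)	/* blocked (I/O, locks, ...) */
			__schedstat_set(p->stats.block_start,
					rq_clock(rq_of_rt_rq(rt_rq)));
	}
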
1547 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
1556 p->prio <= rq->rt.highest_prio.next && in should_honor_rt_sync()
1560 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
1571 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1573 struct sched_rt_entity *rt_se = &p->rt; in enqueue_task_rt()
1584 if (!task_current(rq, p) && p->nr_cpus_allowed > 1 && in enqueue_task_rt()
1585 !should_honor_rt_sync(rq, p, sync)) in enqueue_task_rt()
1586 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1589 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1591 struct sched_rt_entity *rt_se = &p->rt; in dequeue_task_rt()
1596 dequeue_pushable_task(rq, p); in dequeue_task_rt()
1617 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1619 struct sched_rt_entity *rt_se = &p->rt; in requeue_task_rt()
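
enqueue_task_rt() (1584-1586) marks the task pushable only when it is not the CPU's current task, may run on more than one CPU, and is not being kept local by a sync wakeup; dequeue_task_rt() (1596) removes it again. A sketch of that decision with the stats and vendor-hook plumbing elided; the ENQUEUE_WAKEUP_SYNC flag name is an assumption from the Android common kernel, where should_honor_rt_sync() lives:

static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;
	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);	/* assumed Android-only wakeup-sync flag */

	/* ... timeout reset, schedstats, enqueue_rt_entity(rt_se, flags) elided ... */

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
	    !should_honor_rt_sync(rq, p, sync))
		enqueue_pushable_task(rq, p);	/* candidate for pushing to another CPU */
}
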
1658 static bool rt_task_fits_cpu(struct task_struct *p, int cpu) in rt_task_fits_cpu() argument
1660 return rt_task_fits_capacity(p, cpu) && !cpu_busy_with_softirqs(cpu); in rt_task_fits_cpu()
1664 select_task_rq_rt(struct task_struct *p, int cpu, int flags) in select_task_rq_rt() argument
1674 trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF, in select_task_rq_rt()
1720 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio); in select_task_rq_rt()
1725 if (should_honor_rt_sync(this_cpu_rq, p, sync) && in select_task_rq_rt()
1726 cpumask_test_cpu(this_cpu, p->cpus_ptr)) { in select_task_rq_rt()
1731 if (test || !rt_task_fits_cpu(p, cpu)) { in select_task_rq_rt()
1732 int target = find_lowest_rq(p); in select_task_rq_rt()
1738 if (!test && target != -1 && !rt_task_fits_cpu(p, target)) in select_task_rq_rt()
1746 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
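
The core of select_task_rq_rt() around 1720-1746: prefer to move a waking RT task away from its previous CPU when that CPU is running an RT task it cannot or should not preempt, or when the CPU no longer fits the task (capacity or softirq pressure, see rt_task_fits_cpu() at 1658-1660). A condensed sketch with locking, the sync fast path and the vendor hook elided:

	test = curr && unlikely(rt_task(curr)) &&
	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

	if (test || !rt_task_fits_cpu(p, cpu)) {
		int target = find_lowest_rq(p);

		/*
		 * We only came here looking for a better-fitting CPU; if the
		 * candidate does not fit either, stay where we are.
		 */
		if (!test && target != -1 && !rt_task_fits_cpu(p, target))
			goto out_unlock;

		/* only move if the destination is running something lower prio */
		if (target != -1 &&
		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
			cpu = target;
	}
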
1757 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1771 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1772 cpupri_find(&rq->rd->cpupri, p, NULL)) in check_preempt_equal_prio()
1780 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1784 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_rt() argument
1786 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { in balance_rt()
1796 trace_android_rvh_sched_balance_rt(rq, p, &done); in balance_rt()
1809 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_rt() argument
1811 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1829 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1830 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
1834 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) in set_next_task_rt() argument
1836 struct sched_rt_entity *rt_se = &p->rt; in set_next_task_rt()
1839 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
1840 if (on_rt_rq(&p->rt)) in set_next_task_rt()
1844 dequeue_pushable_task(rq, p); in set_next_task_rt()
1856 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0); in set_next_task_rt()
1896 struct task_struct *p; in pick_task_rt() local
1901 p = _pick_next_task_rt(rq); in pick_task_rt()
1903 return p; in pick_task_rt()
1908 struct task_struct *p = pick_task_rt(rq); in pick_next_task_rt() local
1910 if (p) in pick_next_task_rt()
1911 set_next_task_rt(rq, p, true); in pick_next_task_rt()
1913 return p; in pick_next_task_rt()
1916 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
1918 struct sched_rt_entity *rt_se = &p->rt; in put_prev_task_rt()
1921 if (on_rt_rq(&p->rt)) in put_prev_task_rt()
1927 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in put_prev_task_rt()
1933 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1934 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1942 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
1944 if (!task_on_cpu(rq, p) && in pick_rt_task()
1945 cpumask_test_cpu(cpu, &p->cpus_mask)) in pick_rt_task()
1958 struct task_struct *p; in pick_highest_pushable_task() local
1963 plist_for_each_entry(p, head, pushable_tasks) { in pick_highest_pushable_task()
1964 if (pick_rt_task(rq, p, cpu)) in pick_highest_pushable_task()
1965 return p; in pick_highest_pushable_task()
2137 struct task_struct *p; in pick_next_pushable_task() local
2142 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
2145 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
2146 BUG_ON(task_current(rq, p)); in pick_next_pushable_task()
2147 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_task()
2149 BUG_ON(!task_on_rq_queued(p)); in pick_next_pushable_task()
2150 BUG_ON(!rt_task(p)); in pick_next_pushable_task()
2152 return p; in pick_next_pushable_task()
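
pick_next_pushable_task() (2137-2152) relies on the plist being priority-sorted: the head of rq->rt.pushable_tasks is always the best push candidate, and the BUG_ON()s document the invariants the push path depends on. Reconstruction with the elided has_pushable_tasks() check filled in, following mainline:

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	/* plist is sorted by prio, so the first entry is the highest-prio candidate */
	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	/* invariants: queued on this rq, not currently running, migratable, still RT */
	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);
	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!rt_task(p));

	return p;
}
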
2465 struct task_struct *p, *push_task; in pull_rt_task() local
2519 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
2525 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2526 WARN_ON(p == src_rq->curr); in pull_rt_task()
2527 WARN_ON(!task_on_rq_queued(p)); in pull_rt_task()
2537 if (p->prio < src_rq->curr->prio) in pull_rt_task()
2540 if (is_migration_disabled(p)) { in pull_rt_task()
2543 deactivate_task(src_rq, p, 0); in pull_rt_task()
2544 set_task_cpu(p, this_cpu); in pull_rt_task()
2545 activate_task(this_rq, p, 0); in pull_rt_task()
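
Once pull_rt_task() has a higher-priority candidate on a busy sibling (2519-2537), the move at 2540-2545 is the standard cross-runqueue migration triple, with a fallback to asking the source CPU to push when the task has migration disabled. Sketch of that branch; the double-runqueue locking and resched bookkeeping around it are elided:

	if (is_migration_disabled(p)) {
		/* we may not move it ourselves; ask the source CPU to push it over */
		push_task = get_push_task(src_rq);
	} else {
		/* dequeue from the source rq, retarget the task, enqueue here */
		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, this_cpu);
		activate_task(this_rq, p, 0);
		resched = true;
	}
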
2576 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2578 bool need_to_push = !task_on_cpu(rq, p) && in task_woken_rt()
2580 p->nr_cpus_allowed > 1 && in task_woken_rt()
2583 rq->curr->prio <= p->prio); in task_woken_rt()
2615 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2624 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2646 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2652 if (task_current(rq, p)) { in switched_to_rt()
2662 if (task_on_rq_queued(p)) { in switched_to_rt()
2664 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
2667 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2677 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2679 if (!task_on_rq_queued(p)) in prio_changed_rt()
2682 if (task_current(rq, p)) { in prio_changed_rt()
2688 if (oldprio < p->prio) in prio_changed_rt()
2695 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2699 if (oldprio < p->prio) in prio_changed_rt()
2708 if (p->prio < rq->curr->prio) in prio_changed_rt()
2714 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2719 soft = task_rlimit(p, RLIMIT_RTTIME); in watchdog()
2720 hard = task_rlimit_max(p, RLIMIT_RTTIME); in watchdog()
2725 if (p->rt.watchdog_stamp != jiffies) { in watchdog()
2726 p->rt.timeout++; in watchdog()
2727 p->rt.watchdog_stamp = jiffies; in watchdog()
2731 if (p->rt.timeout > next) { in watchdog()
2732 posix_cputimers_rt_watchdog(&p->posix_cputimers, in watchdog()
2733 p->se.sum_exec_runtime); in watchdog()
2738 static inline void watchdog(struct rq *rq, struct task_struct *p) { } in watchdog() argument
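
watchdog() (2714-2733) enforces RLIMIT_RTTIME: p->rt.timeout counts scheduler ticks the task has spent running, and once that exceeds the soft limit (converted from microseconds to ticks) the POSIX CPU-timer code is told to fire. Sketch following the mainline layout:

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		/* count at most one tick per jiffy, even if we are called twice */
		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next) {
			posix_cputimers_rt_watchdog(&p->posix_cputimers,
						    p->se.sum_exec_runtime);
		}
	}
}
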
2749 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2751 struct sched_rt_entity *rt_se = &p->rt; in task_tick_rt()
2755 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in task_tick_rt()
2757 watchdog(rq, p); in task_tick_rt()
2763 if (p->policy != SCHED_RR) in task_tick_rt()
2766 if (--p->rt.time_slice) in task_tick_rt()
2769 p->rt.time_slice = sched_rr_timeslice; in task_tick_rt()
2777 requeue_task_rt(rq, p, 0); in task_tick_rt()
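
The tail of task_tick_rt() (2763-2777) implements SCHED_RR round-robin: FIFO tasks return early, RR tasks burn one tick of their slice, and on expiry they get a fresh sched_rr_timeslice and are requeued behind their priority peers, but only if they are not alone at that priority. Reconstruction following mainline:

	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the tail only if the task (or one of its ancestors) is
	 * not the sole entity on its run list; otherwise there is no one to
	 * round-robin with.
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
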