Lines Matching refs:p
301 struct task_struct *p = rt_task_of(rt_se); in rq_of_rt_se() local
303 return task_rq(p); in rq_of_rt_se()
389 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
391 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
392 plist_node_init(&p->pushable_tasks, p->prio); in enqueue_pushable_task()
393 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
396 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
397 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
405 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
407 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
411 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
413 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
426 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
430 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
462 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
472 min_cap = uclamp_eff_value(p, UCLAMP_MIN); in rt_task_fits_capacity()
473 max_cap = uclamp_eff_value(p, UCLAMP_MAX); in rt_task_fits_capacity()
480 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
577 struct task_struct *p; in rt_se_boosted() local
582 p = rt_task_of(rt_se); in rt_se_boosted()
583 return p->prio != p->normal_prio; in rt_se_boosted()
1285 struct task_struct *p = NULL; in update_stats_wait_start_rt() local
1291 p = rt_task_of(rt_se); in update_stats_wait_start_rt()
1297 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_start_rt()
1304 struct task_struct *p = NULL; in update_stats_enqueue_sleeper_rt() local
1310 p = rt_task_of(rt_se); in update_stats_enqueue_sleeper_rt()
1316 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); in update_stats_enqueue_sleeper_rt()
1334 struct task_struct *p = NULL; in update_stats_wait_end_rt() local
1340 p = rt_task_of(rt_se); in update_stats_wait_end_rt()
1346 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_end_rt()
1353 struct task_struct *p = NULL; in update_stats_dequeue_rt() local
1359 p = rt_task_of(rt_se); in update_stats_dequeue_rt()
1361 if ((flags & DEQUEUE_SLEEP) && p) { in update_stats_dequeue_rt()
1364 state = READ_ONCE(p->__state); in update_stats_dequeue_rt()
1366 __schedstat_set(p->stats.sleep_start, in update_stats_dequeue_rt()
1370 __schedstat_set(p->stats.block_start, in update_stats_dequeue_rt()
1477 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
1486 p->prio <= rq->rt.highest_prio.next && in should_honor_rt_sync()
1490 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
1501 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1503 struct sched_rt_entity *rt_se = &p->rt; in enqueue_task_rt()
1514 if (should_honor_rt_sync(rq, p, sync)) in enqueue_task_rt()
1517 if (task_is_blocked(p)) in enqueue_task_rt()
1520 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1521 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1524 static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1526 struct sched_rt_entity *rt_se = &p->rt; in dequeue_task_rt()
1531 dequeue_pushable_task(rq, p); in dequeue_task_rt()
1554 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1556 struct sched_rt_entity *rt_se = &p->rt; in requeue_task_rt()
1590 static bool rt_task_fits_cpu(struct task_struct *p, int cpu) in rt_task_fits_cpu() argument
1592 return rt_task_fits_capacity(p, cpu) && !cpu_busy_with_softirqs(cpu); in rt_task_fits_cpu()
1596 select_task_rq_rt(struct task_struct *p, int cpu, int flags) in select_task_rq_rt() argument
1606 trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF, in select_task_rq_rt()
1653 (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio); in select_task_rq_rt()
1658 if (should_honor_rt_sync(this_cpu_rq, p, sync) && in select_task_rq_rt()
1659 cpumask_test_cpu(this_cpu, p->cpus_ptr)) { in select_task_rq_rt()
1664 if (test || !rt_task_fits_cpu(p, cpu)) { in select_task_rq_rt()
1665 int target = find_lowest_rq(p, p); in select_task_rq_rt()
1671 if (!test && target != -1 && !rt_task_fits_cpu(p, target)) in select_task_rq_rt()
1679 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
1690 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1692 struct task_struct *exec_ctx = p; in check_preempt_equal_prio()
1702 exec_ctx = find_exec_ctx(rq, p); in check_preempt_equal_prio()
1710 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1711 cpupri_find(&rq->rd->cpupri, p, exec_ctx, NULL)) in check_preempt_equal_prio()
1719 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1723 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_rt() argument
1725 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { in balance_rt()
1735 trace_android_rvh_sched_balance_rt(rq, p, &done); in balance_rt()
1748 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_rt() argument
1752 if (p->prio < donor->prio) { in wakeup_preempt_rt()
1770 if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr)) in wakeup_preempt_rt()
1771 check_preempt_equal_prio(rq, p); in wakeup_preempt_rt()
1775 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) in set_next_task_rt() argument
1777 struct sched_rt_entity *rt_se = &p->rt; in set_next_task_rt()
1780 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
1781 if (on_rt_rq(&p->rt)) in set_next_task_rt()
1785 dequeue_pushable_task(rq, p); in set_next_task_rt()
1797 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0); in set_next_task_rt()
1837 struct task_struct *p; in pick_task_rt() local
1842 p = _pick_next_task_rt(rq); in pick_task_rt()
1844 return p; in pick_task_rt()
1847 static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next) in put_prev_task_rt() argument
1849 struct sched_rt_entity *rt_se = &p->rt; in put_prev_task_rt()
1852 if (on_rt_rq(&p->rt)) in put_prev_task_rt()
1858 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in put_prev_task_rt()
1860 if (task_is_blocked(p)) in put_prev_task_rt()
1866 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1867 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1882 struct task_struct *p; in pick_highest_pushable_task() local
1887 plist_for_each_entry(p, head, pushable_tasks) { in pick_highest_pushable_task()
1888 if (task_is_pushable(rq, p, cpu) == 1) in pick_highest_pushable_task()
1889 return p; in pick_highest_pushable_task()
2001 struct task_struct *p, *push_task = NULL; in pick_next_pushable_task() local
2006 plist_for_each_entry(p, head, pushable_tasks) { in pick_next_pushable_task()
2007 if (task_is_pushable(rq, p, 0)) { in pick_next_pushable_task()
2008 push_task = p; in pick_next_pushable_task()
2465 struct task_struct *p, *push_task; in pull_rt_task() local
2519 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
2525 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2526 WARN_ON(p == src_rq->curr); in pull_rt_task()
2527 WARN_ON(!task_on_rq_queued(p)); in pull_rt_task()
2537 if (p->prio < src_rq->donor->prio) in pull_rt_task()
2540 if (is_migration_disabled(p)) { in pull_rt_task()
2543 move_queued_task_locked(src_rq, this_rq, p); in pull_rt_task()
2574 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2576 bool need_to_push = !task_on_cpu(rq, p) && in task_woken_rt()
2578 p->nr_cpus_allowed > 1 && in task_woken_rt()
2581 rq->donor->prio <= p->prio); in task_woken_rt()
2613 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2622 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2644 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2650 if (task_current(rq, p)) { in switched_to_rt()
2660 if (task_on_rq_queued(p)) { in switched_to_rt()
2662 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
2665 if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2675 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2677 if (!task_on_rq_queued(p)) in prio_changed_rt()
2680 if (task_current_donor(rq, p)) { in prio_changed_rt()
2686 if (oldprio < p->prio) in prio_changed_rt()
2693 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2697 if (oldprio < p->prio) in prio_changed_rt()
2706 if (p->prio < rq->donor->prio) in prio_changed_rt()
2712 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2717 soft = task_rlimit(p, RLIMIT_RTTIME); in watchdog()
2718 hard = task_rlimit_max(p, RLIMIT_RTTIME); in watchdog()
2723 if (p->rt.watchdog_stamp != jiffies) { in watchdog()
2724 p->rt.timeout++; in watchdog()
2725 p->rt.watchdog_stamp = jiffies; in watchdog()
2729 if (p->rt.timeout > next) { in watchdog()
2730 posix_cputimers_rt_watchdog(&p->posix_cputimers, in watchdog()
2731 p->se.sum_exec_runtime); in watchdog()
2736 static inline void watchdog(struct rq *rq, struct task_struct *p) { } in watchdog() argument
2747 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2749 struct sched_rt_entity *rt_se = &p->rt; in task_tick_rt()
2753 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in task_tick_rt()
2755 watchdog(rq, p); in task_tick_rt()
2761 if (p->policy != SCHED_RR) in task_tick_rt()
2764 if (--p->rt.time_slice) in task_tick_rt()
2767 p->rt.time_slice = sched_rr_timeslice; in task_tick_rt()
2775 requeue_task_rt(rq, p, 0); in task_tick_rt()
2794 static int task_is_throttled_rt(struct task_struct *p, int cpu) in task_is_throttled_rt() argument
2799 rt_rq = task_group(p)->rt_rq[cpu]; in task_is_throttled_rt()
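
The densest pattern in the listing is the pushable-task bookkeeping (enqueue_pushable_task(), dequeue_pushable_task(), pick_next_pushable_task(), pick_highest_pushable_task()): tasks that may be pushed to another CPU sit on a priority-sorted plist, and rq->rt.highest_prio.next mirrors the priority of the head of that list (lower numeric prio means higher priority). The sketch below is a minimal userspace approximation of that idea only; it uses a hand-rolled sorted list instead of the kernel's plist API, and fake_task, fake_rt_rq and PRIO_NONE are illustrative names, not kernel identifiers.

	/*
	 * Userspace sketch (not the kernel plist API): keep pushable tasks
	 * sorted by prio and mirror the head's prio into highest_prio_next,
	 * as enqueue_pushable_task()/dequeue_pushable_task() do above.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define PRIO_NONE 1000 /* stand-in for "no pushable task" sentinel */

	struct fake_task {
		int prio;                /* lower value = higher priority */
		struct fake_task *next;
	};

	struct fake_rt_rq {
		struct fake_task *pushable_head; /* sorted, best prio first */
		int highest_prio_next;           /* prio of best pushable task */
	};

	/* Insert in priority order, then refresh highest_prio_next. */
	static void enqueue_pushable(struct fake_rt_rq *rt, struct fake_task *p)
	{
		struct fake_task **pp = &rt->pushable_head;

		while (*pp && (*pp)->prio <= p->prio)
			pp = &(*pp)->next;
		p->next = *pp;
		*pp = p;

		if (p->prio < rt->highest_prio_next)
			rt->highest_prio_next = p->prio;
	}

	/* Unlink the task and recompute highest_prio_next from the new head. */
	static void dequeue_pushable(struct fake_rt_rq *rt, struct fake_task *p)
	{
		struct fake_task **pp = &rt->pushable_head;

		while (*pp && *pp != p)
			pp = &(*pp)->next;
		if (*pp)
			*pp = p->next;

		rt->highest_prio_next = rt->pushable_head ?
					rt->pushable_head->prio : PRIO_NONE;
	}

	int main(void)
	{
		struct fake_rt_rq rt = { NULL, PRIO_NONE };
		struct fake_task a = { .prio = 40 }, b = { .prio = 20 };

		enqueue_pushable(&rt, &a);
		enqueue_pushable(&rt, &b);
		printf("next pushable prio: %d\n", rt.highest_prio_next); /* 20 */

		dequeue_pushable(&rt, &b);
		printf("next pushable prio: %d\n", rt.highest_prio_next); /* 40 */
		return 0;
	}

This mirrors only the ordering invariant visible in the listed lines; the real code additionally re-initializes the plist node on enqueue and only queues tasks with nr_cpus_allowed > 1 that are not currently running.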
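
A second recurring piece is the SCHED_RR tick handling in task_tick_rt(): each tick decrements p->rt.time_slice, and only when it reaches zero is the slice refilled from sched_rr_timeslice and the task requeued to the tail of its priority queue. The fragment below is a standalone sketch of that control flow under assumed names (SCHED_RR_TIMESLICE, fake_rr_task, rr_tick are illustrative, not kernel symbols), not the scheduler code itself.

	#include <stdbool.h>
	#include <stdio.h>

	#define SCHED_RR_TIMESLICE 100 /* stand-in for sched_rr_timeslice, in ticks */

	struct fake_rr_task {
		int time_slice; /* remaining ticks in the current RR slice */
	};

	/*
	 * Returns true when the slice is exhausted, i.e. when the caller would
	 * requeue the task to the tail (requeue_task_rt(rq, p, 0)) and request
	 * a reschedule, as the end of task_tick_rt() does above.
	 */
	static bool rr_tick(struct fake_rr_task *p)
	{
		if (--p->time_slice)
			return false;               /* slice not yet used up */

		p->time_slice = SCHED_RR_TIMESLICE; /* refill for the next round */
		return true;
	}

	int main(void)
	{
		struct fake_rr_task t = { .time_slice = 3 };

		for (int tick = 1; tick <= 4; tick++)
			printf("tick %d: requeue=%d\n", tick, rr_tick(&t));
		return 0;
	}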