Lines Matching refs:p

252 struct task_struct *p = rt_task_of(rt_se); in rq_of_rt_se() local
254 return task_rq(p); in rq_of_rt_se()
333 struct task_struct *p; in inc_rt_migration() local
338 p = rt_task_of(rt_se); in inc_rt_migration()
342 if (p->nr_cpus_allowed > 1) in inc_rt_migration()
350 struct task_struct *p; in dec_rt_migration() local
355 p = rt_task_of(rt_se); in dec_rt_migration()
359 if (p->nr_cpus_allowed > 1) in dec_rt_migration()
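
The inc_rt_migration()/dec_rt_migration() hunks above keep a running count of queued RT tasks that are allowed on more than one CPU (p->nr_cpus_allowed > 1); that count is what later marks a run-queue as overloaded for the push/pull machinery. A minimal userspace sketch of just the counting rule (struct rt_rq_model and its fields are illustrative stand-ins, not the kernel's rt_rq):

#include <assert.h>

/* Simplified stand-in for the migration bookkeeping in rt_rq. */
struct rt_rq_model {
    unsigned int rt_nr_total;      /* all queued RT tasks */
    unsigned int rt_nr_migratory;  /* those with nr_cpus_allowed > 1 */
};

static void inc_rt_migration(struct rt_rq_model *rt_rq, int nr_cpus_allowed)
{
    rt_rq->rt_nr_total++;
    if (nr_cpus_allowed > 1)
        rt_rq->rt_nr_migratory++;
    /* the kernel then refreshes the rq's overloaded state */
}

static void dec_rt_migration(struct rt_rq_model *rt_rq, int nr_cpus_allowed)
{
    assert(rt_rq->rt_nr_total > 0);
    rt_rq->rt_nr_total--;
    if (nr_cpus_allowed > 1)
        rt_rq->rt_nr_migratory--;
}

int main(void)
{
    struct rt_rq_model rq = { 0, 0 };
    inc_rt_migration(&rq, 4);      /* a task that may run on 4 CPUs */
    inc_rt_migration(&rq, 1);      /* a pinned task                 */
    dec_rt_migration(&rq, 4);
    return rq.rt_nr_migratory;     /* 0 */
}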
389 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
391 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
392 plist_node_init(&p->pushable_tasks, p->prio); in enqueue_pushable_task()
393 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
396 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
397 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
400 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
402 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
406 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
408 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
416 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
420 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
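
enqueue_pushable_task() keeps the run-queue's migratable RT tasks in a priority-ordered plist, deleting and re-inserting the node so a changed priority re-sorts it, and both enqueue and dequeue maintain rq->rt.highest_prio.next as a cache of the best pushable priority (on !SMP builds the two stubs above compile the mechanism away). A compilable sketch of the same ordering rule, using a plain sorted singly-linked list as a stand-in for the kernel's plist (all names are illustrative):

#include <stdio.h>

struct ptask {                 /* stand-in for task_struct's pushable node */
    int prio;                  /* lower value == higher priority */
    struct ptask *next;
};

/* Insert in ascending prio order, like plist_add() on p->pushable_tasks;
 * equal priorities keep FIFO order. */
static void enqueue_pushable(struct ptask **head, struct ptask *p)
{
    struct ptask **pp = head;
    while (*pp && (*pp)->prio <= p->prio)
        pp = &(*pp)->next;
    p->next = *pp;
    *pp = p;
}

/* The list head is the best pushable prio, i.e. highest_prio.next. */
static int highest_prio_next(struct ptask *head)
{
    return head ? head->prio : 100;    /* 100 ~ MAX_RT_PRIO: none left */
}

int main(void)
{
    struct ptask a = { .prio = 50 }, b = { .prio = 10 }, *head = NULL;
    enqueue_pushable(&head, &a);
    enqueue_pushable(&head, &b);
    printf("highest_prio.next = %d\n", highest_prio_next(head));  /* 10 */
    return 0;
}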
471 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
481 min_cap = uclamp_eff_value(p, UCLAMP_MIN); in rt_task_fits_capacity()
482 max_cap = uclamp_eff_value(p, UCLAMP_MAX); in rt_task_fits_capacity()
489 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
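
rt_task_fits_capacity() is the asymmetric-CPU check: the task fits a CPU when the CPU's original capacity covers the task's clamped utilization demand, taken from its effective uclamp min/max; the second definition is the stub used when the check is compiled out and every CPU trivially fits. A userspace model of the comparison (capacity numbers are illustrative; uclamp_eff_value() and capacity_orig_of() are replaced by plain parameters):

#include <stdbool.h>
#include <stdio.h>

static bool rt_task_fits_capacity(unsigned int min_cap, unsigned int max_cap,
                                  unsigned int cpu_cap)
{
    /* The task fits if the CPU covers its clamped minimum demand. */
    unsigned int need = min_cap < max_cap ? min_cap : max_cap;
    return cpu_cap >= need;
}

int main(void)
{
    /* a big core (1024) fits a task clamped to >= 512; a little (384) doesn't */
    printf("%d %d\n", rt_task_fits_capacity(512, 1024, 1024),
                      rt_task_fits_capacity(512, 1024, 384));
    return 0;
}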
586 struct task_struct *p; in rt_se_boosted() local
591 p = rt_task_of(rt_se); in rt_se_boosted()
592 return p->prio != p->normal_prio; in rt_se_boosted()
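
rt_se_boosted() detects priority inheritance: an entity is boosted exactly when the task's effective prio differs from its normal_prio. The predicate, modeled standalone:

#include <stdbool.h>

/* PI-boosted iff the effective prio was raised above the task's own setting. */
static bool rt_se_boosted(int prio, int normal_prio)
{
    return prio != normal_prio;
}

int main(void) { return rt_se_boosted(10, 10); }   /* 0: not boosted */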
1399 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
1408 p->prio <= rq->rt.highest_prio.next && in should_honor_rt_sync()
1412 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
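
should_honor_rt_sync() gates the synchronous-wakeup fast path: among the conditions excerpted above, the wakee may stay on the waker's CPU only if its priority is at least as urgent as the best task this CPU could otherwise push away (p->prio <= rq->rt.highest_prio.next). A minimal model of that comparison (the kernel's remaining conditions are elided here):

#include <stdbool.h>
#include <stdio.h>

/* Lower numeric prio == more urgent, as in the kernel. */
static bool should_honor_rt_sync(bool sync, int p_prio, int highest_prio_next)
{
    return sync && p_prio <= highest_prio_next;
}

int main(void)
{
    printf("%d\n", should_honor_rt_sync(true, 10, 50));   /* 1: stay local */
    return 0;
}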
1423 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1425 struct sched_rt_entity *rt_se = &p->rt; in enqueue_task_rt()
1433 if (!task_current(rq, p) && p->nr_cpus_allowed > 1 && in enqueue_task_rt()
1434 !should_honor_rt_sync(rq, p, sync)) in enqueue_task_rt()
1435 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1438 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1440 struct sched_rt_entity *rt_se = &p->rt; in dequeue_task_rt()
1445 dequeue_pushable_task(rq, p); in dequeue_task_rt()
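
enqueue_task_rt() additionally marks the task pushable, but only when it is not the CPU's current task, can actually migrate (nr_cpus_allowed > 1), and is not a sync wakeup that should_honor_rt_sync() decided to keep local; dequeue_task_rt() unconditionally takes it back off the pushable list. The gating condition, modeled with the task fields as plain parameters:

#include <assert.h>
#include <stdbool.h>

/* Should enqueue_task_rt() also mark the task pushable? Mirrors the excerpt:
 * not running here, able to run elsewhere, and not a sync wakeup that
 * should_honor_rt_sync() chose to keep on this CPU. */
static bool mark_pushable(bool is_current, int nr_cpus_allowed,
                          bool honor_rt_sync)
{
    return !is_current && nr_cpus_allowed > 1 && !honor_rt_sync;
}

int main(void) { assert(mark_pushable(false, 4, false)); return 0; }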
1466 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1468 struct sched_rt_entity *rt_se = &p->rt; in requeue_task_rt()
1507 select_task_rq_rt(struct task_struct *p, int cpu, int flags) in select_task_rq_rt() argument
1518 trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF, in select_task_rq_rt()
1568 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio)))); in select_task_rq_rt()
1573 if (should_honor_rt_sync(this_cpu_rq, p, sync) && in select_task_rq_rt()
1574 cpumask_test_cpu(this_cpu, p->cpus_ptr)) { in select_task_rq_rt()
1579 if (test || !rt_task_fits_capacity(p, cpu)) { in select_task_rq_rt()
1580 int target = find_lowest_rq(p); in select_task_rq_rt()
1586 if (!test && target != -1 && !rt_task_fits_capacity(p, target)) in select_task_rq_rt()
1597 p->prio < cpu_rq(target)->rt.highest_prio.curr)) in select_task_rq_rt()
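
select_task_rq_rt() only goes looking for another run-queue when the current task on the candidate CPU looks hard to preempt: an RT task that is pinned (nr_cpus_allowed < 2) or at least as urgent as the waker (curr->prio <= p->prio). When that test holds, or the task does not fit the CPU's capacity, find_lowest_rq() proposes a target, which is used only if the wakee would outrank that queue's top priority. A compilable skeleton of the decision (find_lowest_rq_stub() and the priority table are illustrative; the real path also handles the sync and capacity cases shown above):

#include <stdio.h>

#define NR_CPUS 4

/* highest RT prio currently queued per CPU; lower value = more urgent */
static int highest_prio[NR_CPUS] = { 10, 40, 90, 70 };

/* Illustrative stand-in for find_lowest_rq(): CPU with the least urgent top task. */
static int find_lowest_rq_stub(void)
{
    int cpu, best = 0;
    for (cpu = 1; cpu < NR_CPUS; cpu++)
        if (highest_prio[cpu] > highest_prio[best])
            best = cpu;
    return best;
}

static int select_cpu(int p_prio, int cpu, int curr_prio, int curr_nr_cpus)
{
    /* "test": curr is hard to preempt -- pinned or as urgent as p */
    int test = curr_nr_cpus < 2 || curr_prio <= p_prio;

    if (test) {
        int target = find_lowest_rq_stub();
        /* only move if the target queue is genuinely less contended */
        if (target != -1 && p_prio < highest_prio[target])
            return target;
    }
    return cpu;    /* otherwise stay put */
}

int main(void)
{
    printf("picked CPU %d\n", select_cpu(20, 0, 10, 1));   /* -> CPU 2 */
    return 0;
}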
1608 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1622 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1623 cpupri_find(&rq->rd->cpupri, p, NULL)) in check_preempt_equal_prio()
1631 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1635 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_rt() argument
1637 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { in balance_rt()
1647 trace_android_rvh_sched_balance_rt(rq, p, &done); in balance_rt()
1660 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_rt() argument
1662 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1680 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1681 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
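
check_preempt_curr_rt() preempts strictly by priority: a more urgent wakee reschedules the current task, while an equal-priority wakee never preempts directly but may, via check_preempt_equal_prio(), be requeued to the head and offered to another CPU that cpupri_find() says can run it. The core decision as a model:

#include <assert.h>
#include <stdbool.h>

enum preempt_action { PREEMPT, TRY_PUSH_EQUAL, NOTHING };

static enum preempt_action check_preempt(int p_prio, int curr_prio,
                                         bool need_resched_already)
{
    if (p_prio < curr_prio)                     /* strictly more urgent */
        return PREEMPT;
    if (p_prio == curr_prio && !need_resched_already)
        return TRY_PUSH_EQUAL;                  /* check_preempt_equal_prio() */
    return NOTHING;
}

int main(void) { assert(check_preempt(5, 10, false) == PREEMPT); return 0; }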
1685 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) in set_next_task_rt() argument
1687 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
1690 dequeue_pushable_task(rq, p); in set_next_task_rt()
1702 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0); in set_next_task_rt()
1742 struct task_struct *p; in pick_task_rt() local
1747 p = _pick_next_task_rt(rq); in pick_task_rt()
1749 return p; in pick_task_rt()
1754 struct task_struct *p = pick_task_rt(rq); in pick_next_task_rt() local
1756 if (p) in pick_next_task_rt()
1757 set_next_task_rt(rq, p, true); in pick_next_task_rt()
1759 return p; in pick_next_task_rt()
1762 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
1767 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in put_prev_task_rt()
1773 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1774 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1782 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
1784 if (!task_running(rq, p) && in pick_rt_task()
1785 cpumask_test_cpu(cpu, &p->cpus_mask)) in pick_rt_task()
1798 struct task_struct *p; in pick_highest_pushable_task() local
1803 plist_for_each_entry(p, head, pushable_tasks) { in pick_highest_pushable_task()
1804 if (pick_rt_task(rq, p, cpu)) in pick_highest_pushable_task()
1805 return p; in pick_highest_pushable_task()
1974 struct task_struct *p; in pick_next_pushable_task() local
1979 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
1982 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
1983 BUG_ON(task_current(rq, p)); in pick_next_pushable_task()
1984 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_task()
1986 BUG_ON(!task_on_rq_queued(p)); in pick_next_pushable_task()
1987 BUG_ON(!rt_task(p)); in pick_next_pushable_task()
1989 return p; in pick_next_pushable_task()
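
The BUG_ON() chain in pick_next_pushable_task() spells out the invariants a push candidate must satisfy: it belongs to this run-queue, is not currently running, can run elsewhere, is still queued, and is an RT task. The same pick-then-sanity-check pattern in userspace, with assert() standing in for BUG_ON() and struct mtask as an illustrative stand-in for task_struct:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct mtask {
    int cpu, prio, nr_cpus_allowed;
    bool queued, running, is_rt;
    struct mtask *next;              /* priority-ordered pushable list */
};

static struct mtask *pick_next_pushable(struct mtask *head, int this_cpu)
{
    struct mtask *p = head;          /* head == highest-priority pushable */
    if (!p)
        return NULL;
    assert(p->cpu == this_cpu);      /* BUG_ON(rq->cpu != task_cpu(p)) */
    assert(!p->running);             /* BUG_ON(task_current(rq, p))    */
    assert(p->nr_cpus_allowed > 1);
    assert(p->queued);               /* BUG_ON(!task_on_rq_queued(p))  */
    assert(p->is_rt);                /* BUG_ON(!rt_task(p))            */
    return p;
}

int main(void)
{
    struct mtask t = { .cpu = 0, .prio = 10, .nr_cpus_allowed = 4,
                       .queued = true, .running = false, .is_rt = true };
    return pick_next_pushable(&t, 0) == &t ? 0 : 1;
}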
2302 struct task_struct *p, *push_task; in pull_rt_task() local
2356 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
2362 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2363 WARN_ON(p == src_rq->curr); in pull_rt_task()
2364 WARN_ON(!task_on_rq_queued(p)); in pull_rt_task()
2374 if (p->prio < src_rq->curr->prio) in pull_rt_task()
2377 if (is_migration_disabled(p)) { in pull_rt_task()
2380 deactivate_task(src_rq, p, 0); in pull_rt_task()
2381 set_task_cpu(p, this_cpu); in pull_rt_task()
2382 activate_task(this_rq, p, 0); in pull_rt_task()
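
pull_rt_task() walks the other overloaded run-queues and migrates their best pushable task here only when two checks pass: the task must outrank everything already queued locally (p->prio < this_rq->rt.highest_prio.curr), yet not outrank its own CPU's current task, since in that case (the p->prio < src_rq->curr->prio test) that CPU is about to schedule it anyway; migration-disabled tasks are instead left to push themselves over. The deactivate_task()/set_task_cpu()/activate_task() triple is the migration itself. A toy model of the pull decision (parameter names are illustrative stand-ins for the run-queue fields):

#include <stdio.h>

/* Should this CPU pull a task of prio @p_prio from a source CPU?
 * this_best models this_rq->rt.highest_prio.curr; src_curr models
 * the prio of the task the source CPU is running. */
static int should_pull(int p_prio, int this_best, int src_curr)
{
    if (p_prio >= this_best)    /* no better than what we already have */
        return 0;
    if (p_prio < src_curr)      /* p outranks src's current: src runs it next */
        return 0;
    return 1;
}

int main(void)
{
    /* prio 30 beats our best (50) and won't be run by src (its curr is 20) */
    printf("pull: %d\n", should_pull(30, 50, 20));   /* 1 */
    return 0;
}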
2413 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2415 bool need_to_push = !task_running(rq, p) && in task_woken_rt()
2417 p->nr_cpus_allowed > 1 && in task_woken_rt()
2420 rq->curr->prio <= p->prio); in task_woken_rt()
2452 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2461 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2483 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2489 if (task_current(rq, p)) { in switched_to_rt()
2499 if (task_on_rq_queued(p)) { in switched_to_rt()
2501 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
2504 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2514 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2516 if (!task_on_rq_queued(p)) in prio_changed_rt()
2519 if (task_current(rq, p)) { in prio_changed_rt()
2525 if (oldprio < p->prio) in prio_changed_rt()
2532 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2536 if (oldprio < p->prio) in prio_changed_rt()
2545 if (p->prio < rq->curr->prio) in prio_changed_rt()
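
prio_changed_rt() reacts to a priority change on a queued task depending on whether it is running: the current task gives up the CPU when it was deprioritized below the best of its own queue (oldprio < p->prio and p->prio > rq->rt.highest_prio.curr, after first trying to pull a replacement), while a non-running task forces a resched only once it outranks the current one. A compressed model of that decision table (the SMP pull step is folded away here):

enum action { RESCHED, NONE };

static enum action prio_changed(int oldprio, int p_prio, int is_current,
                                int rq_highest_prio, int curr_prio)
{
    if (is_current) {
        /* was deprioritized below someone else queued here */
        if (oldprio < p_prio && p_prio > rq_highest_prio)
            return RESCHED;
    } else if (p_prio < curr_prio) {
        /* became more urgent than the running task */
        return RESCHED;
    }
    return NONE;
}

int main(void) { return prio_changed(10, 40, 1, 30, 0) == RESCHED ? 0 : 1; }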
2551 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2556 soft = task_rlimit(p, RLIMIT_RTTIME); in watchdog()
2557 hard = task_rlimit_max(p, RLIMIT_RTTIME); in watchdog()
2562 if (p->rt.watchdog_stamp != jiffies) { in watchdog()
2563 p->rt.timeout++; in watchdog()
2564 p->rt.watchdog_stamp = jiffies; in watchdog()
2568 if (p->rt.timeout > next) { in watchdog()
2569 posix_cputimers_rt_watchdog(&p->posix_cputimers, in watchdog()
2570 p->se.sum_exec_runtime); in watchdog()
2575 static inline void watchdog(struct rq *rq, struct task_struct *p) { } in watchdog() argument
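
watchdog() enforces RLIMIT_RTTIME: each jiffy the task is observed on the CPU bumps p->rt.timeout exactly once (the watchdog_stamp check), and once the count exceeds the soft limit converted from microseconds to ticks, posix_cputimers_rt_watchdog() arms signal delivery. A self-contained userspace model of the per-tick accounting (HZ and the limits are illustrative values):

#include <stdio.h>

#define HZ            100          /* illustrative tick rate */
#define USEC_PER_SEC  1000000UL
#define RLIM_INFINITY (~0UL)

struct wd_state {
    unsigned long timeout;         /* ticks of runtime so far */
    unsigned long watchdog_stamp;  /* last jiffy we counted   */
};

/* Returns 1 when the soft RLIMIT_RTTIME budget is exhausted. */
static int watchdog_tick(struct wd_state *w, unsigned long jiffies,
                         unsigned long soft_usec, unsigned long hard_usec)
{
    unsigned long limit, next;

    if (soft_usec == RLIM_INFINITY)
        return 0;

    if (w->watchdog_stamp != jiffies) {    /* count each jiffy once */
        w->timeout++;
        w->watchdog_stamp = jiffies;
    }

    limit = soft_usec < hard_usec ? soft_usec : hard_usec;
    next = (limit + USEC_PER_SEC / HZ - 1) / (USEC_PER_SEC / HZ); /* DIV_ROUND_UP */
    return w->timeout > next;
}

int main(void)
{
    struct wd_state w = { 0, 0 };
    unsigned long j, fired = 0;
    for (j = 1; j <= 300 && !fired; j++)   /* 20000us soft = 2 ticks @ HZ=100 */
        fired = watchdog_tick(&w, j, 20000, RLIM_INFINITY);
    printf("fired at jiffy %lu\n", j - 1); /* the 3rd tick exceeds the budget */
    return 0;
}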
2586 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2588 struct sched_rt_entity *rt_se = &p->rt; in task_tick_rt()
2592 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in task_tick_rt()
2594 watchdog(rq, p); in task_tick_rt()
2600 if (p->policy != SCHED_RR) in task_tick_rt()
2603 if (--p->rt.time_slice) in task_tick_rt()
2606 p->rt.time_slice = sched_rr_timeslice; in task_tick_rt()
2614 requeue_task_rt(rq, p, 0); in task_tick_rt()
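
task_tick_rt() implements SCHED_RR on top of this: SCHED_FIFO tasks have no timeslice and return early, while an RR task's slice is decremented every tick and, on hitting zero, refilled from sched_rr_timeslice and the task requeued at the tail of its priority level (requeue_task_rt(rq, p, 0), head == 0) so equal-priority peers rotate. A self-contained model of the slice accounting (RR_TIMESLICE is an illustrative constant):

#include <stdio.h>

#define SCHED_FIFO   1
#define SCHED_RR     2
#define RR_TIMESLICE 10            /* illustrative sched_rr_timeslice, in ticks */

/* Returns 1 when the task should be requeued to the tail of its prio level. */
static int task_tick_rr(int policy, int *time_slice)
{
    if (policy != SCHED_RR)        /* SCHED_FIFO runs until it blocks/yields */
        return 0;
    if (--*time_slice)             /* slice not yet exhausted */
        return 0;
    *time_slice = RR_TIMESLICE;    /* refill and round-robin to the tail */
    return 1;
}

int main(void)
{
    int slice = 3, tick;
    for (tick = 1; tick <= 4; tick++)
        printf("tick %d: requeue=%d slice=%d\n",
               tick, task_tick_rr(SCHED_RR, &slice), slice);
    return 0;
}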