Lines Matching refs: p (kernel/sched/rt.c)

239 struct task_struct *p = rt_task_of(rt_se); in rq_of_rt_se() local
241 return task_rq(p); in rq_of_rt_se()
318 struct task_struct *p; in inc_rt_migration() local
323 p = rt_task_of(rt_se); in inc_rt_migration()
327 if (p->nr_cpus_allowed > 1) in inc_rt_migration()
335 struct task_struct *p; in dec_rt_migration() local
340 p = rt_task_of(rt_se); in dec_rt_migration()
344 if (p->nr_cpus_allowed > 1) in dec_rt_migration()
374 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
376 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
377 plist_node_init(&p->pushable_tasks, p->prio); in enqueue_pushable_task()
378 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
381 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
382 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
385 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
387 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
391 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
393 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
400 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
404 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
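
The matches at 374-404 show how pushable tasks are kept in a per-runqueue priority-sorted plist: re-queueing a task is delete, re-init the node at the task's current prio, then re-add (377-378), with rq->rt.highest_prio.next caching the best push candidate (381-393). Below is a minimal userspace sketch of that delete/re-init/add pattern; fake_task, pushable_add and pushable_del are illustrative stand-ins, not the kernel's <linux/plist.h> API.

#include <stdio.h>

struct fake_task {
	int prio;                 /* lower value = higher priority, as in the kernel */
	struct fake_task *next;
};

/* Sorted insert: the head is always the highest-priority (lowest prio
 * value) entry; equal-prio tasks queue behind existing ones. */
static void pushable_add(struct fake_task **head, struct fake_task *p)
{
	while (*head && (*head)->prio <= p->prio)
		head = &(*head)->next;
	p->next = *head;
	*head = p;
}

static void pushable_del(struct fake_task **head, struct fake_task *p)
{
	while (*head && *head != p)
		head = &(*head)->next;
	if (*head)
		*head = p->next;
}

int main(void)
{
	struct fake_task a = { .prio = 40 }, b = { .prio = 10 };
	struct fake_task *head = NULL;

	pushable_add(&head, &a);
	pushable_add(&head, &b);

	/* Re-queue 'a' at a boosted priority: del, set prio, add again,
	 * the same three-step sequence as lines 376-378. */
	pushable_del(&head, &a);
	a.prio = 5;
	pushable_add(&head, &a);

	/* The head now plays the role of highest_prio.next. */
	printf("next pushable prio: %d\n", head->prio);
	return 0;
}
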
455 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
465 min_cap = uclamp_eff_value(p, UCLAMP_MIN); in rt_task_fits_capacity()
466 max_cap = uclamp_eff_value(p, UCLAMP_MAX); in rt_task_fits_capacity()
473 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) in rt_task_fits_capacity() argument
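
Lines 455-473 give rt_task_fits_capacity() in two flavours (a real check, and a stub for symmetric systems); the matches at 465-466 show it reading the task's effective uclamp bounds. A compact model of the check, assuming the kernel's 0..1024 capacity scale and a hypothetical per-CPU capacity table (the kernel derives this via capacity_orig_of()):

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024

/* Hypothetical capacities for a big.LITTLE-style 4-CPU system. */
static const unsigned int capacity_of[4] = { 512, 512, 1024, 1024 };

/* An RT task is assumed to want to run flat out, so it fits a CPU when
 * the CPU covers the smaller of its effective uclamp bounds. */
static bool fits_capacity(unsigned int min_cap, unsigned int max_cap, int cpu)
{
	unsigned int want = min_cap < max_cap ? min_cap : max_cap;

	return capacity_of[cpu] >= want;
}

int main(void)
{
	/* A task clamped to at least 600 misses a 512-capacity CPU
	 * but fits a 1024-capacity one. */
	printf("%d %d\n", fits_capacity(600, 1024, 0), fits_capacity(600, 1024, 2));
	return 0;
}
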
570 struct task_struct *p; in rt_se_boosted() local
575 p = rt_task_of(rt_se); in rt_se_boosted()
576 return p->prio != p->normal_prio; in rt_se_boosted()
1368 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1370 struct sched_rt_entity *rt_se = &p->rt; in enqueue_task_rt()
1377 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1378 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1381 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1383 struct sched_rt_entity *rt_se = &p->rt; in dequeue_task_rt()
1388 dequeue_pushable_task(rq, p); in dequeue_task_rt()
1409 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1411 struct sched_rt_entity *rt_se = &p->rt; in requeue_task_rt()
1429 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) in select_task_rq_rt() argument
1472 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio); in select_task_rq_rt()
1474 if (test || !rt_task_fits_capacity(p, cpu)) { in select_task_rq_rt()
1475 int target = find_lowest_rq(p); in select_task_rq_rt()
1482 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
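
The select_task_rq_rt() matches at 1472-1482 encode the wakeup placement heuristic: go through find_lowest_rq() when the current task on the candidate CPU is a pinned or equal/higher-priority RT task, or when the waking task does not fit the CPU's capacity. The predicate can be restated as below (rt_view is a simplified stand-in for task_struct; the fitness flag would come from a check like the earlier sketch):

#include <stdbool.h>
#include <stdio.h>

struct rt_view {
	int prio;             /* lower value = higher priority */
	int nr_cpus_allowed;
	bool is_rt;
};

/* Mirrors the 'test' condition at line 1472 plus the capacity check
 * at line 1474. */
static bool should_find_lowest_rq(const struct rt_view *curr,
				  const struct rt_view *p, bool fits)
{
	bool test = curr->is_rt &&
		    (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

	return test || !fits;
}

int main(void)
{
	struct rt_view curr = { .prio = 50, .nr_cpus_allowed = 1, .is_rt = true };
	struct rt_view p    = { .prio = 40, .nr_cpus_allowed = 4, .is_rt = true };

	/* curr is pinned to this CPU, so the wakeup searches elsewhere. */
	printf("%d\n", should_find_lowest_rq(&curr, &p, true));
	return 0;
}
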
1491 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1505 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1506 cpupri_find(&rq->rd->cpupri, p, NULL, NULL)) in check_preempt_equal_prio()
1514 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1518 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_rt() argument
1520 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { in balance_rt()
1539 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_rt() argument
1541 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1559 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1560 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
1564 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) in set_next_task_rt() argument
1566 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
1569 dequeue_pushable_task(rq, p); in set_next_task_rt()
1619 struct task_struct *p; in pick_next_task_rt() local
1626 p = _pick_next_task_rt(rq); in pick_next_task_rt()
1627 set_next_task_rt(rq, p, true); in pick_next_task_rt()
1628 return p; in pick_next_task_rt()
1631 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
1641 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1642 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1650 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
1652 if (!task_running(rq, p) && in pick_rt_task()
1653 cpumask_test_cpu(cpu, p->cpus_ptr) && in pick_rt_task()
1654 rt_task_fits_capacity(p, cpu)) in pick_rt_task()
1667 struct task_struct *p; in pick_highest_pushable_task() local
1672 plist_for_each_entry(p, head, pushable_tasks) { in pick_highest_pushable_task()
1673 if (pick_rt_task(rq, p, cpu)) in pick_highest_pushable_task()
1674 return p; in pick_highest_pushable_task()
1817 struct task_struct *p; in pick_next_pushable_task() local
1822 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
1825 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
1826 BUG_ON(task_current(rq, p)); in pick_next_pushable_task()
1827 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_task()
1829 BUG_ON(!task_on_rq_queued(p)); in pick_next_pushable_task()
1830 BUG_ON(!rt_task(p)); in pick_next_pushable_task()
1832 return p; in pick_next_pushable_task()
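
The BUG_ON cluster at 1825-1830 spells out the invariants a task must satisfy before it can be pushed: it lives on this runqueue's CPU, is not the task currently running there, can migrate, is queued, and is an RT task. Restated as a plain predicate (a hypothetical paraphrase, not kernel code):

#include <stdbool.h>

struct push_view {
	int task_cpu;
	int rq_cpu;
	bool is_current;
	int nr_cpus_allowed;
	bool on_rq_queued;
	bool is_rt;
};

/* Each condition matches one BUG_ON at lines 1825-1830. The kernel
 * asserts rather than tests these, because enqueue_pushable_task()
 * maintains the list so they always hold. */
bool pushable_invariants_hold(const struct push_view *v)
{
	return v->task_cpu == v->rq_cpu &&
	       !v->is_current &&
	       v->nr_cpus_allowed > 1 &&
	       v->on_rq_queued &&
	       v->is_rt;
}
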
2101 struct task_struct *p; in pull_rt_task() local
2154 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
2160 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2161 WARN_ON(p == src_rq->curr); in pull_rt_task()
2162 WARN_ON(!task_on_rq_queued(p)); in pull_rt_task()
2172 if (p->prio < src_rq->curr->prio) in pull_rt_task()
2177 deactivate_task(src_rq, p, 0); in pull_rt_task()
2178 set_task_cpu(p, this_cpu); in pull_rt_task()
2179 activate_task(this_rq, p, 0); in pull_rt_task()
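
The pull_rt_task() matches at 2154-2179 show the canonical cross-runqueue migration sequence: pick the best pushable task on the source queue, verify it would actually raise this queue's priority (2160) and is not about to run on its own CPU (2172), then move it with deactivate_task() / set_task_cpu() / activate_task(). A sketch of the three-step move over a toy runqueue model (toy_rq, toy_task and pull_one are hypothetical, not the kernel API):

#include <stdio.h>

struct toy_task {
	int prio;
	int cpu;
	int queued;
};

struct toy_rq {
	int cpu;
	int highest_prio;   /* best (lowest) prio value queued here */
};

/* Ordered exactly like lines 2177-2179; switching the task's CPU while
 * it is dequeued from both runqueues keeps each queue's accounting
 * consistent. */
void pull_one(struct toy_rq *src, struct toy_rq *dst, struct toy_task *p)
{
	if (p->cpu != src->cpu)            /* cf. BUG_ON at line 1825 */
		return;
	if (p->prio >= dst->highest_prio)  /* cf. the check at line 2160 */
		return;

	p->queued = 0;        /* deactivate_task(src_rq, p, 0) */
	p->cpu = dst->cpu;    /* set_task_cpu(p, this_cpu)     */
	p->queued = 1;        /* activate_task(this_rq, p, 0)  */

	dst->highest_prio = p->prio;
}

int main(void)
{
	struct toy_rq src = { .cpu = 0, .highest_prio = 10 };
	struct toy_rq dst = { .cpu = 1, .highest_prio = 60 };
	struct toy_task p = { .prio = 20, .cpu = 0, .queued = 1 };

	pull_one(&src, &dst, &p);
	printf("task now on cpu %d, dst highest prio %d\n", p.cpu, dst.highest_prio);
	return 0;
}
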
2199 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2201 bool need_to_push = !task_running(rq, p) && in task_woken_rt()
2203 p->nr_cpus_allowed > 1 && in task_woken_rt()
2206 rq->curr->prio <= p->prio); in task_woken_rt()
2208 if (need_to_push || !rt_task_fits_capacity(p, cpu_of(rq))) in task_woken_rt()
2238 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2247 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2269 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2278 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_rt()
2281 !rt_task_fits_capacity(p, cpu_of(rq)); in switched_to_rt()
2283 if (p->nr_cpus_allowed > 1 && need_to_push) in switched_to_rt()
2286 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2296 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2298 if (!task_on_rq_queued(p)) in prio_changed_rt()
2301 if (rq->curr == p) { in prio_changed_rt()
2307 if (oldprio < p->prio) in prio_changed_rt()
2314 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2318 if (oldprio < p->prio) in prio_changed_rt()
2327 if (p->prio < rq->curr->prio) in prio_changed_rt()
2333 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2338 soft = task_rlimit(p, RLIMIT_RTTIME); in watchdog()
2339 hard = task_rlimit_max(p, RLIMIT_RTTIME); in watchdog()
2344 if (p->rt.watchdog_stamp != jiffies) { in watchdog()
2345 p->rt.timeout++; in watchdog()
2346 p->rt.watchdog_stamp = jiffies; in watchdog()
2350 if (p->rt.timeout > next) { in watchdog()
2351 posix_cputimers_rt_watchdog(&p->posix_cputimers, in watchdog()
2352 p->se.sum_exec_runtime); in watchdog()
2357 static inline void watchdog(struct rq *rq, struct task_struct *p) { } in watchdog() argument
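
The watchdog() matches at 2338-2352 implement RLIMIT_RTTIME policing: p->rt.timeout counts distinct jiffies of runtime (the watchdog_stamp comparison keeps it to one increment per tick), and once the count crosses the soft limit the posix CPU timer machinery delivers the signal. A userspace model of the per-tick accounting, with the limit reduced to a plain tick count:

#include <stdio.h>

struct watchdog_model {
	unsigned long timeout;        /* ticks of runtime consumed */
	unsigned long watchdog_stamp; /* last tick we counted      */
};

/* Called once per timer tick while the task runs; 'soft_ticks' plays
 * the role of RLIMIT_RTTIME converted to ticks. Returns 1 once the
 * limit is exceeded and a signal would be queued. */
int watchdog_tick(struct watchdog_model *w, unsigned long jiffies,
		  unsigned long soft_ticks)
{
	if (w->watchdog_stamp != jiffies) {
		w->timeout++;
		w->watchdog_stamp = jiffies;
	}
	return w->timeout > soft_ticks;
}

int main(void)
{
	struct watchdog_model w = { 0, 0 };
	unsigned long j;

	for (j = 1; j <= 5; j++)
		if (watchdog_tick(&w, j, 3))
			printf("soft limit exceeded at tick %lu\n", j);
	return 0;
}
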
2368 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2370 struct sched_rt_entity *rt_se = &p->rt; in task_tick_rt()
2375 watchdog(rq, p); in task_tick_rt()
2381 if (p->policy != SCHED_RR) in task_tick_rt()
2384 if (--p->rt.time_slice) in task_tick_rt()
2387 p->rt.time_slice = sched_rr_timeslice; in task_tick_rt()
2395 requeue_task_rt(rq, p, 0); in task_tick_rt()
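
The task_tick_rt() matches at 2368-2395 show the SCHED_RR time-slice logic: only SCHED_RR tasks round-robin (2381), the slice is decremented per tick (2384), and on expiry the task is refilled from sched_rr_timeslice and requeued at the tail (head = 0 at 2395), while SCHED_FIFO tasks never expire. Condensed into a standalone model (SCHED_RR = 2 is the real uapi constant; the refill value is illustrative):

#include <stdio.h>

#define SCHED_RR 2
#define RR_TIMESLICE 100   /* stand-in for the kernel's sched_rr_timeslice */

struct rr_task {
	int policy;
	int time_slice;
	int requeues;      /* how many times we went to the tail */
};

/* Per-tick path for a running task, following lines 2381-2395. */
void rr_tick(struct rr_task *p)
{
	if (p->policy != SCHED_RR)
		return;
	if (--p->time_slice)
		return;
	p->time_slice = RR_TIMESLICE;
	p->requeues++;   /* requeue_task_rt(rq, p, 0): tail, not head */
}

int main(void)
{
	struct rr_task p = { SCHED_RR, 3, 0 };
	int i;

	for (i = 0; i < 7; i++)
		rr_tick(&p);
	printf("requeues: %d, slice left: %d\n", p.requeues, p.time_slice);
	return 0;
}
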
2458 struct task_struct *g, *p; in tg_has_rt_tasks() local
2466 for_each_process_thread(g, p) { in tg_has_rt_tasks()
2467 if (rt_task(p) && task_group(p) == tg) in tg_has_rt_tasks()