Lines matching refs: p (references to struct task_struct *p in kernel/sched/core.c)
170 static inline int __task_prio(struct task_struct *p) in __task_prio() argument
172 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
175 if (rt_prio(p->prio)) /* includes deadline */ in __task_prio()
176 return p->prio; /* [-1, 99] */ in __task_prio()
178 if (p->sched_class == &idle_sched_class) in __task_prio()
236 const struct task_struct *p = __node_2_sc(node); in rb_sched_core_cmp() local
239 if (cookie < p->core_cookie) in rb_sched_core_cmp()
242 if (cookie > p->core_cookie) in rb_sched_core_cmp()
248 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
252 if (!p->core_cookie) in sched_core_enqueue()
255 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
258 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
262 if (sched_core_enqueued(p)) { in sched_core_dequeue()
263 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
264 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
294 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie) in sched_core_next() argument
296 struct rb_node *node = &p->core_node; in sched_core_next()
302 p = container_of(node, struct task_struct, core_node); in sched_core_next()
303 if (p->core_cookie != cookie) in sched_core_next()
306 return p; in sched_core_next()
447 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } in sched_core_enqueue() argument
449 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } in sched_core_dequeue() argument
624 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock() argument
629 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
632 rq = task_rq(p); in __task_rq_lock()
634 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
640 while (unlikely(task_on_rq_migrating(p))) in __task_rq_lock()
649 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock() argument
650 __acquires(p->pi_lock) in task_rq_lock()
656 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
657 rq = task_rq(p); in task_rq_lock()
676 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
681 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
683 while (unlikely(task_on_rq_migrating(p))) in task_rq_lock()
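Note: the __task_rq_lock()/task_rq_lock() lines above establish the locking order used throughout this file: p->pi_lock first, then the runqueue lock, retrying while the task is migrating. A minimal sketch of the canonical caller pattern (the same pairing appears in uclamp_update_util_min_rt_default() and set_user_nice() below); inspect_task() is a hypothetical name, and these helpers are scheduler-internal (declared in kernel/sched/sched.h):

	static void inspect_task(struct task_struct *p)
	{
		struct rq_flags rf;
		struct rq *rq;

		rq = task_rq_lock(p, &rf);	/* p->pi_lock, then p's rq lock */
		/* with both locks held, p cannot be migrated to another rq */
		task_rq_unlock(rq, p, &rf);
	}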
904 static inline bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
906 struct thread_info *ti = task_thread_info(p); in set_nr_and_not_polling()
916 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
918 struct thread_info *ti = task_thread_info(p); in set_nr_if_polling()
933 static inline bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
935 set_tsk_need_resched(p); in set_nr_and_not_polling()
940 static inline bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
1290 static void set_load_weight(struct task_struct *p, bool update_load) in set_load_weight() argument
1292 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1293 struct load_weight *load = &p->se.load; in set_load_weight()
1298 if (task_has_idle_policy(p)) { in set_load_weight()
1308 if (update_load && p->sched_class == &fair_sched_class) { in set_load_weight()
1309 reweight_task(p, prio); in set_load_weight()
1450 static void __uclamp_update_util_min_rt_default(struct task_struct *p) in __uclamp_update_util_min_rt_default() argument
1455 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1457 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1467 static void uclamp_update_util_min_rt_default(struct task_struct *p) in uclamp_update_util_min_rt_default() argument
1472 if (!rt_task(p)) in uclamp_update_util_min_rt_default()
1476 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1477 __uclamp_update_util_min_rt_default(p); in uclamp_update_util_min_rt_default()
1478 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1482 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_tg_restrict() argument
1485 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1493 if (task_group_is_autogroup(task_group(p))) in uclamp_tg_restrict()
1495 if (task_group(p) == &root_task_group) in uclamp_tg_restrict()
1498 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1499 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1517 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_get() argument
1519 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); in uclamp_eff_get()
1524 trace_android_rvh_uclamp_eff_get(p, clamp_id, &uc_max, &uc_eff, &ret); in uclamp_eff_get()
1535 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_value() argument
1540 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1541 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
1543 uc_eff = uclamp_eff_get(p, clamp_id); in uclamp_eff_value()
1559 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
1563 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1569 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1597 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
1601 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1663 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) in uclamp_rq_inc() argument
1676 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1680 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1687 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1700 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1704 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1707 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_reinc_id() argument
1710 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1713 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1714 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1725 uclamp_update_active(struct task_struct *p) in uclamp_update_active() argument
1739 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1748 uclamp_rq_reinc_id(rq, p, clamp_id); in uclamp_update_active()
1750 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
1758 struct task_struct *p; in uclamp_update_active_tasks() local
1761 while ((p = css_task_iter_next(&it))) in uclamp_update_active_tasks()
1762 uclamp_update_active(p); in uclamp_update_active_tasks()
1791 struct task_struct *g, *p; in uclamp_sync_util_min_rt_default() local
1811 for_each_process_thread(g, p) in uclamp_sync_util_min_rt_default()
1812 uclamp_update_util_min_rt_default(p); in uclamp_sync_util_min_rt_default()
1883 static int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
1886 int util_min = p->uclamp_req[UCLAMP_MIN].value; in uclamp_validate()
1887 int util_max = p->uclamp_req[UCLAMP_MAX].value; in uclamp_validate()
1891 trace_android_vh_uclamp_validate(p, attr, user, &ret, &done); in uclamp_validate()
1957 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
1963 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; in __setscheduler_uclamp()
1973 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) in __setscheduler_uclamp()
1987 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], in __setscheduler_uclamp()
1989 trace_android_vh_setscheduler_uclamp(p, UCLAMP_MIN, attr->sched_util_min); in __setscheduler_uclamp()
1994 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], in __setscheduler_uclamp()
1996 trace_android_vh_setscheduler_uclamp(p, UCLAMP_MAX, attr->sched_util_max); in __setscheduler_uclamp()
2000 static void uclamp_fork(struct task_struct *p) in uclamp_fork() argument
2009 p->uclamp[clamp_id].active = false; in uclamp_fork()
2011 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
2015 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
2020 static void uclamp_post_fork(struct task_struct *p) in uclamp_post_fork() argument
2022 uclamp_update_util_min_rt_default(p); in uclamp_post_fork()
2065 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } in uclamp_rq_inc() argument
2066 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
2067 static inline int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
2072 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
2074 static inline void uclamp_fork(struct task_struct *p) { } in uclamp_fork() argument
2075 static inline void uclamp_post_fork(struct task_struct *p) { } in uclamp_post_fork() argument
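Note: the __setscheduler_uclamp() lines above consume attr->sched_util_min/attr->sched_util_max. A hedged userspace sketch of driving that path through sched_setattr(); it assumes the installed UAPI headers export struct sched_attr and the SCHED_FLAG_UTIL_CLAMP_* flags, and set_util_min() is only an illustrative wrapper:

	#include <linux/sched.h>		/* SCHED_FLAG_* */
	#include <linux/sched/types.h>		/* struct sched_attr */
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Request a minimum utilization clamp (0..1024) for one task. */
	static int set_util_min(pid_t pid, unsigned int util_min)
	{
		struct sched_attr attr = {
			.size		= sizeof(attr),
			.sched_flags	= SCHED_FLAG_KEEP_ALL | SCHED_FLAG_UTIL_CLAMP_MIN,
			.sched_util_min	= util_min,
		};

		return syscall(SYS_sched_setattr, pid, &attr, 0);
	}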
2079 bool sched_task_on_rq(struct task_struct *p) in sched_task_on_rq() argument
2081 return task_on_rq_queued(p); in sched_task_on_rq()
2084 unsigned long get_wchan(struct task_struct *p) in get_wchan() argument
2089 if (!p || p == current) in get_wchan()
2093 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2094 state = READ_ONCE(p->__state); in get_wchan()
2096 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2097 ip = __get_wchan(p); in get_wchan()
2098 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2103 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
2109 sched_info_enqueue(rq, p); in enqueue_task()
2110 psi_enqueue(p, flags & ENQUEUE_WAKEUP); in enqueue_task()
2113 uclamp_rq_inc(rq, p); in enqueue_task()
2114 trace_android_rvh_enqueue_task(rq, p, flags); in enqueue_task()
2115 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2116 trace_android_rvh_after_enqueue_task(rq, p, flags); in enqueue_task()
2119 sched_core_enqueue(rq, p); in enqueue_task()
2122 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
2125 sched_core_dequeue(rq, p, flags); in dequeue_task()
2131 sched_info_dequeue(rq, p); in dequeue_task()
2132 psi_dequeue(p, flags & DEQUEUE_SLEEP); in dequeue_task()
2135 uclamp_rq_dec(rq, p); in dequeue_task()
2136 trace_android_rvh_dequeue_task(rq, p, flags); in dequeue_task()
2137 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2138 trace_android_rvh_after_dequeue_task(rq, p, flags); in dequeue_task()
2141 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
2143 if (task_on_rq_migrating(p)) in activate_task()
2146 enqueue_task(rq, p, flags); in activate_task()
2148 p->on_rq = TASK_ON_RQ_QUEUED; in activate_task()
2152 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
2154 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; in deactivate_task()
2156 dequeue_task(rq, p, flags); in deactivate_task()
2181 static inline int normal_prio(struct task_struct *p) in normal_prio() argument
2183 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); in normal_prio()
2193 static int effective_prio(struct task_struct *p) in effective_prio() argument
2195 p->normal_prio = normal_prio(p); in effective_prio()
2201 if (!rt_prio(p->prio)) in effective_prio()
2202 return p->normal_prio; in effective_prio()
2203 return p->prio; in effective_prio()
2212 inline int task_curr(const struct task_struct *p) in task_curr() argument
2214 return cpu_curr(task_cpu(p)) == p; in task_curr()
2224 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
2228 if (prev_class != p->sched_class) { in check_class_changed()
2230 prev_class->switched_from(rq, p); in check_class_changed()
2232 p->sched_class->switched_to(rq, p); in check_class_changed()
2233 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2234 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2237 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
2239 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
2240 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
2241 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) in check_preempt_curr()
2254 int __task_state_match(struct task_struct *p, unsigned int state) in __task_state_match() argument
2256 if (READ_ONCE(p->__state) & state) in __task_state_match()
2259 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2266 int task_state_match(struct task_struct *p, unsigned int state) in task_state_match() argument
2274 raw_spin_lock_irq(&p->pi_lock); in task_state_match()
2275 match = __task_state_match(p, state); in task_state_match()
2276 raw_spin_unlock_irq(&p->pi_lock); in task_state_match()
2297 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) in wait_task_inactive() argument
2311 rq = task_rq(p); in wait_task_inactive()
2324 while (task_on_cpu(rq, p)) { in wait_task_inactive()
2325 if (!task_state_match(p, match_state)) in wait_task_inactive()
2335 rq = task_rq_lock(p, &rf); in wait_task_inactive()
2336 trace_sched_wait_task(p); in wait_task_inactive()
2337 running = task_on_cpu(rq, p); in wait_task_inactive()
2338 queued = task_on_rq_queued(p); in wait_task_inactive()
2340 if ((match = __task_state_match(p, match_state))) { in wait_task_inactive()
2347 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2349 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
2399 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
2401 static int __set_cpus_allowed_ptr(struct task_struct *p,
2405 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) in migrate_disable_switch() argument
2407 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2410 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2416 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE); in migrate_disable_switch()
2421 struct task_struct *p = current; in migrate_disable() local
2423 if (p->migration_disabled) { in migrate_disable()
2424 p->migration_disabled++; in migrate_disable()
2430 p->migration_disabled = 1; in migrate_disable()
2437 struct task_struct *p = current; in migrate_enable() local
2439 if (p->migration_disabled > 1) { in migrate_enable()
2440 p->migration_disabled--; in migrate_enable()
2444 if (WARN_ON_ONCE(!p->migration_disabled)) in migrate_enable()
2452 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2453 __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE); in migrate_enable()
2460 p->migration_disabled = 0; in migrate_enable()
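Note: the migration_disabled bookkeeping above implements migrate_disable()/migrate_enable(). A short sketch of the usual bracketing; pinned_section() is a hypothetical name:

	#include <linux/preempt.h>
	#include <linux/printk.h>
	#include <linux/smp.h>

	static void pinned_section(void)
	{
		int cpu;

		migrate_disable();		/* current->migration_disabled++ */
		cpu = smp_processor_id();
		/* preemption (and sleeping) is still allowed here, but the task
		 * will not be moved off 'cpu' until migrate_enable() */
		pr_info("pinned on CPU%d\n", cpu);
		migrate_enable();
	}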
2475 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) in is_cpu_allowed() argument
2480 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in is_cpu_allowed()
2484 if (is_migration_disabled(p)) in is_cpu_allowed()
2488 trace_android_rvh_is_cpu_allowed(p, cpu, &allowed); in is_cpu_allowed()
2491 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2492 return cpu_active(cpu) && task_cpu_possible(cpu, p) && allowed; in is_cpu_allowed()
2495 if (kthread_is_per_cpu(p)) in is_cpu_allowed()
2529 struct task_struct *p, int new_cpu) in move_queued_task() argument
2540 trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached); in move_queued_task()
2544 deactivate_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
2545 set_task_cpu(p, new_cpu); in move_queued_task()
2552 WARN_ON_ONCE(task_cpu(p) != new_cpu); in move_queued_task()
2553 activate_task(rq, p, 0); in move_queued_task()
2554 check_preempt_curr(rq, p, 0); in move_queued_task()
2587 struct task_struct *p, int dest_cpu) in __migrate_task() argument
2590 if (!is_cpu_allowed(p, dest_cpu)) in __migrate_task()
2594 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2609 struct task_struct *p = arg->task; in migration_cpu_stop() local
2626 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2633 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2640 if (task_rq(p) == rq) { in migration_cpu_stop()
2641 if (is_migration_disabled(p)) in migration_cpu_stop()
2645 p->migration_pending = NULL; in migration_cpu_stop()
2648 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2652 if (task_on_rq_queued(p)) in migration_cpu_stop()
2653 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2655 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2679 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2680 p->migration_pending = NULL; in migration_cpu_stop()
2692 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2693 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, in migration_cpu_stop()
2701 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2712 struct task_struct *p = arg; in push_cpu_stop() local
2714 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2717 if (task_rq(p) != rq) in push_cpu_stop()
2720 if (is_migration_disabled(p)) { in push_cpu_stop()
2721 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2725 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2727 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2728 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2734 if (task_rq(p) == rq) { in push_cpu_stop()
2735 deactivate_task(rq, p, 0); in push_cpu_stop()
2736 set_task_cpu(p, lowest_rq->cpu); in push_cpu_stop()
2737 activate_task(lowest_rq, p, 0); in push_cpu_stop()
2746 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2748 put_task_struct(p); in push_cpu_stop()
2757 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) in set_cpus_allowed_common() argument
2760 p->cpus_ptr = new_mask; in set_cpus_allowed_common()
2764 cpumask_copy(&p->cpus_mask, new_mask); in set_cpus_allowed_common()
2765 p->nr_cpus_allowed = cpumask_weight(new_mask); in set_cpus_allowed_common()
2766 trace_android_rvh_set_cpus_allowed_comm(p, new_mask); in set_cpus_allowed_common()
2770 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) in __do_set_cpus_allowed() argument
2772 struct rq *rq = task_rq(p); in __do_set_cpus_allowed()
2788 SCHED_WARN_ON(!p->on_cpu); in __do_set_cpus_allowed()
2790 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2792 queued = task_on_rq_queued(p); in __do_set_cpus_allowed()
2793 running = task_current(rq, p); in __do_set_cpus_allowed()
2801 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2804 put_prev_task(rq, p); in __do_set_cpus_allowed()
2806 p->sched_class->set_cpus_allowed(p, new_mask, flags); in __do_set_cpus_allowed()
2809 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2811 set_next_task(rq, p); in __do_set_cpus_allowed()
2814 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed() argument
2816 __do_set_cpus_allowed(p, new_mask, 0); in do_set_cpus_allowed()
2862 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) in clear_user_cpus_ptr() argument
2866 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2871 void release_user_cpus_ptr(struct task_struct *p) in release_user_cpus_ptr() argument
2873 kfree(clear_user_cpus_ptr(p)); in release_user_cpus_ptr()
2952 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2959 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
2963 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2965 push_task = get_task_struct(p); in affine_move_task()
2972 pending = p->migration_pending; in affine_move_task()
2974 p->migration_pending = NULL; in affine_move_task()
2979 task_rq_unlock(rq, p, rf); in affine_move_task()
2982 p, &rq->push_work); in affine_move_task()
2994 if (!p->migration_pending) { in affine_move_task()
2999 .task = p, in affine_move_task()
3004 p->migration_pending = &my_pending; in affine_move_task()
3006 pending = p->migration_pending; in affine_move_task()
3019 pending = p->migration_pending; in affine_move_task()
3033 task_rq_unlock(rq, p, rf); in affine_move_task()
3037 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3048 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3051 task_rq_unlock(rq, p, rf); in affine_move_task()
3062 if (!is_migration_disabled(p)) { in affine_move_task()
3063 if (task_on_rq_queued(p)) in affine_move_task()
3064 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
3067 p->migration_pending = NULL; in affine_move_task()
3071 task_rq_unlock(rq, p, rf); in affine_move_task()
3097 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, in __set_cpus_allowed_ptr_locked() argument
3103 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3105 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); in __set_cpus_allowed_ptr_locked()
3107 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3114 if (kthread || is_migration_disabled(p)) { in __set_cpus_allowed_ptr_locked()
3137 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3143 if (cpumask_equal(&p->cpus_mask, new_mask)) in __set_cpus_allowed_ptr_locked()
3146 if (WARN_ON_ONCE(p == current && in __set_cpus_allowed_ptr_locked()
3147 is_migration_disabled(p) && in __set_cpus_allowed_ptr_locked()
3148 !cpumask_test_cpu(task_cpu(p), new_mask))) { in __set_cpus_allowed_ptr_locked()
3160 trace_android_rvh_set_cpus_allowed_by_task(cpu_valid_mask, new_mask, p, &dest_cpu); in __set_cpus_allowed_ptr_locked()
3167 __do_set_cpus_allowed(p, new_mask, flags); in __set_cpus_allowed_ptr_locked()
3170 user_mask = clear_user_cpus_ptr(p); in __set_cpus_allowed_ptr_locked()
3172 ret = affine_move_task(rq, p, rf, dest_cpu, flags); in __set_cpus_allowed_ptr_locked()
3179 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
3193 static int __set_cpus_allowed_ptr(struct task_struct *p, in __set_cpus_allowed_ptr() argument
3199 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
3200 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); in __set_cpus_allowed_ptr()
3203 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr() argument
3205 return __set_cpus_allowed_ptr(p, new_mask, 0); in set_cpus_allowed_ptr()
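Note: set_cpus_allowed_ptr() above is the usual in-kernel entry point for changing a task's affinity; a rough caller sketch, where 'worker' (a kthread) and 'target_cpu' are hypothetical:

	if (set_cpus_allowed_ptr(worker, cpumask_of(target_cpu)))
		pr_warn("could not pin worker to CPU%d\n", target_cpu);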
3216 static int restrict_cpus_allowed_ptr(struct task_struct *p, in restrict_cpus_allowed_ptr() argument
3225 if (!p->user_cpus_ptr) { in restrict_cpus_allowed_ptr()
3231 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
3238 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { in restrict_cpus_allowed_ptr()
3243 if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) { in restrict_cpus_allowed_ptr()
3253 cpumask_copy(user_mask, p->cpus_ptr); in restrict_cpus_allowed_ptr()
3254 p->user_cpus_ptr = user_mask; in restrict_cpus_allowed_ptr()
3257 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); in restrict_cpus_allowed_ptr()
3260 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
3271 void force_compatible_cpus_allowed_ptr(struct task_struct *p) in force_compatible_cpus_allowed_ptr() argument
3274 const struct cpumask *override_mask = task_cpu_possible_mask(p); in force_compatible_cpus_allowed_ptr()
3287 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) in force_compatible_cpus_allowed_ptr()
3294 cpuset_cpus_allowed(p, new_mask); in force_compatible_cpus_allowed_ptr()
3300 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3304 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); in force_compatible_cpus_allowed_ptr()
3311 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
3321 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) in relax_compatible_cpus_allowed_ptr() argument
3323 struct cpumask *user_mask = p->user_cpus_ptr; in relax_compatible_cpus_allowed_ptr()
3331 if (!user_mask || !__sched_setaffinity(p, user_mask)) in relax_compatible_cpus_allowed_ptr()
3334 raw_spin_lock_irqsave(&p->pi_lock, flags); in relax_compatible_cpus_allowed_ptr()
3335 user_mask = clear_user_cpus_ptr(p); in relax_compatible_cpus_allowed_ptr()
3336 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in relax_compatible_cpus_allowed_ptr()
3341 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) in set_task_cpu() argument
3344 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3350 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3358 p->sched_class == &fair_sched_class && in set_task_cpu()
3359 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3372 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3373 lockdep_is_held(__rq_lockp(task_rq(p))))); in set_task_cpu()
3380 WARN_ON_ONCE(is_migration_disabled(p)); in set_task_cpu()
3383 trace_sched_migrate_task(p, new_cpu); in set_task_cpu()
3385 if (task_cpu(p) != new_cpu) { in set_task_cpu()
3386 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3387 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3388 p->se.nr_migrations++; in set_task_cpu()
3389 rseq_migrate(p); in set_task_cpu()
3390 perf_event_task_migrate(p); in set_task_cpu()
3391 trace_android_rvh_set_task_cpu(p, new_cpu); in set_task_cpu()
3394 __set_task_cpu(p, new_cpu); in set_task_cpu()
3398 static void __migrate_swap_task(struct task_struct *p, int cpu) in __migrate_swap_task() argument
3400 if (task_on_rq_queued(p)) { in __migrate_swap_task()
3404 src_rq = task_rq(p); in __migrate_swap_task()
3410 deactivate_task(src_rq, p, 0); in __migrate_swap_task()
3411 set_task_cpu(p, cpu); in __migrate_swap_task()
3412 activate_task(dst_rq, p, 0); in __migrate_swap_task()
3413 check_preempt_curr(dst_rq, p, 0); in __migrate_swap_task()
3424 p->wake_cpu = cpu; in __migrate_swap_task()
3477 int migrate_swap(struct task_struct *cur, struct task_struct *p, in migrate_swap() argument
3486 .dst_task = p, in migrate_swap()
3506 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); in migrate_swap()
3527 void kick_process(struct task_struct *p) in kick_process() argument
3532 cpu = task_cpu(p); in kick_process()
3533 if ((cpu != smp_processor_id()) && task_curr(p)) in kick_process()
3561 int select_fallback_rq(int cpu, struct task_struct *p) in select_fallback_rq() argument
3568 trace_android_rvh_select_fallback_rq(cpu, p, &dest_cpu); in select_fallback_rq()
3582 if (is_cpu_allowed(p, dest_cpu)) in select_fallback_rq()
3589 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3590 if (!is_cpu_allowed(p, dest_cpu)) in select_fallback_rq()
3599 if (cpuset_cpus_allowed_fallback(p)) { in select_fallback_rq()
3611 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); in select_fallback_rq()
3627 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3629 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3641 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) in select_task_rq() argument
3643 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3645 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) in select_task_rq()
3646 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); in select_task_rq()
3648 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3660 if (unlikely(!is_cpu_allowed(p, cpu))) in select_task_rq()
3661 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
3713 static inline int __set_cpus_allowed_ptr(struct task_struct *p, in __set_cpus_allowed_ptr() argument
3717 return set_cpus_allowed_ptr(p, new_mask); in __set_cpus_allowed_ptr()
3720 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } in migrate_disable_switch() argument
3730 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) in ttwu_stat() argument
3742 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3746 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3758 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3762 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3765 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3771 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_wakeup() argument
3774 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
3775 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3776 trace_sched_wakeup(p); in ttwu_do_wakeup()
3779 if (p->sched_class->task_woken) { in ttwu_do_wakeup()
3785 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
3807 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
3817 if (p->sched_contributes_to_load) in ttwu_do_activate()
3825 if (p->in_iowait) { in ttwu_do_activate()
3826 delayacct_blkio_end(p); in ttwu_do_activate()
3827 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3830 activate_task(rq, p, en_flags); in ttwu_do_activate()
3831 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
3859 static int ttwu_runnable(struct task_struct *p, int wake_flags) in ttwu_runnable() argument
3865 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3866 if (task_on_rq_queued(p)) { in ttwu_runnable()
3869 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_runnable()
3882 struct task_struct *p, *t; in sched_ttwu_pending() local
3898 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { in sched_ttwu_pending()
3899 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3900 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3902 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
3903 set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
3905 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3927 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in __ttwu_queue_wakelist() argument
3931 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3934 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3966 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) in ttwu_queue_cond() argument
3976 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
4006 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
4012 if ((sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) || in ttwu_queue_wakelist()
4015 __ttwu_queue_wakelist(p, cpu, wake_flags); in ttwu_queue_wakelist()
4024 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
4031 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue() argument
4036 if (ttwu_queue_wakelist(p, cpu, wake_flags)) in ttwu_queue()
4041 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
4064 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) in ttwu_state_match() argument
4073 *success = !!(match = __task_state_match(p, state)); in ttwu_state_match()
4090 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4216 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) in try_to_wake_up() argument
4222 if (p == current) { in try_to_wake_up()
4234 if (!ttwu_state_match(p, state, &success)) in try_to_wake_up()
4237 trace_sched_waking(p); in try_to_wake_up()
4238 WRITE_ONCE(p->__state, TASK_RUNNING); in try_to_wake_up()
4239 trace_sched_wakeup(p); in try_to_wake_up()
4249 raw_spin_lock_irqsave(&p->pi_lock, flags); in try_to_wake_up()
4251 if (!ttwu_state_match(p, state, &success)) in try_to_wake_up()
4254 trace_sched_waking(p); in try_to_wake_up()
4279 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4282 if (READ_ONCE(p->__state) & TASK_UNINTERRUPTIBLE) in try_to_wake_up()
4283 trace_sched_blocked_reason(p); in try_to_wake_up()
4317 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4338 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4339 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) in try_to_wake_up()
4351 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4353 trace_android_rvh_try_to_wake_up(p); in try_to_wake_up()
4355 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); in try_to_wake_up()
4356 if (task_cpu(p) != cpu) { in try_to_wake_up()
4357 if (p->in_iowait) { in try_to_wake_up()
4358 delayacct_blkio_end(p); in try_to_wake_up()
4359 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4363 psi_ttwu_dequeue(p); in try_to_wake_up()
4364 set_task_cpu(p, cpu); in try_to_wake_up()
4367 cpu = task_cpu(p); in try_to_wake_up()
4370 ttwu_queue(p, cpu, wake_flags); in try_to_wake_up()
4372 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in try_to_wake_up()
4375 trace_android_rvh_try_to_wake_up_success(p); in try_to_wake_up()
4376 ttwu_stat(p, task_cpu(p), wake_flags); in try_to_wake_up()
4383 static bool __task_needs_rq_lock(struct task_struct *p) in __task_needs_rq_lock() argument
4385 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4402 if (p->on_rq) in __task_needs_rq_lock()
4411 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4431 int task_call_func(struct task_struct *p, task_call_f func, void *arg) in task_call_func() argument
4437 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4439 if (__task_needs_rq_lock(p)) in task_call_func()
4440 rq = __task_rq_lock(p, &rf); in task_call_func()
4452 ret = func(p, arg); in task_call_func()
4457 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
4504 int wake_up_process(struct task_struct *p) in wake_up_process() argument
4506 return try_to_wake_up(p, TASK_NORMAL, 0); in wake_up_process()
4510 int wake_up_state(struct task_struct *p, unsigned int state) in wake_up_state() argument
4512 return try_to_wake_up(p, state, 0); in wake_up_state()
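Note: wake_up_process()/wake_up_state() above are thin wrappers around try_to_wake_up(). A sketch of the sleep/wake handshake the wakeup side completes; 'my_cond' and 'sleeper' are hypothetical:

	/* sleeper side */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(my_cond))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* waker side */
	WRITE_ONCE(my_cond, 1);
	wake_up_process(sleeper);	/* == try_to_wake_up(sleeper, TASK_NORMAL, 0) */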
4522 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) in __sched_fork() argument
4524 p->on_rq = 0; in __sched_fork()
4526 p->se.on_rq = 0; in __sched_fork()
4527 p->se.exec_start = 0; in __sched_fork()
4528 p->se.sum_exec_runtime = 0; in __sched_fork()
4529 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4530 p->se.nr_migrations = 0; in __sched_fork()
4531 p->se.vruntime = 0; in __sched_fork()
4532 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4535 p->se.cfs_rq = NULL; in __sched_fork()
4538 trace_android_rvh_sched_fork_init(p); in __sched_fork()
4542 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4545 RB_CLEAR_NODE(&p->dl.rb_node); in __sched_fork()
4546 init_dl_task_timer(&p->dl); in __sched_fork()
4547 init_dl_inactive_task_timer(&p->dl); in __sched_fork()
4548 __dl_clear_params(p); in __sched_fork()
4550 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4551 p->rt.timeout = 0; in __sched_fork()
4552 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4553 p->rt.on_rq = 0; in __sched_fork()
4554 p->rt.on_list = 0; in __sched_fork()
4557 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4561 p->capture_control = NULL; in __sched_fork()
4563 init_numa_balancing(clone_flags, p); in __sched_fork()
4565 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4566 p->migration_pending = NULL; in __sched_fork()
4745 int sched_fork(unsigned long clone_flags, struct task_struct *p) in sched_fork() argument
4747 trace_android_rvh_sched_fork(p); in sched_fork()
4749 __sched_fork(clone_flags, p); in sched_fork()
4755 p->__state = TASK_NEW; in sched_fork()
4760 p->prio = current->normal_prio; in sched_fork()
4761 trace_android_rvh_prepare_prio_fork(p); in sched_fork()
4763 uclamp_fork(p); in sched_fork()
4768 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4769 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in sched_fork()
4770 p->policy = SCHED_NORMAL; in sched_fork()
4771 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4772 p->rt_priority = 0; in sched_fork()
4773 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4774 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4776 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4777 set_load_weight(p, false); in sched_fork()
4783 p->sched_reset_on_fork = 0; in sched_fork()
4786 if (dl_prio(p->prio)) in sched_fork()
4788 else if (rt_prio(p->prio)) in sched_fork()
4789 p->sched_class = &rt_sched_class; in sched_fork()
4791 p->sched_class = &fair_sched_class; in sched_fork()
4793 init_entity_runnable_average(&p->se); in sched_fork()
4794 trace_android_rvh_finish_prio_fork(p); in sched_fork()
4799 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4802 p->on_cpu = 0; in sched_fork()
4804 init_task_preempt_count(p); in sched_fork()
4806 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4807 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4812 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) in sched_cgroup_fork() argument
4820 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4826 tg = autogroup_task_group(p, tg); in sched_cgroup_fork()
4827 p->sched_task_group = tg; in sched_cgroup_fork()
4830 rseq_migrate(p); in sched_cgroup_fork()
4835 __set_task_cpu(p, smp_processor_id()); in sched_cgroup_fork()
4836 if (p->sched_class->task_fork) in sched_cgroup_fork()
4837 p->sched_class->task_fork(p); in sched_cgroup_fork()
4838 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4841 void sched_post_fork(struct task_struct *p) in sched_post_fork() argument
4843 uclamp_post_fork(p); in sched_post_fork()
4869 void wake_up_new_task(struct task_struct *p) in wake_up_new_task() argument
4874 trace_android_rvh_wake_up_new_task(p); in wake_up_new_task()
4876 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4877 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4887 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4888 rseq_migrate(p); in wake_up_new_task()
4889 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); in wake_up_new_task()
4891 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4893 post_init_entity_util_avg(p); in wake_up_new_task()
4894 trace_android_rvh_new_task_stats(p); in wake_up_new_task()
4896 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
4897 trace_sched_wakeup_new(p); in wake_up_new_task()
4898 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
4900 if (p->sched_class->task_woken) { in wake_up_new_task()
4906 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4910 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
5516 struct task_struct *p = current; in sched_exec() local
5525 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_exec()
5526 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5531 struct migration_arg arg = { p, dest_cpu }; in sched_exec()
5533 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_exec()
5534 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); in sched_exec()
5538 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_exec()
5555 static inline void prefetch_curr_exec_start(struct task_struct *p) in prefetch_curr_exec_start() argument
5558 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
5560 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; in prefetch_curr_exec_start()
5571 unsigned long long task_sched_runtime(struct task_struct *p) in task_sched_runtime() argument
5589 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5590 return p->se.sum_exec_runtime; in task_sched_runtime()
5593 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5599 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
5600 prefetch_curr_exec_start(p); in task_sched_runtime()
5602 p->sched_class->update_curr(rq); in task_sched_runtime()
5604 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5605 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5924 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) in get_preempt_disable_ip() argument
5927 return p->preempt_disable_ip; in get_preempt_disable_ip()
6027 struct task_struct *p; in __pick_next_task() local
6038 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
6039 if (unlikely(p == RETRY_TASK)) in __pick_next_task()
6043 if (!p) { in __pick_next_task()
6045 p = pick_next_task_idle(rq); in __pick_next_task()
6048 return p; in __pick_next_task()
6055 p = class->pick_next_task(rq); in __pick_next_task()
6056 if (p) in __pick_next_task()
6057 return p; in __pick_next_task()
6085 struct task_struct *p; in pick_task() local
6088 p = class->pick_task(rq); in pick_task()
6089 if (p) in pick_task()
6090 return p; in pick_task()
6096 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6103 struct task_struct *next, *p, *max = NULL; in pick_next_task() local
6220 p = rq_i->core_pick = pick_task(rq_i); in pick_next_task()
6221 if (!max || prio_less(max, p, fi_before)) in pick_next_task()
6222 max = p; in pick_next_task()
6233 p = rq_i->core_pick; in pick_next_task()
6235 if (!cookie_equals(p, cookie)) { in pick_next_task()
6236 p = NULL; in pick_next_task()
6238 p = sched_core_find(rq_i, cookie); in pick_next_task()
6239 if (!p) in pick_next_task()
6240 p = idle_sched_class.pick_task(rq_i); in pick_next_task()
6243 rq_i->core_pick = p; in pick_next_task()
6245 if (p == rq_i->idle) { in pick_next_task()
6330 struct task_struct *p; in try_steal_cookie() local
6344 p = sched_core_find(src, cookie); in try_steal_cookie()
6345 if (p == src->idle) in try_steal_cookie()
6349 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6352 if (!is_cpu_allowed(p, this)) in try_steal_cookie()
6355 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6358 deactivate_task(src, p, 0); in try_steal_cookie()
6359 set_task_cpu(p, this); in try_steal_cookie()
6360 activate_task(dst, p, 0); in try_steal_cookie()
6368 p = sched_core_next(p, cookie); in try_steal_cookie()
6369 } while (p); in try_steal_cookie()
7057 static void __setscheduler_prio(struct task_struct *p, int prio) in __setscheduler_prio() argument
7060 p->sched_class = &dl_sched_class; in __setscheduler_prio()
7062 p->sched_class = &rt_sched_class; in __setscheduler_prio()
7064 p->sched_class = &fair_sched_class; in __setscheduler_prio()
7066 p->prio = prio; in __setscheduler_prio()
7079 static inline int rt_effective_prio(struct task_struct *p, int prio) in rt_effective_prio() argument
7081 struct task_struct *pi_task = rt_mutex_get_top_task(p); in rt_effective_prio()
7097 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) in rt_mutex_setprio() argument
7106 trace_android_rvh_rtmutex_prepare_setprio(p, pi_task); in rt_mutex_setprio()
7108 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7110 trace_android_rvh_rtmutex_force_update(p, pi_task, &update); in rt_mutex_setprio()
7114 if (!update && p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7117 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
7129 p->pi_top_task = pi_task; in rt_mutex_setprio()
7134 if (!update && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7149 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7150 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7151 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7155 trace_sched_pi_setprio(p, pi_task); in rt_mutex_setprio()
7156 oldprio = p->prio; in rt_mutex_setprio()
7161 prev_class = p->sched_class; in rt_mutex_setprio()
7162 queued = task_on_rq_queued(p); in rt_mutex_setprio()
7163 running = task_current(rq, p); in rt_mutex_setprio()
7165 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
7167 put_prev_task(rq, p); in rt_mutex_setprio()
7179 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7181 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7182 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7185 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7189 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7194 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7196 p->rt.timeout = 0; in rt_mutex_setprio()
7199 __setscheduler_prio(p, prio); in rt_mutex_setprio()
7202 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
7204 set_next_task(rq, p); in rt_mutex_setprio()
7206 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
7218 static inline int rt_effective_prio(struct task_struct *p, int prio) in rt_effective_prio() argument
7224 void set_user_nice(struct task_struct *p, long nice) in set_user_nice() argument
7231 trace_android_rvh_set_user_nice(p, &nice, &allowed); in set_user_nice()
7232 if ((task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) && !allowed) in set_user_nice()
7238 rq = task_rq_lock(p, &rf); in set_user_nice()
7241 trace_android_rvh_set_user_nice_locked(p, &nice); in set_user_nice()
7242 if (task_nice(p) == nice) in set_user_nice()
7251 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in set_user_nice()
7252 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
7255 queued = task_on_rq_queued(p); in set_user_nice()
7256 running = task_current(rq, p); in set_user_nice()
7258 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
7260 put_prev_task(rq, p); in set_user_nice()
7262 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
7263 set_load_weight(p, true); in set_user_nice()
7264 old_prio = p->prio; in set_user_nice()
7265 p->prio = effective_prio(p); in set_user_nice()
7268 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
7270 set_next_task(rq, p); in set_user_nice()
7276 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
7279 task_rq_unlock(rq, p, &rf); in set_user_nice()
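Note: in-kernel callers usually pass current to set_user_nice() above, e.g. a housekeeping kthread demoting itself (illustrative, not taken from this file):

	set_user_nice(current, MAX_NICE);	/* nice 19, the lowest weight for SCHED_NORMAL */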
7291 static bool is_nice_reduction(const struct task_struct *p, const int nice) in is_nice_reduction() argument
7296 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); in is_nice_reduction()
7304 int can_nice(const struct task_struct *p, const int nice) in can_nice() argument
7306 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); in can_nice()
7356 int task_prio(const struct task_struct *p) in task_prio() argument
7358 return p->prio - MAX_RT_PRIO; in task_prio()
7437 struct task_struct *p) in effective_cpu_util() argument
7445 trace_android_rvh_effective_cpu_util(cpu, util_cfs, max, type, p, &new_util); in effective_cpu_util()
7477 util = uclamp_rq_util_with(rq, util, p); in effective_cpu_util()
7551 static void __setscheduler_params(struct task_struct *p, in __setscheduler_params() argument
7557 policy = p->policy; in __setscheduler_params()
7559 p->policy = policy; in __setscheduler_params()
7562 __setparam_dl(p, attr); in __setscheduler_params()
7564 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in __setscheduler_params()
7571 p->rt_priority = attr->sched_priority; in __setscheduler_params()
7572 p->normal_prio = normal_prio(p); in __setscheduler_params()
7573 set_load_weight(p, true); in __setscheduler_params()
7579 static bool check_same_owner(struct task_struct *p) in check_same_owner() argument
7585 pcred = __task_cred(p); in check_same_owner()
7597 static int user_check_sched_setscheduler(struct task_struct *p, in user_check_sched_setscheduler() argument
7602 if (attr->sched_nice < task_nice(p) && in user_check_sched_setscheduler()
7603 !is_nice_reduction(p, attr->sched_nice)) in user_check_sched_setscheduler()
7608 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); in user_check_sched_setscheduler()
7611 if (policy != p->policy && !rlim_rtprio) in user_check_sched_setscheduler()
7615 if (attr->sched_priority > p->rt_priority && in user_check_sched_setscheduler()
7633 if (task_has_idle_policy(p) && !idle_policy(policy)) { in user_check_sched_setscheduler()
7634 if (!is_nice_reduction(p, task_nice(p))) in user_check_sched_setscheduler()
7639 if (!check_same_owner(p)) in user_check_sched_setscheduler()
7643 if (p->sched_reset_on_fork && !reset_on_fork) in user_check_sched_setscheduler()
7655 static int __sched_setscheduler(struct task_struct *p, in __sched_setscheduler() argument
7674 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
7675 policy = oldpolicy = p->policy; in __sched_setscheduler()
7698 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); in __sched_setscheduler()
7705 retval = security_task_setscheduler(p); in __sched_setscheduler()
7712 retval = uclamp_validate(p, attr, user); in __sched_setscheduler()
7721 if (dl_policy(policy) || dl_policy(p->policy)) { in __sched_setscheduler()
7733 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
7739 if (p == rq->stop) { in __sched_setscheduler()
7748 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
7749 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) in __sched_setscheduler()
7751 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
7753 if (dl_policy(policy) && dl_param_changed(p, attr)) in __sched_setscheduler()
7758 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
7771 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
7772 !task_group_is_autogroup(task_group(p))) { in __sched_setscheduler()
7787 if (!cpumask_subset(span, p->cpus_ptr) || in __sched_setscheduler()
7797 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
7799 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7810 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { in __sched_setscheduler()
7815 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
7816 oldprio = p->prio; in __sched_setscheduler()
7827 newprio = rt_effective_prio(p, newprio); in __sched_setscheduler()
7832 queued = task_on_rq_queued(p); in __sched_setscheduler()
7833 running = task_current(rq, p); in __sched_setscheduler()
7835 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
7837 put_prev_task(rq, p); in __sched_setscheduler()
7839 prev_class = p->sched_class; in __sched_setscheduler()
7842 __setscheduler_params(p, attr); in __sched_setscheduler()
7843 __setscheduler_prio(p, newprio); in __sched_setscheduler()
7844 trace_android_rvh_setscheduler(p); in __sched_setscheduler()
7846 __setscheduler_uclamp(p, attr); in __sched_setscheduler()
7853 if (oldprio < p->prio) in __sched_setscheduler()
7856 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
7859 set_next_task(rq, p); in __sched_setscheduler()
7861 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
7866 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7871 rt_mutex_adjust_pi(p); in __sched_setscheduler()
7881 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7887 static int _sched_setscheduler(struct task_struct *p, int policy, in _sched_setscheduler() argument
7893 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
7903 return __sched_setscheduler(p, &attr, check, true); in _sched_setscheduler()
7917 int sched_setscheduler(struct task_struct *p, int policy, in sched_setscheduler() argument
7920 return _sched_setscheduler(p, policy, param, true); in sched_setscheduler()
7924 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) in sched_setattr() argument
7926 return __sched_setscheduler(p, attr, true, true); in sched_setattr()
7930 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) in sched_setattr_nocheck() argument
7932 return __sched_setscheduler(p, attr, false, true); in sched_setattr_nocheck()
7949 int sched_setscheduler_nocheck(struct task_struct *p, int policy, in sched_setscheduler_nocheck() argument
7952 return _sched_setscheduler(p, policy, param, false); in sched_setscheduler_nocheck()
7974 void sched_set_fifo(struct task_struct *p) in sched_set_fifo() argument
7977 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo()
7984 void sched_set_fifo_low(struct task_struct *p) in sched_set_fifo_low() argument
7987 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo_low()
7991 void sched_set_normal(struct task_struct *p, int nice) in sched_set_normal() argument
7997 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); in sched_set_normal()
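Note: sched_set_fifo(), sched_set_fifo_low() and sched_set_normal() above are the convenience helpers for in-kernel policy changes. An illustrative sequence inside a kthread (not taken from this file):

	sched_set_fifo(current);	/* SCHED_FIFO at the default in-kernel priority */
	/* ... latency-sensitive work ... */
	sched_set_normal(current, 0);	/* back to SCHED_NORMAL, nice 0 */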
8005 struct task_struct *p; in do_sched_setscheduler() local
8015 p = find_process_by_pid(pid); in do_sched_setscheduler()
8016 if (p != NULL) in do_sched_setscheduler()
8017 retval = sched_setscheduler(p, policy, &lparam); in do_sched_setscheduler()
8068 static void get_params(struct task_struct *p, struct sched_attr *attr) in get_params() argument
8070 if (task_has_dl_policy(p)) in get_params()
8071 __getparam_dl(p, attr); in get_params()
8072 else if (task_has_rt_policy(p)) in get_params()
8073 attr->sched_priority = p->rt_priority; in get_params()
8075 attr->sched_nice = task_nice(p); in get_params()
8116 struct task_struct *p; in SYSCALL_DEFINE3() local
8133 p = find_process_by_pid(pid); in SYSCALL_DEFINE3()
8134 if (likely(p)) in SYSCALL_DEFINE3()
8135 get_task_struct(p); in SYSCALL_DEFINE3()
8138 if (likely(p)) { in SYSCALL_DEFINE3()
8140 get_params(p, &attr); in SYSCALL_DEFINE3()
8141 retval = sched_setattr(p, &attr); in SYSCALL_DEFINE3()
8142 put_task_struct(p); in SYSCALL_DEFINE3()
8157 struct task_struct *p; in SYSCALL_DEFINE1() local
8165 p = find_process_by_pid(pid); in SYSCALL_DEFINE1()
8166 if (p) { in SYSCALL_DEFINE1()
8167 retval = security_task_getscheduler(p); in SYSCALL_DEFINE1()
8169 retval = p->policy in SYSCALL_DEFINE1()
8170 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); in SYSCALL_DEFINE1()
8187 struct task_struct *p; in SYSCALL_DEFINE2() local
8194 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
8196 if (!p) in SYSCALL_DEFINE2()
8199 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
8203 if (task_has_rt_policy(p)) in SYSCALL_DEFINE2()
8204 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
8269 struct task_struct *p; in SYSCALL_DEFINE4() local
8277 p = find_process_by_pid(pid); in SYSCALL_DEFINE4()
8279 if (!p) in SYSCALL_DEFINE4()
8282 retval = security_task_getscheduler(p); in SYSCALL_DEFINE4()
8286 kattr.sched_policy = p->policy; in SYSCALL_DEFINE4()
8287 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
8289 get_params(p, &kattr); in SYSCALL_DEFINE4()
8298 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; in SYSCALL_DEFINE4()
8299 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; in SYSCALL_DEFINE4()
8312 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity() argument
8320 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) in dl_task_check_affinity()
8330 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
8338 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask) in __sched_setaffinity() argument
8351 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
8354 retval = dl_task_check_affinity(p, new_mask); in __sched_setaffinity()
8358 retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER); in __sched_setaffinity()
8362 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
8381 struct task_struct *p; in sched_setaffinity() local
8387 p = find_process_by_pid(pid); in sched_setaffinity()
8388 if (!p) { in sched_setaffinity()
8394 get_task_struct(p); in sched_setaffinity()
8397 if (p->flags & PF_NO_SETAFFINITY) { in sched_setaffinity()
8402 if (!check_same_owner(p)) { in sched_setaffinity()
8404 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { in sched_setaffinity()
8412 trace_android_vh_sched_setaffinity_early(p, in_mask, &skip); in sched_setaffinity()
8415 retval = security_task_setscheduler(p); in sched_setaffinity()
8419 retval = __sched_setaffinity(p, in_mask); in sched_setaffinity()
8420 trace_android_rvh_sched_setaffinity(p, in_mask, &retval); in sched_setaffinity()
8423 put_task_struct(p); in sched_setaffinity()
8464 struct task_struct *p; in sched_getaffinity() local
8471 p = find_process_by_pid(pid); in sched_getaffinity()
8472 if (!p) in sched_getaffinity()
8475 retval = security_task_getscheduler(p); in sched_getaffinity()
8479 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_getaffinity()
8480 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); in sched_getaffinity()
8481 trace_android_rvh_sched_getaffinity(p, mask); in sched_getaffinity()
8482 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_getaffinity()
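Note: sched_setaffinity()/sched_getaffinity() above back the identically named syscalls. A hedged userspace sketch using the glibc wrappers; pin_self_to_cpu2() is an illustrative name:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	static void pin_self_to_cpu2(void)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(2, &set);		/* allow CPU 2 only */
		if (sched_setaffinity(0, sizeof(set), &set))	/* pid 0 == calling thread */
			perror("sched_setaffinity");
	}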
8889 int __sched yield_to(struct task_struct *p, bool preempt) in yield_to() argument
8900 p_rq = task_rq(p); in yield_to()
8911 if (task_rq(p) != p_rq) { in yield_to()
8919 if (curr->sched_class != p->sched_class) in yield_to()
8922 if (task_on_cpu(p_rq, p) || !task_is_running(p)) in yield_to()
8925 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
9044 struct task_struct *p; in sched_rr_get_interval() local
9055 p = find_process_by_pid(pid); in sched_rr_get_interval()
9056 if (!p) in sched_rr_get_interval()
9059 retval = security_task_getscheduler(p); in sched_rr_get_interval()
9063 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
9065 if (p->sched_class->get_rr_interval) in sched_rr_get_interval()
9066 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
9067 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
9114 void sched_show_task(struct task_struct *p) in sched_show_task() argument
9119 if (!try_get_task_stack(p)) in sched_show_task()
9122 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
9124 if (task_is_running(p)) in sched_show_task()
9127 free = stack_not_used(p); in sched_show_task()
9131 if (pid_alive(p)) in sched_show_task()
9132 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
9135 free, task_pid_nr(p), ppid, in sched_show_task()
9136 read_task_thread_flags(p)); in sched_show_task()
9138 print_worker_info(KERN_INFO, p); in sched_show_task()
9139 print_stop_info(KERN_INFO, p); in sched_show_task()
9140 trace_android_vh_sched_show_task(p); in sched_show_task()
9141 show_stack(p, NULL, KERN_INFO); in sched_show_task()
9142 put_task_stack(p); in sched_show_task()
9147 state_filter_match(unsigned long state_filter, struct task_struct *p) in state_filter_match() argument
9149 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
9172 struct task_struct *g, *p; in show_state_filter() local
9175 for_each_process_thread(g, p) { in show_state_filter()
9185 if (state_filter_match(state_filter, p)) in show_state_filter()
9186 sched_show_task(p); in show_state_filter()
9289 int task_can_attach(struct task_struct *p) in task_can_attach() argument
9302 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
9312 int migrate_task_to(struct task_struct *p, int target_cpu) in migrate_task_to() argument
9314 struct migration_arg arg = { p, target_cpu }; in migrate_task_to()
9315 int curr_cpu = task_cpu(p); in migrate_task_to()
9320 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
9325 trace_sched_move_numa(p, curr_cpu, target_cpu); in migrate_task_to()
9333 void sched_setnuma(struct task_struct *p, int nid) in sched_setnuma() argument
9339 rq = task_rq_lock(p, &rf); in sched_setnuma()
9340 queued = task_on_rq_queued(p); in sched_setnuma()
9341 running = task_current(rq, p); in sched_setnuma()
9344 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
9346 put_prev_task(rq, p); in sched_setnuma()
9348 p->numa_preferred_nid = nid; in sched_setnuma()
9351 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
9353 set_next_task(rq, p); in sched_setnuma()
9354 task_rq_unlock(rq, p, &rf); in sched_setnuma()
9398 struct task_struct *p = arg; in __balance_push_cpu_stop() local
9403 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
9408 if (task_rq(p) == rq && task_on_rq_queued(p)) { in __balance_push_cpu_stop()
9409 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
9410 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
9414 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
9416 put_task_struct(p); in __balance_push_cpu_stop()
9785 struct task_struct *g, *p; in dump_rq_tasks() local
9791 for_each_process_thread(g, p) { in dump_rq_tasks()
9792 if (task_cpu(p) != cpu) in dump_rq_tasks()
9795 if (!task_on_rq_queued(p)) in dump_rq_tasks()
9798 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
10236 struct task_struct *g, *p; in normalize_rt_tasks() local
10242 for_each_process_thread(g, p) { in normalize_rt_tasks()
10246 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
10249 p->se.exec_start = 0; in normalize_rt_tasks()
10250 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
10251 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
10252 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
10254 if (!dl_task(p) && !rt_task(p)) { in normalize_rt_tasks()
10259 if (task_nice(p) < 0) in normalize_rt_tasks()
10260 set_user_nice(p, 0); in normalize_rt_tasks()
10264 __sched_setscheduler(p, &attr, false, false); in normalize_rt_tasks()
10313 void ia64_set_curr_task(int cpu, struct task_struct *p) in ia64_set_curr_task() argument
10315 cpu_curr(cpu) = p; in ia64_set_curr_task()