Lines matching refs: p

Each entry shows the source line number, the matched text, and the enclosing function; a trailing "argument" or "local" notes how p is bound at that line. Nearly all matches refer to a struct task_struct * parameter or local variable.

208 static inline int task_has_idle_policy(struct task_struct *p)  in task_has_idle_policy()  argument
210 return idle_policy(p->policy); in task_has_idle_policy()
213 static inline int task_has_rt_policy(struct task_struct *p) in task_has_rt_policy() argument
215 return rt_policy(p->policy); in task_has_rt_policy()
218 static inline int task_has_dl_policy(struct task_struct *p) in task_has_dl_policy() argument
220 return dl_policy(p->policy); in task_has_dl_policy()
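The three task_has_*_policy() helpers at source lines 208-220 simply test p->policy through the corresponding idle_policy()/rt_policy()/dl_policy() predicates. A minimal sketch of how such predicates are typically consumed (illustrative only, kernel context assumed; example_account_task is not a real kernel function):

	static void example_account_task(struct task_struct *p)
	{
		if (task_has_dl_policy(p)) {
			/* SCHED_DEADLINE: per-task bandwidth reservation */
		} else if (task_has_rt_policy(p)) {
			/* SCHED_FIFO / SCHED_RR: fixed-priority real-time */
		} else if (task_has_idle_policy(p)) {
			/* SCHED_IDLE: lowest-weight fair tasks */
		} else {
			/* SCHED_NORMAL / SCHED_BATCH: handled by CFS */
		}
	}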
290 void __dl_clear_params(struct task_struct *p);
330 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
331 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
332 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
334 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
1219 static inline bool is_migration_disabled(struct task_struct *p) in is_migration_disabled() argument
1222 return p->migration_disabled; in is_migration_disabled()
1232 #define task_rq(p) cpu_rq(task_cpu(p)) argument
1280 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1286 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1289 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1309 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
1313 struct task_struct *p, in sched_group_cookie_match() argument
1322 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { in sched_group_cookie_match()
1323 if (sched_core_cookie_match(cpu_rq(cpu), p)) in sched_group_cookie_match()
1329 static inline bool sched_core_enqueued(struct task_struct *p) in sched_core_enqueued() argument
1331 return !RB_EMPTY_NODE(&p->core_node); in sched_core_enqueued()
1334 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1335 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1362 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1367 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1373 struct task_struct *p, in sched_group_cookie_match() argument
1445 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq() argument
1447 return p->se.cfs_rq; in task_cfs_rq()
1469 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq() argument
1471 return &task_rq(p)->cfs; in task_cfs_rq()
1476 struct task_struct *p = task_of(se); in cfs_rq_of() local
1477 struct rq *rq = task_rq(p); in cfs_rq_of()
1592 struct task_struct *p, int dest_cpu);
1642 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1645 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1646 __acquires(p->pi_lock)
1657 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1659 __releases(p->pi_lock) in task_rq_unlock()
1663 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_unlock()
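task_rq_lock() at source line 1645 pins the task to its runqueue by taking both p->pi_lock and the runqueue lock, and task_rq_unlock() releases them again, restoring the saved IRQ flags as shown at source line 1663. A minimal sketch of the usual calling pattern (illustrative; example_inspect_task is an assumed name):

	static void example_inspect_task(struct task_struct *p)
	{
		struct rq_flags rf;
		struct rq *rq;

		rq = task_rq_lock(p, &rf);	/* acquires p->pi_lock and the rq lock */
		/* p cannot change runqueue here; p->on_rq, p->policy etc. are stable */
		task_rq_unlock(rq, p, &rf);	/* releases both locks */
	}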
1759 extern void sched_setnuma(struct task_struct *p, int node);
1760 extern int migrate_task_to(struct task_struct *p, int cpu);
1761 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1764 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) in init_numa_balancing() argument
1771 extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1794 #define rcu_dereference_check_sched_domain(p) \ argument
1795 rcu_dereference_check((p), \
1968 static inline struct task_group *task_group(struct task_struct *p) in task_group() argument
1970 return p->sched_task_group; in task_group()
1974 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) in set_task_rq() argument
1977 struct task_group *tg = task_group(p); in set_task_rq()
1981 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); in set_task_rq()
1982 p->se.cfs_rq = tg->cfs_rq[cpu]; in set_task_rq()
1983 p->se.parent = tg->se[cpu]; in set_task_rq()
1984 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; in set_task_rq()
1988 p->rt.rt_rq = tg->rt_rq[cpu]; in set_task_rq()
1989 p->rt.parent = tg->rt_se[cpu]; in set_task_rq()
1995 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } in set_task_rq() argument
1996 static inline struct task_group *task_group(struct task_struct *p) in task_group() argument
2003 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) in __set_task_cpu() argument
2005 set_task_rq(p, cpu); in __set_task_cpu()
2013 WRITE_ONCE(task_thread_info(p)->cpu, cpu); in __set_task_cpu()
2014 p->wake_cpu = cpu; in __set_task_cpu()
2100 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
2102 return rq->curr == p; in task_current()
2105 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) in task_on_cpu() argument
2108 return p->on_cpu; in task_on_cpu()
2110 return task_current(rq, p); in task_on_cpu()
2114 static inline int task_on_rq_queued(struct task_struct *p) in task_on_rq_queued() argument
2116 return p->on_rq == TASK_ON_RQ_QUEUED; in task_on_rq_queued()
2119 static inline int task_on_rq_migrating(struct task_struct *p) in task_on_rq_migrating() argument
2121 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; in task_on_rq_migrating()
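Taken together, task_current(), task_on_cpu(), task_on_rq_queued() and task_on_rq_migrating() describe where a task stands relative to a runqueue. A rough illustration of how they partition those states (hedged sketch, not kernel code; the caller is assumed to hold the relevant locks):

	static const char *example_task_state(struct rq *rq, struct task_struct *p)
	{
		if (task_on_cpu(rq, p))
			return "currently executing";
		if (task_on_rq_queued(p))
			return "runnable, waiting on a runqueue";
		if (task_on_rq_migrating(p))
			return "in flight between runqueues";
		return "blocked or sleeping";
	}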
2202 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2203 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2205 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2207 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2211 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2212 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2216 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
2220 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
2224 void (*set_cpus_allowed)(struct task_struct *p,
2231 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2234 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2235 void (*task_fork)(struct task_struct *p);
2236 void (*task_dead)(struct task_struct *p);
2254 void (*task_change_group)(struct task_struct *p);
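The entries from source lines 2202-2254 are callbacks of struct sched_class, the per-policy operations table. Core scheduler code does not manipulate class-specific runqueues directly; it dispatches through p->sched_class. A hedged sketch of that dispatch pattern (illustrative names, not a copy of core.c):

	static void example_enqueue(struct rq *rq, struct task_struct *p, int flags)
	{
		/* rq lock is assumed to be held by the caller */
		p->sched_class->enqueue_task(rq, p, flags);
	}

	static int example_pick_cpu(struct task_struct *p, int prev_cpu, int wake_flags)
	{
		/* wakeup path: ask the task's policy class for a target CPU */
		return p->sched_class->select_task_rq(p, prev_cpu, wake_flags);
	}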
2337 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flag…
2341 struct task_struct *p = rq->curr; in get_push_task() local
2348 if (p->nr_cpus_allowed == 1) in get_push_task()
2351 if (p->migration_disabled) in get_push_task()
2355 return get_task_struct(p); in get_push_task()
2398 extern void reweight_task(struct task_struct *p, int prio);
2419 extern void post_init_entity_util_avg(struct task_struct *p);
2477 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2478 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2480 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2803 show_numa_stats(struct task_struct *p, struct seq_file *m);
2948 struct task_struct *p);
2958 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) in dl_task_fits_capacity() argument
2962 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); in dl_task_fits_capacity()
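The dl_task_fits_capacity() test at source line 2962 is a fixed-point comparison. Assuming the mainline definitions (dl_density = (dl_runtime << BW_SHIFT) / dl_deadline as set up by __setparam_dl(), BW_SHIFT = 20, SCHED_CAPACITY_SHIFT = 10), the check

	cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT)

reduces to cap / SCHED_CAPACITY_SCALE >= dl_runtime / dl_deadline: the CPU fits the task if its relative capacity is at least the task's runtime-to-deadline ratio.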
3030 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
3068 struct task_struct *p) in uclamp_rq_util_with() argument
3076 if (p) { in uclamp_rq_util_with()
3077 min_util = uclamp_eff_value(p, UCLAMP_MIN); in uclamp_rq_util_with()
3078 max_util = uclamp_eff_value(p, UCLAMP_MAX); in uclamp_rq_util_with()
3130 static inline unsigned long uclamp_eff_value(struct task_struct *p, in uclamp_eff_value() argument
3141 struct task_struct *p) in uclamp_rq_util_with() argument
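uclamp_rq_util_with() (fragments at source lines 3068-3078, with the !CONFIG_UCLAMP_TASK stub at 3141) folds a task's effective utilization clamps into a runqueue utilization estimate. A minimal sketch of just the task-side clamping step (simplified; the real helper also aggregates the runqueue's own clamps):

	static unsigned long example_clamp_util(unsigned long util, struct task_struct *p)
	{
		unsigned long min_util = uclamp_eff_value(p, UCLAMP_MIN);
		unsigned long max_util = uclamp_eff_value(p, UCLAMP_MAX);

		return clamp(util, min_util, max_util);
	}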
3250 static inline bool is_per_cpu_kthread(struct task_struct *p) in is_per_cpu_kthread() argument
3252 if (!(p->flags & PF_KTHREAD)) in is_per_cpu_kthread()
3255 if (p->nr_cpus_allowed != 1) in is_per_cpu_kthread()
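Only the p-referencing lines of is_per_cpu_kthread() are listed above; filling in the omitted returns, the helper reads roughly as follows (reconstruction, may differ slightly between kernel versions):

	static inline bool is_per_cpu_kthread(struct task_struct *p)
	{
		if (!(p->flags & PF_KTHREAD))
			return false;		/* not a kernel thread */

		if (p->nr_cpus_allowed != 1)
			return false;		/* not pinned to a single CPU */

		return true;
	}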