Lines Matching refs:p

43 static inline int normal_prio(struct task_struct *p)  in normal_prio()  argument
45 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); in normal_prio()
55 static int effective_prio(struct task_struct *p) in effective_prio() argument
57 p->normal_prio = normal_prio(p); in effective_prio()
63 if (!rt_or_dl_prio(p->prio)) in effective_prio()
64 return p->normal_prio; in effective_prio()
65 return p->prio; in effective_prio()
68 void set_user_nice(struct task_struct *p, long nice) in set_user_nice() argument
75 trace_android_rvh_set_user_nice(p, &nice); in set_user_nice()
76 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) in set_user_nice()
82 CLASS(task_rq_lock, rq_guard)(p); in set_user_nice()
87 trace_android_rvh_set_user_nice_locked(p, &nice, &allowed); in set_user_nice()
97 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in set_user_nice()
98 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
102 queued = task_on_rq_queued(p); in set_user_nice()
103 running = task_current_donor(rq, p); in set_user_nice()
105 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
107 put_prev_task(rq, p); in set_user_nice()
109 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
110 set_load_weight(p, true); in set_user_nice()
111 old_prio = p->prio; in set_user_nice()
112 p->prio = effective_prio(p); in set_user_nice()
115 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
117 set_next_task(rq, p); in set_user_nice()
123 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
135 static bool is_nice_reduction(const struct task_struct *p, const int nice) in is_nice_reduction() argument
140 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); in is_nice_reduction()
148 int can_nice(const struct task_struct *p, const int nice) in can_nice() argument
150 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); in can_nice()
200 int task_prio(const struct task_struct *p) in task_prio() argument
202 return p->prio - MAX_RT_PRIO; in task_prio()
284 struct task_struct *p; in find_get_task() local
287 p = find_process_by_pid(pid); in find_get_task()
288 if (likely(p)) in find_get_task()
289 get_task_struct(p); in find_get_task()
291 return p; in find_get_task()
303 static void __setscheduler_params(struct task_struct *p, in __setscheduler_params() argument
309 policy = p->policy; in __setscheduler_params()
311 p->policy = policy; in __setscheduler_params()
314 __setparam_dl(p, attr); in __setscheduler_params()
316 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in __setscheduler_params()
318 p->se.custom_slice = 1; in __setscheduler_params()
319 p->se.slice = clamp_t(u64, attr->sched_runtime, in __setscheduler_params()
323 p->se.custom_slice = 0; in __setscheduler_params()
324 p->se.slice = sysctl_sched_base_slice; in __setscheduler_params()
329 if (rt_or_dl_task_policy(p)) { in __setscheduler_params()
330 p->timer_slack_ns = 0; in __setscheduler_params()
331 } else if (p->timer_slack_ns == 0) { in __setscheduler_params()
333 p->timer_slack_ns = p->default_timer_slack_ns; in __setscheduler_params()
341 p->rt_priority = attr->sched_priority; in __setscheduler_params()
342 p->normal_prio = normal_prio(p); in __setscheduler_params()
343 set_load_weight(p, true); in __setscheduler_params()
349 static bool check_same_owner(struct task_struct *p) in check_same_owner() argument
354 pcred = __task_cred(p); in check_same_owner()
361 static int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
364 int util_min = p->uclamp_req[UCLAMP_MIN].value; in uclamp_validate()
365 int util_max = p->uclamp_req[UCLAMP_MAX].value; in uclamp_validate()
369 trace_android_vh_uclamp_validate(p, attr, &ret, &done); in uclamp_validate()
428 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
434 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; in __setscheduler_uclamp()
444 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) in __setscheduler_uclamp()
458 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], in __setscheduler_uclamp()
460 trace_android_vh_setscheduler_uclamp(p, UCLAMP_MIN, attr->sched_util_min); in __setscheduler_uclamp()
465 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], in __setscheduler_uclamp()
467 trace_android_vh_setscheduler_uclamp(p, UCLAMP_MAX, attr->sched_util_max); in __setscheduler_uclamp()
473 static inline int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
478 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
487 static int user_check_sched_setscheduler(struct task_struct *p, in user_check_sched_setscheduler() argument
492 if (attr->sched_nice < task_nice(p) && in user_check_sched_setscheduler()
493 !is_nice_reduction(p, attr->sched_nice)) in user_check_sched_setscheduler()
498 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); in user_check_sched_setscheduler()
501 if (policy != p->policy && !rlim_rtprio) in user_check_sched_setscheduler()
505 if (attr->sched_priority > p->rt_priority && in user_check_sched_setscheduler()
523 if (task_has_idle_policy(p) && !idle_policy(policy)) { in user_check_sched_setscheduler()
524 if (!is_nice_reduction(p, task_nice(p))) in user_check_sched_setscheduler()
529 if (!check_same_owner(p)) in user_check_sched_setscheduler()
533 if (p->sched_reset_on_fork && !reset_on_fork) in user_check_sched_setscheduler()
551 int __sched_setscheduler(struct task_struct *p, in __sched_setscheduler() argument
570 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
571 policy = oldpolicy = p->policy; in __sched_setscheduler()
594 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); in __sched_setscheduler()
601 retval = security_task_setscheduler(p); in __sched_setscheduler()
608 retval = uclamp_validate(p, attr); in __sched_setscheduler()
617 if (dl_policy(policy) || dl_policy(p->policy)) { in __sched_setscheduler()
629 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
635 if (p == rq->stop) { in __sched_setscheduler()
640 retval = scx_check_setscheduler(p, policy); in __sched_setscheduler()
648 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
650 (attr->sched_nice != task_nice(p) || in __sched_setscheduler()
651 (attr->sched_runtime != p->se.slice))) in __sched_setscheduler()
653 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
655 if (dl_policy(policy) && dl_param_changed(p, attr)) in __sched_setscheduler()
660 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
673 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
674 !task_group_is_autogroup(task_group(p))) { in __sched_setscheduler()
689 if (!cpumask_subset(span, p->cpus_ptr) || in __sched_setscheduler()
699 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
701 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
712 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { in __sched_setscheduler()
717 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
718 oldprio = p->prio; in __sched_setscheduler()
729 newprio = rt_effective_prio(p, newprio); in __sched_setscheduler()
734 prev_class = p->sched_class; in __sched_setscheduler()
737 if (prev_class != next_class && p->se.sched_delayed) in __sched_setscheduler()
738 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK); in __sched_setscheduler()
740 queued = task_on_rq_queued(p); in __sched_setscheduler()
741 running = task_current_donor(rq, p); in __sched_setscheduler()
743 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
745 put_prev_task(rq, p); in __sched_setscheduler()
748 __setscheduler_params(p, attr); in __sched_setscheduler()
749 p->sched_class = next_class; in __sched_setscheduler()
750 p->prio = newprio; in __sched_setscheduler()
751 trace_android_rvh_setscheduler(p); in __sched_setscheduler()
753 __setscheduler_uclamp(p, attr); in __sched_setscheduler()
754 check_class_changing(rq, p, prev_class); in __sched_setscheduler()
761 if (oldprio < p->prio) in __sched_setscheduler()
764 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
767 set_next_task(rq, p); in __sched_setscheduler()
769 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
774 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
779 rt_mutex_adjust_pi(p); in __sched_setscheduler()
789 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
795 static int _sched_setscheduler(struct task_struct *p, int policy, in _sched_setscheduler() argument
801 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
804 if (p->se.custom_slice) in _sched_setscheduler()
805 attr.sched_runtime = p->se.slice; in _sched_setscheduler()
814 return __sched_setscheduler(p, &attr, check, true); in _sched_setscheduler()
828 int sched_setscheduler(struct task_struct *p, int policy, in sched_setscheduler() argument
831 return _sched_setscheduler(p, policy, param, true); in sched_setscheduler()
835 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) in sched_setattr() argument
837 return __sched_setscheduler(p, attr, true, true); in sched_setattr()
841 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) in sched_setattr_nocheck() argument
843 return __sched_setscheduler(p, attr, false, true); in sched_setattr_nocheck()
860 int sched_setscheduler_nocheck(struct task_struct *p, int policy, in sched_setscheduler_nocheck() argument
863 return _sched_setscheduler(p, policy, param, false); in sched_setscheduler_nocheck()
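
The *_nocheck variants listed above skip the permission and rlimit checks and are meant for threads the kernel itself owns. As an illustration only (the helper name and the 2 ms / 10 ms figures are made-up example values, not from this file), kernel code could hand one of its own threads a SCHED_DEADLINE reservation like this:

#include <linux/sched.h>
#include <linux/time64.h>
#include <uapi/linux/sched/types.h>

static int make_thread_deadline(struct task_struct *p)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 2 * NSEC_PER_MSEC,	/* 2 ms of budget ...      */
		.sched_deadline	= 10 * NSEC_PER_MSEC,	/* ... due within 10 ms    */
		.sched_period	= 10 * NSEC_PER_MSEC,	/* ... replenished every 10 ms */
	};

	/* No capability checks here; admission control can still return -EBUSY. */
	return sched_setattr_nocheck(p, &attr);
}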
885 void sched_set_fifo(struct task_struct *p) in sched_set_fifo() argument
888 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo()
895 void sched_set_fifo_low(struct task_struct *p) in sched_set_fifo_low() argument
898 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo_low()
902 void sched_set_normal(struct task_struct *p, int nice) in sched_set_normal() argument
908 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); in sched_set_normal()
922 CLASS(find_get_task, p)(pid); in do_sched_setscheduler()
923 if (!p) in do_sched_setscheduler()
926 return sched_setscheduler(p, policy, &lparam); in do_sched_setscheduler()
974 static void get_params(struct task_struct *p, struct sched_attr *attr) in get_params() argument
976 if (task_has_dl_policy(p)) { in get_params()
977 __getparam_dl(p, attr); in get_params()
978 } else if (task_has_rt_policy(p)) { in get_params()
979 attr->sched_priority = p->rt_priority; in get_params()
981 attr->sched_nice = task_nice(p); in get_params()
982 attr->sched_runtime = p->se.slice; in get_params()
1038 CLASS(find_get_task, p)(pid); in SYSCALL_DEFINE3()
1039 if (!p) in SYSCALL_DEFINE3()
1043 get_params(p, &attr); in SYSCALL_DEFINE3()
1045 return sched_setattr(p, &attr); in SYSCALL_DEFINE3()
1057 struct task_struct *p; in SYSCALL_DEFINE1() local
1064 p = find_process_by_pid(pid); in SYSCALL_DEFINE1()
1065 if (!p) in SYSCALL_DEFINE1()
1068 retval = security_task_getscheduler(p); in SYSCALL_DEFINE1()
1070 retval = p->policy; in SYSCALL_DEFINE1()
1071 if (p->sched_reset_on_fork) in SYSCALL_DEFINE1()
1088 struct task_struct *p; in SYSCALL_DEFINE2() local
1095 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
1096 if (!p) in SYSCALL_DEFINE2()
1099 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
1103 if (task_has_rt_policy(p)) in SYSCALL_DEFINE2()
1104 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
1163 struct task_struct *p; in SYSCALL_DEFINE4() local
1171 p = find_process_by_pid(pid); in SYSCALL_DEFINE4()
1172 if (!p) in SYSCALL_DEFINE4()
1175 retval = security_task_getscheduler(p); in SYSCALL_DEFINE4()
1179 kattr.sched_policy = p->policy; in SYSCALL_DEFINE4()
1180 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
1182 get_params(p, &kattr); in SYSCALL_DEFINE4()
1191 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; in SYSCALL_DEFINE4()
1192 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; in SYSCALL_DEFINE4()
1200 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity() argument
1206 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) in dl_task_check_affinity()
1216 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
1223 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) in __sched_setaffinity() argument
1236 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
1242 retval = dl_task_check_affinity(p, new_mask); in __sched_setaffinity()
1246 retval = __set_cpus_allowed_ptr(p, ctx); in __sched_setaffinity()
1250 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
1273 __set_cpus_allowed_ptr(p, ctx); in __sched_setaffinity()
1291 CLASS(find_get_task, p)(pid); in sched_setaffinity()
1292 if (!p) in sched_setaffinity()
1295 if (p->flags & PF_NO_SETAFFINITY) in sched_setaffinity()
1298 if (!check_same_owner(p)) { in sched_setaffinity()
1300 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) in sched_setaffinity()
1304 trace_android_vh_sched_setaffinity_early(p, in_mask, &skip); in sched_setaffinity()
1307 retval = security_task_setscheduler(p); in sched_setaffinity()
1328 retval = __sched_setaffinity(p, &ac); in sched_setaffinity()
1371 struct task_struct *p; in sched_getaffinity() local
1375 p = find_process_by_pid(pid); in sched_getaffinity()
1376 if (!p) in sched_getaffinity()
1379 retval = security_task_getscheduler(p); in sched_getaffinity()
1383 guard(raw_spinlock_irqsave)(&p->pi_lock); in sched_getaffinity()
1384 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); in sched_getaffinity()
1508 int __sched yield_to(struct task_struct *p, bool preempt) in yield_to() argument
1514 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in yield_to()
1518 p_rq = task_rq(p); in yield_to()
1527 if (task_rq(p) != p_rq) in yield_to()
1533 if (curr->sched_class != p->sched_class) in yield_to()
1536 if (task_on_cpu(p_rq, p) || !task_is_running(p)) in yield_to()
1539 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
1622 struct task_struct *p = find_process_by_pid(pid); in sched_rr_get_interval() local
1623 if (!p) in sched_rr_get_interval()
1626 retval = security_task_getscheduler(p); in sched_rr_get_interval()
1630 scoped_guard (task_rq_lock, p) { in sched_rr_get_interval()
1632 if (p->sched_class->get_rr_interval) in sched_rr_get_interval()
1633 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()