| /kernel/sched/ |
| D | syscalls.c |
    43   static inline int normal_prio(struct task_struct *p)   in normal_prio() argument
    45   return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));   in normal_prio()
    55   static int effective_prio(struct task_struct *p)   in effective_prio() argument
    57   p->normal_prio = normal_prio(p);   in effective_prio()
    63   if (!rt_or_dl_prio(p->prio))   in effective_prio()
    64   return p->normal_prio;   in effective_prio()
    65   return p->prio;   in effective_prio()
    68   void set_user_nice(struct task_struct *p, long nice)   in set_user_nice() argument
    75   trace_android_rvh_set_user_nice(p, &nice);   in set_user_nice()
    76   if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)   in set_user_nice()
    [all …]
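The excerpts above revolve around the nice/priority mapping that `PRIO_TO_NICE(p->static_prio)` relies on: a nice value in [-20, 19] is offset into the static priority range starting at `MAX_RT_PRIO`. Below is a minimal standalone sketch of that mapping, with the constants copied as I understand them from include/linux/sched/prio.h; it is an illustration, not the kernel's implementation of normal_prio()/set_user_nice().

```c
#include <stdio.h>

/* Constants mirrored from include/linux/sched/prio.h (assumed unchanged upstream). */
#define MAX_RT_PRIO        100
#define NICE_WIDTH         40
#define DEFAULT_PRIO       (MAX_RT_PRIO + NICE_WIDTH / 2)   /* 120 */
#define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)

int main(void)
{
	/* nice 0 maps to static_prio 120, nice 19 to 139, nice -20 to 100. */
	for (int nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> static_prio %3d -> back to nice %3d\n",
		       nice, NICE_TO_PRIO(nice), PRIO_TO_NICE(NICE_TO_PRIO(nice)));
	return 0;
}
```

The round trip through NICE_TO_PRIO()/PRIO_TO_NICE() is why the excerpt can pass `PRIO_TO_NICE(p->static_prio)` straight into __normal_prio() for non-realtime tasks.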
|
| D | core.c |
    206   static inline int __task_prio(const struct task_struct *p)   in __task_prio() argument
    208   if (p->sched_class == &stop_sched_class) /* trumps deadline */   in __task_prio()
    211   if (p->dl_server)   in __task_prio()
    214   if (rt_or_dl_prio(p->prio))   in __task_prio()
    215   return p->prio; /* [-1, 99] */   in __task_prio()
    217   if (p->sched_class == &idle_sched_class)   in __task_prio()
    220   if (task_on_scx(p))   in __task_prio()
    301   const struct task_struct *p = __node_2_sc(node);   in rb_sched_core_cmp() local
    304   if (cookie < p->core_cookie)   in rb_sched_core_cmp()
    307   if (cookie > p->core_cookie)   in rb_sched_core_cmp()
    [all …]
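__task_prio() collapses all scheduling classes onto one comparable integer (stop class trumps deadline, RT/DL keep their [-1, 99] prio, fair and idle come later) so that rb_sched_core_cmp() can keep the per-runqueue core-scheduling tree ordered by (cookie, priority). A toy sketch of the same "compare cookie first, then a flattened priority" idea follows; the struct and the numeric bands are invented for illustration and are not the kernel's layout.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative bands only: stop class below everything, RT/DL in [-1, 99],
 * fair around 120, following the comments visible in the excerpt. */
struct toy_task {
	unsigned long core_cookie;
	int prio;
};

/* Same shape as rb_sched_core_cmp(): order by cookie, then by priority. */
static int toy_core_cmp(const void *a, const void *b)
{
	const struct toy_task *x = a, *y = b;

	if (x->core_cookie != y->core_cookie)
		return x->core_cookie < y->core_cookie ? -1 : 1;
	return x->prio - y->prio;
}

int main(void)
{
	struct toy_task tasks[] = {
		{ .core_cookie = 2, .prio = 120 },
		{ .core_cookie = 1, .prio =  50 },   /* RT band */
		{ .core_cookie = 1, .prio =  -2 },   /* stop class trumps everything */
	};

	qsort(tasks, 3, sizeof(tasks[0]), toy_core_cmp);
	for (int i = 0; i < 3; i++)
		printf("cookie=%lu prio=%d\n", tasks[i].core_cookie, tasks[i].prio);
	return 0;
}
```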
|
| D | ext.c |
    226   s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
    241   void (*enqueue)(struct task_struct *p, u64 enq_flags);
    257   void (*dequeue)(struct task_struct *p, u64 deq_flags);
    290   void (*tick)(struct task_struct *p);
    317   void (*runnable)(struct task_struct *p, u64 enq_flags);
    325   void (*running)(struct task_struct *p);
    336   void (*stopping)(struct task_struct *p, bool runnable);
    356   void (*quiescent)(struct task_struct *p, u64 deq_flags);
    398   void (*set_weight)(struct task_struct *p, u32 weight);
    407   void (*set_cpumask)(struct task_struct *p,
    [all …]
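These members are callbacks of the sched_ext operations table that an external (BPF) scheduler supplies: select a CPU at wakeup, enqueue/dequeue tasks, and get notified as a task becomes runnable, starts running, stops, or goes quiescent. A minimal sketch of the same callback-table pattern in plain C is shown below; the struct name and fields are hypothetical stand-ins, not the real struct sched_ext_ops layout.

```c
#include <stdio.h>
#include <stdint.h>

struct task;   /* opaque stand-in for struct task_struct */

/* Hypothetical mini ops table mirroring a few callbacks from the excerpt. */
struct mini_sched_ops {
	int32_t (*select_cpu)(struct task *p, int32_t prev_cpu, uint64_t wake_flags);
	void    (*enqueue)(struct task *p, uint64_t enq_flags);
	void    (*running)(struct task *p);
};

static int32_t demo_select_cpu(struct task *p, int32_t prev_cpu, uint64_t wake_flags)
{
	(void)p; (void)wake_flags;
	return prev_cpu;                  /* trivial policy: stay on the previous CPU */
}

static void demo_enqueue(struct task *p, uint64_t enq_flags)
{
	(void)p;
	printf("enqueue, flags=%llu\n", (unsigned long long)enq_flags);
}

static void demo_running(struct task *p)
{
	(void)p;
	printf("running\n");
}

static const struct mini_sched_ops demo_ops = {
	.select_cpu = demo_select_cpu,
	.enqueue    = demo_enqueue,
	.running    = demo_running,
};

int main(void)
{
	printf("cpu=%d\n", demo_ops.select_cpu(NULL, 3, 0));
	demo_ops.enqueue(NULL, 0);
	demo_ops.running(NULL);
	return 0;
}
```

The kernel invokes these hooks at the corresponding scheduling events; the sketch only shows how such an ops vector is filled and called through.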
|
| D | deadline.c |
    345   static void dl_change_utilization(struct task_struct *p, u64 new_bw)   in dl_change_utilization() argument
    347   WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);   in dl_change_utilization()
    349   if (task_on_rq_queued(p))   in dl_change_utilization()
    352   dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);   in dl_change_utilization()
    448   struct task_struct *p = dl_task_of(dl_se);   in task_non_contending() local
    450   if (dl_task(p))   in task_non_contending()
    453   if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {   in task_non_contending()
    454   struct dl_bw *dl_b = dl_bw_of(task_cpu(p));   in task_non_contending()
    456   if (READ_ONCE(p->__state) == TASK_DEAD)   in task_non_contending()
    459   __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));   in task_non_contending()
    [all …]
|
| D | core_sched.c |
    53   static unsigned long sched_core_update_cookie(struct task_struct *p,   in sched_core_update_cookie() argument
    60   rq = task_rq_lock(p, &rf);   in sched_core_update_cookie()
    68   SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));   in sched_core_update_cookie()
    70   if (sched_core_enqueued(p))   in sched_core_update_cookie()
    71   sched_core_dequeue(rq, p, DEQUEUE_SAVE);   in sched_core_update_cookie()
    73   old_cookie = p->core_cookie;   in sched_core_update_cookie()
    74   p->core_cookie = cookie;   in sched_core_update_cookie()
    79   if (cookie && task_on_rq_queued(p))   in sched_core_update_cookie()
    80   sched_core_enqueue(rq, p);   in sched_core_update_cookie()
    91   if (task_on_cpu(rq, p))   in sched_core_update_cookie()
    [all …]
|
| D | fair.c |
    1092   static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
    1093   static unsigned long task_h_load(struct task_struct *p);
    1142   void post_init_entity_util_avg(struct task_struct *p)   in post_init_entity_util_avg() argument
    1144   struct sched_entity *se = &p->se;   in post_init_entity_util_avg()
    1150   if (p->sched_class != &fair_sched_class) {   in post_init_entity_util_avg()
    1187   void post_init_entity_util_avg(struct task_struct *p)   in post_init_entity_util_avg() argument
    1338   struct task_struct *p = NULL;   in update_stats_wait_start_fair() local
    1346   p = task_of(se);   in update_stats_wait_start_fair()
    1348   __update_stats_wait_start(rq_of(cfs_rq), p, stats);   in update_stats_wait_start_fair()
    1355   struct task_struct *p = NULL;   in update_stats_wait_end_fair() local
    [all …]
|
| D | rt.c |
    301   struct task_struct *p = rt_task_of(rt_se);   in rq_of_rt_se() local
    303   return task_rq(p);   in rq_of_rt_se()
    389   static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)   in enqueue_pushable_task() argument
    391   plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);   in enqueue_pushable_task()
    392   plist_node_init(&p->pushable_tasks, p->prio);   in enqueue_pushable_task()
    393   plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);   in enqueue_pushable_task()
    396   if (p->prio < rq->rt.highest_prio.next)   in enqueue_pushable_task()
    397   rq->rt.highest_prio.next = p->prio;   in enqueue_pushable_task()
    405   static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)   in dequeue_pushable_task() argument
    407   plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);   in dequeue_pushable_task()
    [all …]
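enqueue_pushable_task() re-inserts the task into a priority-sorted plist and then refreshes the cached best "next" priority so the RT push logic can quickly find work to migrate. A userspace sketch of that idea follows, using a small sorted array in place of the kernel's plist; all names besides the mirrored field meaning are invented.

```c
#include <stdio.h>

#define MAX_PUSHABLE 8

/* Lower value == higher priority, matching kernel prio numbering. */
static int pushable[MAX_PUSHABLE];
static int nr_pushable;
static int highest_prio_next = 200;     /* stand-in for rq->rt.highest_prio.next */

static void enqueue_pushable(int prio)
{
	int i = nr_pushable;

	/* Keep the array sorted by priority, mirroring plist_add(). */
	while (i > 0 && pushable[i - 1] > prio) {
		pushable[i] = pushable[i - 1];
		i--;
	}
	pushable[i] = prio;
	nr_pushable++;

	/* Cache the best candidate, as the excerpt does with highest_prio.next. */
	if (prio < highest_prio_next)
		highest_prio_next = prio;
}

int main(void)
{
	enqueue_pushable(90);
	enqueue_pushable(40);
	enqueue_pushable(70);
	printf("next pushable prio: %d (queue head: %d)\n",
	       highest_prio_next, pushable[0]);
	return 0;
}
```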
|
| D | stats.h |
    47   void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
    50   void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
    52   void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
    85   # define __update_stats_wait_start(rq, p, stats) do { } while (0)   argument
    86   # define __update_stats_wait_end(rq, p, stats) do { } while (0)   argument
    87   # define __update_stats_enqueue_sleeper(rq, p, stats) do { } while (0)   argument
    130   static inline void psi_enqueue(struct task_struct *p, int flags)   in psi_enqueue() argument
    142   if (task_on_cpu(task_rq(p), p))   in psi_enqueue()
    145   if (p->se.sched_delayed) {   in psi_enqueue()
    148   if (p->in_memstall)   in psi_enqueue()
    [all …]
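Lines 85-87 show the usual way the stats hooks are compiled away when schedstats support is disabled: the same names become do-nothing macros, so call sites need no #ifdef of their own. Here is a compressed illustration of that idiom with a made-up config symbol; it is only a sketch of the pattern, not the kernel's stats.h.

```c
#include <stdio.h>

/* Toggle this to see both sides of the pattern. */
#define CONFIG_MYSTATS 1

#if CONFIG_MYSTATS
static void update_stats_wait_start(int cpu, int pid)
{
	printf("cpu %d: pid %d starts waiting\n", cpu, pid);
}
#else
/* Compiled out: same call syntax, zero code generated. */
# define update_stats_wait_start(cpu, pid) do { } while (0)
#endif

int main(void)
{
	/* Call sites stay identical whether the option is on or off. */
	update_stats_wait_start(0, 42);
	return 0;
}
```

The `do { } while (0)` body keeps the macro usable anywhere a statement is expected, including before an `else`.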
|
| D | cputime.c |
    112   static inline void task_group_account_field(struct task_struct *p, int index,   in task_group_account_field() argument
    123   cgroup_account_cputime_field(p, index, tmp);   in task_group_account_field()
    131   void account_user_time(struct task_struct *p, u64 cputime)   in account_user_time() argument
    136   p->utime += cputime;   in account_user_time()
    137   account_group_user_time(p, cputime);   in account_user_time()
    139   index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;   in account_user_time()
    142   task_group_account_field(p, index, cputime);   in account_user_time()
    145   acct_account_cputime(p);   in account_user_time()
    148   cpufreq_acct_update_power(p, cputime);   in account_user_time()
    156   void account_guest_time(struct task_struct *p, u64 cputime)   in account_guest_time() argument
    [all …]
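account_user_time() adds the elapsed time to the task's utime and then classifies it as CPUTIME_NICE or CPUTIME_USER depending on whether the task runs at a positive nice value, before the per-cgroup field is charged. A small standalone sketch of that bucketing decision; the bucket names are reused from the excerpt, everything else is illustrative.

```c
#include <stdio.h>
#include <stdint.h>

enum { CPUTIME_USER, CPUTIME_NICE, CPUTIME_MAX };

static uint64_t cpustat[CPUTIME_MAX];

/* Mirrors the classification in the excerpt: positive nice -> NICE bucket. */
static void account_user_time(int task_nice, uint64_t cputime_ns)
{
	int index = (task_nice > 0) ? CPUTIME_NICE : CPUTIME_USER;

	cpustat[index] += cputime_ns;
}

int main(void)
{
	account_user_time(0, 3000000);    /* nice 0  -> USER */
	account_user_time(10, 1000000);   /* nice 10 -> NICE */
	printf("user=%llu nice=%llu\n",
	       (unsigned long long)cpustat[CPUTIME_USER],
	       (unsigned long long)cpustat[CPUTIME_NICE]);
	return 0;
}
```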
|
| D | sched.h |
    231   static inline int task_has_idle_policy(struct task_struct *p)   in task_has_idle_policy() argument
    233   return idle_policy(p->policy);   in task_has_idle_policy()
    236   static inline int task_has_rt_policy(struct task_struct *p)   in task_has_rt_policy() argument
    238   return rt_policy(p->policy);   in task_has_rt_policy()
    241   static inline int task_has_dl_policy(struct task_struct *p)   in task_has_dl_policy() argument
    243   return dl_policy(p->policy);   in task_has_dl_policy()
    364   extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
    365   extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
    366   extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
    368   extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
    [all …]
|
| D | autogroup.c |
    70   static inline struct autogroup *autogroup_task_get(struct task_struct *p)   in autogroup_task_get() argument
    75   if (!lock_task_sighand(p, &flags))   in autogroup_task_get()
    78   ag = autogroup_kref_get(p->signal->autogroup);   in autogroup_task_get()
    79   unlock_task_sighand(p, &flags);   in autogroup_task_get()
    128   bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)   in task_wants_autogroup() argument
    140   if (p->flags & PF_EXITING)   in task_wants_autogroup()
    146   void sched_autogroup_exit_task(struct task_struct *p)   in sched_autogroup_exit_task() argument
    153   sched_move_task(p, true);   in sched_autogroup_exit_task()
    157   autogroup_move_group(struct task_struct *p, struct autogroup *ag)   in autogroup_move_group() argument
    163   if (WARN_ON_ONCE(!lock_task_sighand(p, &flags)))   in autogroup_move_group()
    [all …]
|
| D | ext.h |
    13   void scx_pre_fork(struct task_struct *p);
    14   int scx_fork(struct task_struct *p);
    15   void scx_post_fork(struct task_struct *p);
    16   void scx_cancel_fork(struct task_struct *p);
    20   int scx_check_setscheduler(struct task_struct *p, int policy);
    32   static inline bool task_on_scx(const struct task_struct *p)   in task_on_scx() argument
    34   return scx_enabled() && p->sched_class == &ext_sched_class;   in task_on_scx()
    45   static inline void scx_pre_fork(struct task_struct *p) {}   in scx_pre_fork() argument
    46   static inline int scx_fork(struct task_struct *p) { return 0; }   in scx_fork() argument
    47   static inline void scx_post_fork(struct task_struct *p) {}   in scx_post_fork() argument
    [all …]
|
| D | membarrier.c |
    269   struct task_struct *p;   in membarrier_global_expedited() local
    290   p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_global_expedited()
    291   if (!p->mm)   in membarrier_global_expedited()
    363   struct task_struct *p;   in membarrier_private_expedited() local
    368   p = rcu_dereference(cpu_rq(cpu_id)->curr);   in membarrier_private_expedited()
    369   if (!p || p->mm != mm) {   in membarrier_private_expedited()
    379   struct task_struct *p;   in membarrier_private_expedited() local
    381   p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_private_expedited()
    382   if (p && p->mm == mm)   in membarrier_private_expedited()
    478   struct task_struct *p;   in sync_runqueues_membarrier_state() local
    [all …]
|
| D | debug.c |
    735   print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)   in print_task() argument
    737   if (task_current(rq, p))   in print_task()
    740   SEQ_printf(m, " %c", task_state_to_char(p));   in print_task()
    743   p->comm, task_pid_nr(p),   in print_task()
    744   SPLIT_NS(p->se.vruntime),   in print_task()
    745   entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',   in print_task()
    746   SPLIT_NS(p->se.deadline),   in print_task()
    747   p->se.custom_slice ? 'S' : ' ',   in print_task()
    748   SPLIT_NS(p->se.slice),   in print_task()
    749   SPLIT_NS(p->se.sum_exec_runtime),   in print_task()
    [all …]
|
| /kernel/trace/ |
| D | trace_boot.c |
    28   const char *p;   in trace_boot_set_instance_options() local
    33   xbc_node_for_each_array_value(node, "options", anode, p) {   in trace_boot_set_instance_options()
    34   if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) {   in trace_boot_set_instance_options()
    35   pr_err("String is too long: %s\n", p);   in trace_boot_set_instance_options()
    43   p = xbc_node_find_value(node, "tracing_on", NULL);   in trace_boot_set_instance_options()
    44   if (p && *p != '\0') {   in trace_boot_set_instance_options()
    45   if (kstrtoul(p, 10, &v))   in trace_boot_set_instance_options()
    46   pr_err("Failed to set tracing on: %s\n", p);   in trace_boot_set_instance_options()
    53   p = xbc_node_find_value(node, "trace_clock", NULL);   in trace_boot_set_instance_options()
    54   if (p && *p != '\0') {   in trace_boot_set_instance_options()
    [all …]
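The excerpt iterates over bootconfig values, copies each into a bounded buffer, and converts numeric options with kstrtoul(), logging an error and moving on when a value fails to parse. Below is a userspace approximation of that "copy, validate, convert" flow using strtoul(); the option name and helper are made up for illustration.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Parse a numeric boot-style option such as "tracing_on=1".
 * Returns 0 on success, -1 on malformed input (mirroring the pr_err path). */
static int parse_ulong_option(const char *value, unsigned long *out)
{
	char buf[16];
	char *end;

	if (strlen(value) >= sizeof(buf)) {
		fprintf(stderr, "String is too long: %s\n", value);
		return -1;
	}
	strcpy(buf, value);

	errno = 0;
	*out = strtoul(buf, &end, 10);
	if (errno || end == buf || *end != '\0') {
		fprintf(stderr, "Failed to parse option: %s\n", value);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned long v;

	if (!parse_ulong_option("1", &v))
		printf("tracing_on = %lu\n", v);
	parse_ulong_option("not-a-number", &v);
	return 0;
}
```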
|
| D | trace_branch.c |
    41   const char *p;   in probe_likely_condition() local
    72   p = f->data.file + strlen(f->data.file);   in probe_likely_condition()
    73   while (p >= f->data.file && *p != '/')   in probe_likely_condition()
    74   p--;   in probe_likely_condition()
    75   p++;   in probe_likely_condition()
    78   strncpy(entry->file, p, TRACE_FILE_SIZE);   in probe_likely_condition()
    247   static inline long get_incorrect_percent(const struct ftrace_branch_data *p)   in get_incorrect_percent() argument
    251   if (p->correct) {   in get_incorrect_percent()
    252   percent = p->incorrect * 100;   in get_incorrect_percent()
    253   percent /= p->correct + p->incorrect;   in get_incorrect_percent()
    [all …]
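Two small pieces of logic are visible here: scanning backwards from the end of a path to the last '/' so only the basename is recorded in the trace entry, and computing the mispredicted-branch percentage as incorrect * 100 / (correct + incorrect). Both are combined in the short standalone sketch below (a bounds-safe variant of the backwards scan; the no-sample return value is an assumption, since that branch is not shown in the excerpt).

```c
#include <stdio.h>
#include <string.h>

/* Step back to just past the last '/', like probe_likely_condition() does
 * before copying the file name into the trace entry. */
static const char *basename_of(const char *file)
{
	const char *p = file + strlen(file);

	while (p > file && *(p - 1) != '/')
		p--;
	return p;
}

/* Integer percentage of mispredicted branches, as in get_incorrect_percent(). */
static long incorrect_percent(unsigned long correct, unsigned long incorrect)
{
	if (correct)
		return incorrect * 100 / (correct + incorrect);
	return incorrect ? 100 : -1;   /* assumption: -1 marks "no samples" */
}

int main(void)
{
	printf("%s\n", basename_of("kernel/trace/trace_branch.c"));
	printf("%ld%%\n", incorrect_percent(75, 25));
	return 0;
}
```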
|
| /kernel/ |
| D | kprobes.c |
    379   struct kprobe *p;   in get_kprobe() local
    382   hlist_for_each_entry_rcu(p, head, hlist,   in get_kprobe()
    384   if (p->addr == addr)   in get_kprobe()
    385   return p;   in get_kprobe()
    392   static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
    395   static inline bool kprobe_aggrprobe(struct kprobe *p)   in kprobe_aggrprobe() argument
    397   return p->pre_handler == aggr_pre_handler;   in kprobe_aggrprobe()
    401   static inline bool kprobe_unused(struct kprobe *p)   in kprobe_unused() argument
    403   return kprobe_aggrprobe(p) && kprobe_disabled(p) &&   in kprobe_unused()
    404   list_empty(&p->list);   in kprobe_unused()
    [all …]
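get_kprobe() hashes the probed address, walks that hash bucket, and returns the kprobe whose ->addr matches; kprobe_aggrprobe() then recognises aggregate probes by checking whether the pre-handler is the shared aggr_pre_handler. A toy chained-hash lookup showing the same bucket-walk-and-compare shape (without RCU) follows; all names are invented.

```c
#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 8

struct probe {
	void *addr;
	struct probe *next;       /* bucket chain, like the hlist in the excerpt */
};

static struct probe *table[NBUCKETS];

static unsigned int hash_ptr(void *addr)
{
	return ((size_t)addr >> 4) % NBUCKETS;
}

static void insert_probe(struct probe *p)
{
	unsigned int b = hash_ptr(p->addr);

	p->next = table[b];
	table[b] = p;
}

/* Same shape as get_kprobe(): pick the bucket, walk it, compare addresses. */
static struct probe *find_probe(void *addr)
{
	for (struct probe *p = table[hash_ptr(addr)]; p; p = p->next)
		if (p->addr == addr)
			return p;
	return NULL;
}

int main(void)
{
	int target;
	struct probe pr = { .addr = &target };

	insert_probe(&pr);
	printf("found: %s\n", find_probe(&target) ? "yes" : "no");
	return 0;
}
```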
|
| D | freezer.c |
    41   bool freezing_slow_path(struct task_struct *p)   in freezing_slow_path() argument
    43   if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))   in freezing_slow_path()
    46   if (test_tsk_thread_flag(p, TIF_MEMDIE))   in freezing_slow_path()
    49   if (pm_nosig_freezing || cgroup_freezing(p))   in freezing_slow_path()
    52   if (pm_freezing && !(p->flags & PF_KTHREAD))   in freezing_slow_path()
    59   bool frozen(struct task_struct *p)   in frozen() argument
    61   return READ_ONCE(p->__state) & TASK_FROZEN;   in frozen()
    102   static void fake_signal_wake_up(struct task_struct *p)   in fake_signal_wake_up() argument
    106   if (lock_task_sighand(p, &flags)) {   in fake_signal_wake_up()
    107   signal_wake_up(p, 0);   in fake_signal_wake_up()
    [all …]
|
| D | fork.c |
    1228   struct task_struct *p)   in mm_clear_owner() argument
    1231   if (mm->owner == p)   in mm_clear_owner()
    1236   static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)   in mm_init_owner() argument
    1239   mm->owner = p;   in mm_init_owner()
    1259   static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,   in mm_init() argument
    1278   mm_init_owner(mm, p);   in mm_init()
    1300   if (init_new_context(p, mm))   in mm_init()
    1909   static void copy_seccomp(struct task_struct *p)   in copy_seccomp() argument
    1922   p->seccomp = current->seccomp;   in copy_seccomp()
    1930   task_set_no_new_privs(p);   in copy_seccomp()
    [all …]
|
| D | exit.c |
    128   static void __unhash_process(struct task_struct *p, bool group_dead)   in __unhash_process() argument
    131   detach_pid(p, PIDTYPE_PID);   in __unhash_process()
    133   detach_pid(p, PIDTYPE_TGID);   in __unhash_process()
    134   detach_pid(p, PIDTYPE_PGID);   in __unhash_process()
    135   detach_pid(p, PIDTYPE_SID);   in __unhash_process()
    137   list_del_rcu(&p->tasks);   in __unhash_process()
    138   list_del_init(&p->sibling);   in __unhash_process()
    141   list_del_rcu(&p->thread_node);   in __unhash_process()
    243   void release_task(struct task_struct *p)   in release_task() argument
    252   dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);   in release_task()
    [all …]
|
| D | resource.c |
    53   static struct resource *next_resource(struct resource *p, bool skip_children)   in next_resource() argument
    55   if (!skip_children && p->child)   in next_resource()
    56   return p->child;   in next_resource()
    57   while (!p->sibling && p->parent)   in next_resource()
    58   p = p->parent;   in next_resource()
    59   return p->sibling;   in next_resource()
    73   struct resource *p;   in r_start() local
    77   for_each_resource(root, p, false) {   in r_start()
    82   return p;   in r_start()
    87   struct resource *p = v;   in r_next() local
    [all …]
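next_resource() is an iterative pre-order step over the resource tree: descend into the first child when children are not being skipped, otherwise climb toward the root until a sibling exists, and return that sibling (NULL at the end of the walk). A standalone sketch of the same traversal over a mock parent/sibling/child node; only the traversal shape is taken from the excerpt.

```c
#include <stdio.h>
#include <stddef.h>

struct node {
	const char *name;
	struct node *parent, *sibling, *child;
};

/* Pre-order successor, same shape as next_resource() above. */
static struct node *next_node(struct node *p, int skip_children)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

int main(void)
{
	struct node root = { .name = "root" };
	struct node a   = { .name = "a",  .parent = &root };
	struct node a1  = { .name = "a1", .parent = &a };
	struct node b   = { .name = "b",  .parent = &root };

	root.child = &a;
	a.child = &a1;
	a.sibling = &b;

	/* Prints: root a a1 b — a depth-first, children-before-siblings order. */
	for (struct node *p = &root; p; p = next_node(p, 0))
		printf("%s\n", p->name);
	return 0;
}
```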
|
| /kernel/cgroup/ |
| D | pids.c |
    96   static void pids_update_watermark(struct pids_cgroup *p, int64_t nr_pids)   in pids_update_watermark() argument
    102   if (nr_pids > READ_ONCE(p->watermark))   in pids_update_watermark()
    103   WRITE_ONCE(p->watermark, nr_pids);   in pids_update_watermark()
    130   struct pids_cgroup *p;   in pids_uncharge() local
    132   for (p = pids; parent_pids(p); p = parent_pids(p))   in pids_uncharge()
    133   pids_cancel(p, num);   in pids_uncharge()
    147   struct pids_cgroup *p;   in pids_charge() local
    149   for (p = pids; parent_pids(p); p = parent_pids(p)) {   in pids_charge()
    150   int64_t new = atomic64_add_return(num, &p->counter);   in pids_charge()
    152   pids_update_watermark(p, new);   in pids_charge()
    [all …]
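pids_charge() walks from the charged cgroup up through its ancestors via parent_pids(), adding the new tasks to each level's counter and refreshing the per-level high-watermark; pids_uncharge() walks the same chain subtracting. A toy sketch of that upward walk over a parent-linked chain of counters follows; it is simplified (no atomics, and it also charges the topmost node, which the kernel loop skips).

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct pids_node {
	int64_t counter;
	int64_t watermark;
	struct pids_node *parent;
};

/* Charge num pids to this node and every ancestor, tracking the peak,
 * mirroring the loop shape in pids_charge(). */
static void charge(struct pids_node *pids, int64_t num)
{
	for (struct pids_node *p = pids; p; p = p->parent) {
		p->counter += num;
		if (p->counter > p->watermark)
			p->watermark = p->counter;
	}
}

static void uncharge(struct pids_node *pids, int64_t num)
{
	for (struct pids_node *p = pids; p; p = p->parent)
		p->counter -= num;
}

int main(void)
{
	struct pids_node root = { 0 }, child = { .parent = &root };

	charge(&child, 3);
	uncharge(&child, 1);
	printf("child=%lld root=%lld root watermark=%lld\n",
	       (long long)child.counter, (long long)root.counter,
	       (long long)root.watermark);
	return 0;
}
```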
|
| /kernel/debug/kdb/ |
| D | kdb_bt.c |
    22   static void kdb_show_stack(struct task_struct *p, void *addr)   in kdb_show_stack() argument
    26   if (!addr && kdb_task_has_cpu(p)) {   in kdb_show_stack()
    30   kdb_dump_stack_on_cpu(kdb_process_cpu(p));   in kdb_show_stack()
    33   show_stack(p, addr, KERN_EMERG);   in kdb_show_stack()
    77   kdb_bt1(struct task_struct *p, const char *mask, bool btaprompt)   in kdb_bt1() argument
    81   if (kdb_getarea(ch, (unsigned long)p) ||   in kdb_bt1()
    82   kdb_getarea(ch, (unsigned long)(p+1)-1))   in kdb_bt1()
    84   if (!kdb_task_state(p, mask))   in kdb_bt1()
    86   kdb_printf("Stack traceback for pid %d\n", p->pid);   in kdb_bt1()
    87   kdb_ps1(p);   in kdb_bt1()
    [all …]
|
| /kernel/bpf/ |
| D | cgroup_iter.c |
    59   struct cgroup_iter_priv *p = seq->private;   in cgroup_iter_seq_start() local
    65   if (p->visited_all)   in cgroup_iter_seq_start()
    75   p->terminate = false;   in cgroup_iter_seq_start()
    76   p->visited_all = false;   in cgroup_iter_seq_start()
    77   if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)   in cgroup_iter_seq_start()
    78   return css_next_descendant_pre(NULL, p->start_css);   in cgroup_iter_seq_start()
    79   else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST)   in cgroup_iter_seq_start()
    80   return css_next_descendant_post(NULL, p->start_css);   in cgroup_iter_seq_start()
    82   return p->start_css;   in cgroup_iter_seq_start()
    90   struct cgroup_iter_priv *p = seq->private;   in cgroup_iter_seq_stop() local
    [all …]
|
| /kernel/power/ |
| D | process.c |
    34   struct task_struct *g, *p;   in try_to_freeze_tasks() local
    55   for_each_process_thread(g, p) {   in try_to_freeze_tasks()
    56   if (p == current || !freeze_task(p))   in try_to_freeze_tasks()
    102   for_each_process_thread(g, p) {   in try_to_freeze_tasks()
    103   if (p != current && freezing(p) && !frozen(p)) {   in try_to_freeze_tasks()
    104   sched_show_task(p);   in try_to_freeze_tasks()
    106   trace_android_vh_try_to_freeze_todo_unfrozen(p);   in try_to_freeze_tasks()
    189   struct task_struct *g, *p;   in thaw_processes() local
    206   for_each_process_thread(g, p) {   in thaw_processes()
    208   WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));   in thaw_processes()
    [all …]
|