
Searched for refs:p (results 1 – 25 of 139), sorted by relevance

/kernel/trace/
trace_boot.c (see the sketch after this entry)
28 const char *p; in trace_boot_set_instance_options() local
33 xbc_node_for_each_array_value(node, "options", anode, p) { in trace_boot_set_instance_options()
34 if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) { in trace_boot_set_instance_options()
35 pr_err("String is too long: %s\n", p); in trace_boot_set_instance_options()
43 p = xbc_node_find_value(node, "tracing_on", NULL); in trace_boot_set_instance_options()
44 if (p && *p != '\0') { in trace_boot_set_instance_options()
45 if (kstrtoul(p, 10, &v)) in trace_boot_set_instance_options()
46 pr_err("Failed to set tracing on: %s\n", p); in trace_boot_set_instance_options()
53 p = xbc_node_find_value(node, "trace_clock", NULL); in trace_boot_set_instance_options()
54 if (p && *p != '\0') { in trace_boot_set_instance_options()
[all …]
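The trace_boot.c hits above combine two common kernel string idioms: copying into a fixed buffer with truncation detection (strlcpy's return value) and parsing a decimal value with kstrtoul. A minimal userspace C sketch of the same copy-and-parse pattern, with snprintf and strtoul standing in for the kernel helpers (BUF_SIZE and the function names are mine):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 64  /* illustrative, stands in for the kernel's buffer */

/* Copy with truncation detection: snprintf returns the length the
 * untruncated result would have had, much like strlcpy in the kernel. */
static int copy_option(char *buf, const char *p)
{
    if ((size_t)snprintf(buf, BUF_SIZE, "%s", p) >= BUF_SIZE) {
        fprintf(stderr, "String is too long: %s\n", p);
        return -1;
    }
    return 0;
}

/* Parse a decimal unsigned long and reject trailing junk, roughly what
 * kstrtoul(p, 10, &v) does. */
static int parse_ulong(const char *p, unsigned long *v)
{
    char *end;

    errno = 0;
    *v = strtoul(p, &end, 10);
    if (errno || end == p || *end != '\0')
        return -1;
    return 0;
}

int main(void)
{
    char buf[BUF_SIZE];
    unsigned long v;

    if (!copy_option(buf, "1") && !parse_ulong(buf, &v))
        printf("tracing_on = %lu\n", v);
    return 0;
}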
trace_branch.c (see the sketch after this entry)
41 const char *p; in probe_likely_condition() local
72 p = f->data.file + strlen(f->data.file); in probe_likely_condition()
73 while (p >= f->data.file && *p != '/') in probe_likely_condition()
74 p--; in probe_likely_condition()
75 p++; in probe_likely_condition()
78 strncpy(entry->file, p, TRACE_FILE_SIZE); in probe_likely_condition()
247 static inline long get_incorrect_percent(const struct ftrace_branch_data *p) in get_incorrect_percent() argument
251 if (p->correct) { in get_incorrect_percent()
252 percent = p->incorrect * 100; in get_incorrect_percent()
253 percent /= p->correct + p->incorrect; in get_incorrect_percent()
[all …]
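Two self-contained idioms appear in the trace_branch.c hits: scanning backward from the end of a path to isolate its final component, and computing an integer percentage from correct/incorrect counters. A userspace sketch of both, using a bounds-safe variant of the backward scan (function names are mine, and the zero-history case is simplified):

#include <stdio.h>
#include <string.h>

/* Bounds-safe variant of the backward scan in probe_likely_condition():
 * walk back from the end and stop just after the last '/', or at the
 * start of the string if there is none. */
static const char *basename_of(const char *file)
{
    const char *p = file + strlen(file);

    while (p > file && p[-1] != '/')
        p--;
    return p;
}

/* Integer percentage in the spirit of get_incorrect_percent(): multiply
 * before dividing so integer division keeps the precision. */
static long incorrect_percent(unsigned long correct, unsigned long incorrect)
{
    if (!correct)
        return incorrect ? 100 : 0;
    return incorrect * 100 / (correct + incorrect);
}

int main(void)
{
    printf("%s\n", basename_of("kernel/trace/trace_branch.c"));
    printf("%ld%%\n", incorrect_percent(75, 25));
    return 0;
}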
/kernel/sched/
core.c (see the sketch after this entry)
170 static inline int __task_prio(struct task_struct *p) in __task_prio() argument
172 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
175 if (rt_prio(p->prio)) /* includes deadline */ in __task_prio()
176 return p->prio; /* [-1, 99] */ in __task_prio()
178 if (p->sched_class == &idle_sched_class) in __task_prio()
236 const struct task_struct *p = __node_2_sc(node); in rb_sched_core_cmp() local
239 if (cookie < p->core_cookie) in rb_sched_core_cmp()
242 if (cookie > p->core_cookie) in rb_sched_core_cmp()
248 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
252 if (!p->core_cookie) in sched_core_enqueue()
[all …]
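rb_sched_core_cmp orders core-scheduling tasks by their 64-bit core_cookie with explicit less-than/greater-than tests rather than subtraction, which would overflow for wide keys. A sketch of the same three-way comparator shape, applied here to qsort (struct item and the values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
    uint64_t cookie;  /* stands in for task_struct::core_cookie */
};

/* Three-way compare with explicit tests: subtracting two u64 keys and
 * truncating to int gives wrong answers for large values, so rb-tree
 * comparators (and this qsort one) compare directly. */
static int cmp_cookie(const void *a, const void *b)
{
    uint64_t ca = ((const struct item *)a)->cookie;
    uint64_t cb = ((const struct item *)b)->cookie;

    if (ca < cb)
        return -1;
    if (ca > cb)
        return 1;
    return 0;
}

int main(void)
{
    struct item v[] = { { 3 }, { UINT64_MAX }, { 1 } };

    qsort(v, 3, sizeof(v[0]), cmp_cookie);
    for (int i = 0; i < 3; i++)
        printf("%llu\n", (unsigned long long)v[i].cookie);
    return 0;
}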
deadline.c
69 struct task_struct *p = dl_task_of(dl_se); in dl_rq_of_se() local
70 struct rq *rq = task_rq(p); in dl_rq_of_se()
311 static void dl_change_utilization(struct task_struct *p, u64 new_bw) in dl_change_utilization() argument
315 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
317 if (task_on_rq_queued(p)) in dl_change_utilization()
320 rq = task_rq(p); in dl_change_utilization()
321 if (p->dl.dl_non_contending) { in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
323 p->dl.dl_non_contending = 0; in dl_change_utilization()
331 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
[all …]
core_sched.c (see the sketch after this entry)
53 static unsigned long sched_core_update_cookie(struct task_struct *p, in sched_core_update_cookie() argument
60 rq = task_rq_lock(p, &rf); in sched_core_update_cookie()
68 SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq)); in sched_core_update_cookie()
70 if (sched_core_enqueued(p)) in sched_core_update_cookie()
71 sched_core_dequeue(rq, p, DEQUEUE_SAVE); in sched_core_update_cookie()
73 old_cookie = p->core_cookie; in sched_core_update_cookie()
74 p->core_cookie = cookie; in sched_core_update_cookie()
79 if (cookie && task_on_rq_queued(p)) in sched_core_update_cookie()
80 sched_core_enqueue(rq, p); in sched_core_update_cookie()
91 if (task_on_cpu(rq, p)) in sched_core_update_cookie()
[all …]
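sched_core_update_cookie shows a general rule for keyed containers: a field that participates in the ordering must not be rewritten while the element is enqueued, so the task is dequeued, the cookie updated, and the task re-enqueued. A small sketch of the same dequeue/modify/re-enqueue discipline on a sorted singly-linked list (all names and the list itself are illustrative, not the kernel's rb-tree):

#include <stdio.h>

struct node {
    unsigned long cookie;  /* the ordering key */
    struct node *next;
};

/* Insert in ascending cookie order. */
static void enqueue(struct node **head, struct node *n)
{
    while (*head && (*head)->cookie < n->cookie)
        head = &(*head)->next;
    n->next = *head;
    *head = n;
}

/* Unlink n from the list. */
static void dequeue(struct node **head, struct node *n)
{
    while (*head && *head != n)
        head = &(*head)->next;
    if (*head)
        *head = n->next;
}

/* The pattern from sched_core_update_cookie(): never rewrite the key
 * while the node is linked, or the structure's invariant breaks. */
static unsigned long update_cookie(struct node **head, struct node *n,
                                   unsigned long cookie)
{
    unsigned long old = n->cookie;

    dequeue(head, n);
    n->cookie = cookie;
    enqueue(head, n);
    return old;
}

int main(void)
{
    struct node a = { 1, NULL }, b = { 5, NULL }, *head = NULL;

    enqueue(&head, &a);
    enqueue(&head, &b);
    update_cookie(&head, &a, 9);  /* moves a behind b */
    for (struct node *n = head; n; n = n->next)
        printf("%lu\n", n->cookie);
    return 0;
}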
fair.c
789 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
790 static unsigned long task_h_load(struct task_struct *p);
838 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
840 struct sched_entity *se = &p->se; in post_init_entity_util_avg()
846 if (p->sched_class != &fair_sched_class) { in post_init_entity_util_avg()
883 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
943 struct task_struct *p = NULL; in update_stats_wait_start_fair() local
951 p = task_of(se); in update_stats_wait_start_fair()
953 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
960 struct task_struct *p = NULL; in update_stats_wait_end_fair() local
[all …]
rt.c
303 struct task_struct *p = rt_task_of(rt_se); in rq_of_rt_se() local
305 return task_rq(p); in rq_of_rt_se()
382 struct task_struct *p; in inc_rt_migration() local
387 p = rt_task_of(rt_se); in inc_rt_migration()
391 if (p->nr_cpus_allowed > 1) in inc_rt_migration()
399 struct task_struct *p; in dec_rt_migration() local
404 p = rt_task_of(rt_se); in dec_rt_migration()
408 if (p->nr_cpus_allowed > 1) in dec_rt_migration()
438 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
440 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
[all …]
stats.h (see the sketch after this entry)
47 void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
50 void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
52 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
85 # define __update_stats_wait_start(rq, p, stats) do { } while (0) argument
86 # define __update_stats_wait_end(rq, p, stats) do { } while (0) argument
87 # define __update_stats_enqueue_sleeper(rq, p, stats) do { } while (0) argument
121 static inline void psi_enqueue(struct task_struct *p, bool wakeup) in psi_enqueue() argument
128 if (p->in_memstall) in psi_enqueue()
131 if (!wakeup || p->sched_psi_wake_requeue) { in psi_enqueue()
132 if (p->in_memstall) in psi_enqueue()
[all …]
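The stats.h hits show both sides of a standard kernel configuration seam: real declarations when schedstats are compiled in, and empty do { } while (0) macros otherwise, so call sites compile unchanged either way. A minimal sketch of the idiom (the config symbol and function are made up):

#include <stdio.h>

/* #define CONFIG_MYSTATS 1 */

#ifdef CONFIG_MYSTATS
static void update_stats(int v)
{
    printf("stat: %d\n", v);
}
#else
/* do { } while (0) swallows the trailing semicolon and remains a single
 * statement, so "if (x) update_stats(v); else ..." still parses. */
# define update_stats(v) do { } while (0)
#endif

int main(void)
{
    if (1)
        update_stats(42);
    else
        printf("never\n");
    return 0;
}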
autogroup.c
71 static inline struct autogroup *autogroup_task_get(struct task_struct *p) in autogroup_task_get() argument
76 if (!lock_task_sighand(p, &flags)) in autogroup_task_get()
79 ag = autogroup_kref_get(p->signal->autogroup); in autogroup_task_get()
80 unlock_task_sighand(p, &flags); in autogroup_task_get()
129 bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) in task_wants_autogroup() argument
141 if (p->flags & PF_EXITING) in task_wants_autogroup()
147 void sched_autogroup_exit_task(struct task_struct *p) in sched_autogroup_exit_task() argument
154 sched_move_task(p); in sched_autogroup_exit_task()
158 autogroup_move_group(struct task_struct *p, struct autogroup *ag) in autogroup_move_group() argument
164 if (WARN_ON_ONCE(!lock_task_sighand(p, &flags))) in autogroup_move_group()
[all …]
cputime.c (see the sketch after this entry)
107 static inline void task_group_account_field(struct task_struct *p, int index, in task_group_account_field() argument
118 cgroup_account_cputime_field(p, index, tmp); in task_group_account_field()
126 void account_user_time(struct task_struct *p, u64 cputime) in account_user_time() argument
131 p->utime += cputime; in account_user_time()
132 account_group_user_time(p, cputime); in account_user_time()
134 index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; in account_user_time()
137 task_group_account_field(p, index, cputime); in account_user_time()
140 acct_account_cputime(p); in account_user_time()
143 cpufreq_acct_update_power(p, cputime); in account_user_time()
151 void account_guest_time(struct task_struct *p, u64 cputime) in account_guest_time() argument
[all …]
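account_user_time classifies a slice of user time as NICE or USER depending on the task's nice value and accumulates it in the matching bucket. A sketch of that classify-then-accumulate step (the enum and array are illustrative stand-ins for the kernel's cpustat fields):

#include <stdint.h>
#include <stdio.h>

enum { CPUTIME_USER, CPUTIME_NICE, NR_STATS };

static uint64_t cpustat[NR_STATS];

/* As in account_user_time(): positive nice means the task was
 * de-prioritized, so its time lands in the NICE bucket. */
static void account_user_time(int nice, uint64_t cputime)
{
    int index = (nice > 0) ? CPUTIME_NICE : CPUTIME_USER;

    cpustat[index] += cputime;
}

int main(void)
{
    account_user_time(0, 1000);
    account_user_time(10, 500);
    printf("user=%llu nice=%llu\n",
           (unsigned long long)cpustat[CPUTIME_USER],
           (unsigned long long)cpustat[CPUTIME_NICE]);
    return 0;
}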
sched.h (see the sketch after this entry)
208 static inline int task_has_idle_policy(struct task_struct *p) in task_has_idle_policy() argument
210 return idle_policy(p->policy); in task_has_idle_policy()
213 static inline int task_has_rt_policy(struct task_struct *p) in task_has_rt_policy() argument
215 return rt_policy(p->policy); in task_has_rt_policy()
218 static inline int task_has_dl_policy(struct task_struct *p) in task_has_dl_policy() argument
220 return dl_policy(p->policy); in task_has_dl_policy()
290 void __dl_clear_params(struct task_struct *p);
330 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
331 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
332 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
[all …]
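sched.h wraps each policy test in a tiny static inline predicate (task_has_idle_policy and friends) so call sites read as a question rather than a raw field comparison. A sketch of the shape (the policy constants here are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { POLICY_NORMAL, POLICY_IDLE, POLICY_RT };

struct task {
    int policy;
};

/* One-line predicates, as in task_has_idle_policy(): cheap enough to
 * inline, and they keep the field access in a single place. */
static inline bool task_has_idle_policy(const struct task *p)
{
    return p->policy == POLICY_IDLE;
}

static inline bool task_has_rt_policy(const struct task *p)
{
    return p->policy == POLICY_RT;
}

int main(void)
{
    struct task t = { POLICY_IDLE };

    printf("%d %d\n", task_has_idle_policy(&t), task_has_rt_policy(&t));
    return 0;
}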
membarrier.c
268 struct task_struct *p; in membarrier_global_expedited() local
289 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
290 if (!p->mm) in membarrier_global_expedited()
357 struct task_struct *p; in membarrier_private_expedited() local
362 p = rcu_dereference(cpu_rq(cpu_id)->curr); in membarrier_private_expedited()
363 if (!p || p->mm != mm) { in membarrier_private_expedited()
373 struct task_struct *p; in membarrier_private_expedited() local
375 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
376 if (p && p->mm == mm) in membarrier_private_expedited()
472 struct task_struct *p; in sync_runqueues_membarrier_state() local
[all …]
debug.c
533 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) in print_task() argument
535 if (task_current(rq, p)) in print_task()
538 SEQ_printf(m, " %c", task_state_to_char(p)); in print_task()
541 p->comm, task_pid_nr(p), in print_task()
542 SPLIT_NS(p->se.vruntime), in print_task()
543 (long long)(p->nvcsw + p->nivcsw), in print_task()
544 p->prio); in print_task()
547 SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)), in print_task()
548 SPLIT_NS(p->se.sum_exec_runtime), in print_task()
549 SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), in print_task()
[all …]
stats.c
6 void __update_stats_wait_start(struct rq *rq, struct task_struct *p, in __update_stats_wait_start() argument
14 if (p && likely(wait_start > prev_wait_start)) in __update_stats_wait_start()
20 void __update_stats_wait_end(struct rq *rq, struct task_struct *p, in __update_stats_wait_end() argument
25 if (p) { in __update_stats_wait_end()
26 if (task_on_rq_migrating(p)) { in __update_stats_wait_end()
37 trace_sched_stat_wait(p, delta); in __update_stats_wait_end()
47 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, in __update_stats_enqueue_sleeper() argument
67 if (p) { in __update_stats_enqueue_sleeper()
68 account_scheduler_latency(p, delta >> 10, 1); in __update_stats_enqueue_sleeper()
69 trace_sched_stat_sleep(p, delta); in __update_stats_enqueue_sleeper()
[all …]
stop_task.c
13 select_task_rq_stop(struct task_struct *p, int cpu, int flags) in select_task_rq_stop() argument
15 return task_cpu(p); /* stop tasks as never migrate */ in select_task_rq_stop()
26 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
46 struct task_struct *p = pick_task_stop(rq); in pick_next_task_stop() local
48 if (p) in pick_next_task_stop()
49 set_next_task_stop(rq, p, true); in pick_next_task_stop()
51 return p; in pick_next_task_stop()
55 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
61 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_stop() argument
99 static void switched_to_stop(struct rq *rq, struct task_struct *p) in switched_to_stop() argument
[all …]
/kernel/
kprobes.c (see the sketch after this entry)
379 struct kprobe *p; in get_kprobe() local
382 hlist_for_each_entry_rcu(p, head, hlist, in get_kprobe()
384 if (p->addr == addr) in get_kprobe()
385 return p; in get_kprobe()
392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
395 static inline bool kprobe_aggrprobe(struct kprobe *p) in kprobe_aggrprobe() argument
397 return p->pre_handler == aggr_pre_handler; in kprobe_aggrprobe()
401 static inline bool kprobe_unused(struct kprobe *p) in kprobe_unused() argument
403 return kprobe_aggrprobe(p) && kprobe_disabled(p) && in kprobe_unused()
404 list_empty(&p->list); in kprobe_unused()
[all …]
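get_kprobe is a textbook hash lookup: hash the address to a bucket, walk the bucket's chain, and return the entry whose addr matches. A non-RCU userspace sketch of the same lookup (hash size and names are mine; the kernel walks the bucket under RCU with hlist_for_each_entry_rcu):

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 6
#define HASH_SIZE (1u << HASH_BITS)

struct probe {
    void *addr;          /* lookup key */
    struct probe *next;  /* bucket chain */
};

static struct probe *table[HASH_SIZE];

/* Fold the address into a bucket index; the shift skips the low bits
 * that alignment makes constant. */
static unsigned int hash_addr(void *addr)
{
    return ((uintptr_t)addr >> 4) & (HASH_SIZE - 1);
}

/* Walk the bucket and return the probe registered at addr, or NULL. */
static struct probe *get_probe(void *addr)
{
    struct probe *p;

    for (p = table[hash_addr(addr)]; p; p = p->next)
        if (p->addr == addr)
            return p;
    return NULL;
}

int main(void)
{
    struct probe a = { (void *)0x1000, NULL };

    table[hash_addr(a.addr)] = &a;
    printf("%s\n", get_probe((void *)0x1000) ? "hit" : "miss");
    return 0;
}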
freezer.c
41 bool freezing_slow_path(struct task_struct *p) in freezing_slow_path() argument
43 if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK)) in freezing_slow_path()
46 if (test_tsk_thread_flag(p, TIF_MEMDIE)) in freezing_slow_path()
49 if (pm_nosig_freezing || cgroup_freezing(p)) in freezing_slow_path()
52 if (pm_freezing && !(p->flags & PF_KTHREAD)) in freezing_slow_path()
59 bool frozen(struct task_struct *p) in frozen() argument
61 return READ_ONCE(p->__state) & TASK_FROZEN; in frozen()
102 static void fake_signal_wake_up(struct task_struct *p) in fake_signal_wake_up() argument
106 if (lock_task_sighand(p, &flags)) { in fake_signal_wake_up()
107 signal_wake_up(p, 0); in fake_signal_wake_up()
[all …]
exit.c
124 static void __unhash_process(struct task_struct *p, bool group_dead) in __unhash_process() argument
127 detach_pid(p, PIDTYPE_PID); in __unhash_process()
129 detach_pid(p, PIDTYPE_TGID); in __unhash_process()
130 detach_pid(p, PIDTYPE_PGID); in __unhash_process()
131 detach_pid(p, PIDTYPE_SID); in __unhash_process()
133 list_del_rcu(&p->tasks); in __unhash_process()
134 list_del_init(&p->sibling); in __unhash_process()
137 list_del_rcu(&p->thread_group); in __unhash_process()
138 list_del_rcu(&p->thread_node); in __unhash_process()
240 void release_task(struct task_struct *p) in release_task() argument
[all …]
fork.c
1195 struct task_struct *p) in mm_clear_owner() argument
1198 if (mm->owner == p) in mm_clear_owner()
1203 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) in mm_init_owner() argument
1206 mm->owner = p; in mm_init_owner()
1217 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, in mm_init() argument
1239 mm_init_owner(mm, p); in mm_init()
1261 if (init_new_context(p, mm)) in mm_init()
1848 static void copy_seccomp(struct task_struct *p) in copy_seccomp() argument
1861 p->seccomp = current->seccomp; in copy_seccomp()
1869 task_set_no_new_privs(p); in copy_seccomp()
[all …]
resource.c (see the sketch after this entry)
59 static struct resource *next_resource(struct resource *p) in next_resource() argument
61 if (p->child) in next_resource()
62 return p->child; in next_resource()
63 while (!p->sibling && p->parent) in next_resource()
64 p = p->parent; in next_resource()
65 return p->sibling; in next_resource()
68 static struct resource *next_resource_skip_children(struct resource *p) in next_resource_skip_children() argument
70 while (!p->sibling && p->parent) in next_resource_skip_children()
71 p = p->parent; in next_resource_skip_children()
72 return p->sibling; in next_resource_skip_children()
[all …]
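next_resource walks the whole resource tree in preorder without recursion or an explicit stack, using only the child/sibling/parent links: descend to a child if one exists, otherwise climb until a sibling is found. A runnable sketch of the same successor function on a toy tree (names are illustrative):

#include <stdio.h>

struct res {
    const char *name;
    struct res *parent, *sibling, *child;
};

/* Preorder successor using only the three links, as in next_resource():
 * first child if any, else the nearest ancestor's (or own) sibling. */
static struct res *next_res(struct res *p)
{
    if (p->child)
        return p->child;
    while (!p->sibling && p->parent)
        p = p->parent;
    return p->sibling;
}

int main(void)
{
    struct res root = { "root" }, a = { "a" }, a1 = { "a1" }, b = { "b" };

    root.child = &a;
    a.parent = &root; a.child = &a1; a.sibling = &b;
    a1.parent = &a;
    b.parent = &root;

    /* Prints root, a, a1, b: a full preorder walk. */
    for (struct res *p = &root; p; p = next_res(p))
        printf("%s\n", p->name);
    return 0;
}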
/kernel/debug/kdb/
kdb_bt.c
22 static void kdb_show_stack(struct task_struct *p, void *addr) in kdb_show_stack() argument
26 if (!addr && kdb_task_has_cpu(p)) { in kdb_show_stack()
30 kdb_dump_stack_on_cpu(kdb_process_cpu(p)); in kdb_show_stack()
33 show_stack(p, addr, KERN_EMERG); in kdb_show_stack()
77 kdb_bt1(struct task_struct *p, const char *mask, bool btaprompt) in kdb_bt1() argument
81 if (kdb_getarea(ch, (unsigned long)p) || in kdb_bt1()
82 kdb_getarea(ch, (unsigned long)(p+1)-1)) in kdb_bt1()
84 if (!kdb_task_state(p, mask)) in kdb_bt1()
86 kdb_printf("Stack traceback for pid %d\n", p->pid); in kdb_bt1()
87 kdb_ps1(p); in kdb_bt1()
[all …]
/kernel/bpf/
cgroup_iter.c
59 struct cgroup_iter_priv *p = seq->private; in cgroup_iter_seq_start() local
65 if (p->visited_all) in cgroup_iter_seq_start()
75 p->terminate = false; in cgroup_iter_seq_start()
76 p->visited_all = false; in cgroup_iter_seq_start()
77 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start()
78 return css_next_descendant_pre(NULL, p->start_css); in cgroup_iter_seq_start()
79 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start()
80 return css_next_descendant_post(NULL, p->start_css); in cgroup_iter_seq_start()
82 return p->start_css; in cgroup_iter_seq_start()
90 struct cgroup_iter_priv *p = seq->private; in cgroup_iter_seq_stop() local
[all …]
/kernel/power/
process.c
34 struct task_struct *g, *p; in try_to_freeze_tasks() local
55 for_each_process_thread(g, p) { in try_to_freeze_tasks()
56 if (p == current || !freeze_task(p)) in try_to_freeze_tasks()
104 for_each_process_thread(g, p) { in try_to_freeze_tasks()
105 if (p != current && freezing(p) && !frozen(p)) { in try_to_freeze_tasks()
106 sched_show_task(p); in try_to_freeze_tasks()
107 trace_android_vh_try_to_freeze_todo_unfrozen(p); in try_to_freeze_tasks()
189 struct task_struct *g, *p; in thaw_processes() local
208 for_each_process_thread(g, p) { in thaw_processes()
210 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); in thaw_processes()
[all …]
/kernel/cgroup/
pids.c (see the sketch after this entry)
89 static void pids_update_watermark(struct pids_cgroup *p, int64_t nr_pids) in pids_update_watermark() argument
95 if (nr_pids > READ_ONCE(p->watermark)) in pids_update_watermark()
96 WRITE_ONCE(p->watermark, nr_pids); in pids_update_watermark()
123 struct pids_cgroup *p; in pids_uncharge() local
125 for (p = pids; parent_pids(p); p = parent_pids(p)) in pids_uncharge()
126 pids_cancel(p, num); in pids_uncharge()
140 struct pids_cgroup *p; in pids_charge() local
142 for (p = pids; parent_pids(p); p = parent_pids(p)) { in pids_charge()
143 int64_t new = atomic64_add_return(num, &p->counter); in pids_charge()
145 pids_update_watermark(p, new); in pids_charge()
[all …]
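pids_charge and pids_uncharge apply a delta at every level of the cgroup hierarchy by following parent pointers, updating a high-watermark along the way. A single-threaded sketch of the walk (the kernel uses atomic64 counters and READ_ONCE/WRITE_ONCE, and its loop stops before the root; this simplified version charges all levels):

#include <stdint.h>
#include <stdio.h>

struct pids_group {
    int64_t counter;
    int64_t watermark;
    struct pids_group *parent;
};

/* Record the highest count ever seen, as pids_update_watermark() does. */
static void update_watermark(struct pids_group *p, int64_t nr)
{
    if (nr > p->watermark)
        p->watermark = nr;
}

/* Charge num pids at each level by walking the parent chain; a negative
 * num uncharges. The loop shape mirrors pids_charge(). */
static void charge(struct pids_group *pids, int64_t num)
{
    for (struct pids_group *p = pids; p; p = p->parent) {
        p->counter += num;
        update_watermark(p, p->counter);
    }
}

int main(void)
{
    struct pids_group root = { 0, 0, NULL };
    struct pids_group leaf = { 0, 0, &root };

    charge(&leaf, 3);
    charge(&leaf, -1);
    printf("leaf=%lld root=%lld hw=%lld\n",
           (long long)leaf.counter, (long long)root.counter,
           (long long)root.watermark);
    return 0;
}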
/kernel/time/
posix-cpu-timers.c (see the sketch after this entry)
194 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p) in cpu_clock_sample() argument
199 return task_sched_runtime(p); in cpu_clock_sample()
201 task_cputime(p, &utime, &stime); in cpu_clock_sample()
221 static void task_sample_cputime(struct task_struct *p, u64 *samples) in task_sample_cputime() argument
225 task_cputime(p, &utime, &stime); in task_sample_cputime()
226 store_samples(samples, stime, utime, p->se.sum_exec_runtime); in task_sample_cputime()
340 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p, in cpu_clock_sample_group() argument
343 struct thread_group_cputimer *cputimer = &p->signal->cputimer; in cpu_clock_sample_group()
344 struct posix_cputimers *pct = &p->signal->posix_cputimers; in cpu_clock_sample_group()
349 thread_group_start_cputime(p, samples); in cpu_clock_sample_group()
[all …]
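cpu_clock_sample dispatches on the clock id: the scheduler clock reads the task's accumulated runtime, the profiling clock is user plus system time, and the virtual clock is user time alone. A sketch of that dispatch (the enum and struct are illustrative stand-ins for the kernel's clkid handling):

#include <stdint.h>
#include <stdio.h>

enum { CLK_PROF, CLK_VIRT, CLK_SCHED };

struct cputimes {
    uint64_t utime, stime, sum_exec_runtime;
};

/* Pick the sample the caller asked for, in the spirit of
 * cpu_clock_sample(): SCHED is its own counter, PROF is user+system,
 * VIRT is user time only. */
static uint64_t clock_sample(int clkid, const struct cputimes *t)
{
    switch (clkid) {
    case CLK_SCHED:
        return t->sum_exec_runtime;
    case CLK_PROF:
        return t->utime + t->stime;
    case CLK_VIRT:
        return t->utime;
    default:
        return 0;
    }
}

int main(void)
{
    struct cputimes t = { 700, 300, 1000 };

    printf("prof=%llu virt=%llu sched=%llu\n",
           (unsigned long long)clock_sample(CLK_PROF, &t),
           (unsigned long long)clock_sample(CLK_VIRT, &t),
           (unsigned long long)clock_sample(CLK_SCHED, &t));
    return 0;
}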
