Lines matching refs: p
735 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) in print_task() argument
737 if (task_current(rq, p)) in print_task()
740 SEQ_printf(m, " %c", task_state_to_char(p)); in print_task()
743 p->comm, task_pid_nr(p), in print_task()
744 SPLIT_NS(p->se.vruntime), in print_task()
745 entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N', in print_task()
746 SPLIT_NS(p->se.deadline), in print_task()
747 p->se.custom_slice ? 'S' : ' ', in print_task()
748 SPLIT_NS(p->se.slice), in print_task()
749 SPLIT_NS(p->se.sum_exec_runtime), in print_task()
750 (long long)(p->nvcsw + p->nivcsw), in print_task()
751 p->prio); in print_task()
754 SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)), in print_task()
755 SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), in print_task()
756 SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); in print_task()
759 SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); in print_task()
762 SEQ_printf_task_group_path(m, task_group(p), " %s") in print_task()
770 struct task_struct *g, *p; in print_rq() local
796 for_each_process_thread(g, p) { in print_rq()
797 if (task_cpu(p) != rq_cpu) in print_rq()
800 print_task(m, rq, p); in print_rq()
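print_rq() walks every thread in the system with for_each_process_thread() and prints only the ones whose task_cpu() matches the runqueue's CPU. The same filter-and-print pattern as a self-contained userspace sketch over a hypothetical task array; the struct, the task list and the print helper are invented stand-ins, not kernel API.

        /* Userspace sketch of the print_rq() loop: iterate all tasks and
         * print only those associated with a given CPU. */
        #include <stdio.h>

        struct fake_task {
                const char *comm;
                int pid;
                int cpu;        /* analogue of task_cpu(p) */
        };

        static void print_task_line(const struct fake_task *t)
        {
                printf("%15s %5d (cpu %d)\n", t->comm, t->pid, t->cpu);
        }

        static void print_cpu_tasks(const struct fake_task *tasks, int n, int rq_cpu)
        {
                for (int i = 0; i < n; i++) {
                        if (tasks[i].cpu != rq_cpu)     /* mirrors: if (task_cpu(p) != rq_cpu) continue; */
                                continue;
                        print_task_line(&tasks[i]);
                }
        }

        int main(void)
        {
                struct fake_task tasks[] = {
                        { "kworker/0:1",   42, 0 },
                        { "bash",        1337, 1 },
                        { "sshd",         901, 0 },
                };

                print_cpu_tasks(tasks, 3, 0);   /* only CPU 0 tasks are printed */
                return 0;
        }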
1133 #define P(F) __PS(#F, p->F)
1134 #define PM(F, M) __PS(#F, p->F & (M))
1137 #define PN(F) __PSN(#F, p->F)
1151 static void sched_show_numa(struct task_struct *p, struct seq_file *m) in sched_show_numa() argument
1154 if (p->mm) in sched_show_numa()
1161 task_node(p), task_numa_group_id(p)); in sched_show_numa()
1162 show_numa_stats(p, m); in sched_show_numa()
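sched_show_numa() only emits its detail when the task has an address space (p->mm), and the block as a whole is only meaningful with NUMA balancing configured; it reports the task's current node and NUMA group ID. As a rough userspace counterpart to task_cpu()/task_node() for the calling thread, the getcpu() syscall can be queried directly, as in the sketch below (raw syscall used to avoid depending on a particular glibc wrapper).

        /* Sketch: query the current CPU and NUMA node for the calling thread. */
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        int main(void)
        {
                unsigned int cpu = 0, node = 0;

                if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
                        printf("running on cpu %u, numa node %u\n", cpu, node);
                else
                        perror("getcpu");
                return 0;
        }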
1166 void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, in proc_sched_show_task() argument
1171 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns), in proc_sched_show_task()
1172 get_nr_threads(p)); in proc_sched_show_task()
1177 #define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->stats.F)) in proc_sched_show_task()
1178 #define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F)) in proc_sched_show_task()
1184 nr_switches = p->nvcsw + p->nivcsw; in proc_sched_show_task()
1220 avg_atom = p->se.sum_exec_runtime; in proc_sched_show_task()
1226 avg_per_cpu = p->se.sum_exec_runtime; in proc_sched_show_task()
1227 if (p->se.nr_migrations) { in proc_sched_show_task()
1229 p->se.nr_migrations); in proc_sched_show_task()
1243 __PS("nr_voluntary_switches", p->nvcsw); in proc_sched_show_task()
1244 __PS("nr_involuntary_switches", p->nivcsw); in proc_sched_show_task()
1258 __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value); in proc_sched_show_task()
1259 __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value); in proc_sched_show_task()
1260 __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN)); in proc_sched_show_task()
1261 __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX)); in proc_sched_show_task()
1265 if (task_has_dl_policy(p)) { in proc_sched_show_task()
1268 } else if (fair_policy(p->policy)) { in proc_sched_show_task()
1272 __PS("ext.enabled", task_on_scx(p)); in proc_sched_show_task()
1286 sched_show_numa(p, m); in proc_sched_show_task()
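proc_sched_show_task() is the seq_file backend for /proc/<pid>/sched: a header with comm, PID and thread count, followed by one "field : value" line per statistic (schedstats, voluntary/involuntary switch counts, uclamp requests and effective values, policy-specific data, then the NUMA summary). A hedged userspace sketch that reads a few of those lines back for the current process; it assumes the running kernel exposes /proc/<pid>/sched and that the selected field names are present in its output.

        /* Sketch: read /proc/self/sched and pick out a few "name : value"
         * lines produced by proc_sched_show_task(). */
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                FILE *f = fopen("/proc/self/sched", "r");
                char line[256];

                if (!f) {
                        perror("/proc/self/sched");     /* e.g. kernel without sched debug support */
                        return 1;
                }

                while (fgets(line, sizeof(line), f)) {
                        if (strstr(line, "se.sum_exec_runtime") ||
                            strstr(line, "nr_voluntary_switches") ||
                            strstr(line, "nr_involuntary_switches"))
                                fputs(line, stdout);
                }

                fclose(f);
                return 0;
        }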
1289 void proc_sched_set_task(struct task_struct *p) in proc_sched_set_task() argument
1292 memset(&p->stats, 0, sizeof(p->stats)); in proc_sched_set_task()