/kernel/sched/

D | fair.c
  352  #define for_each_sched_entity(se) \  [argument]
  353  for (; se; se = se->parent)
  455  is_same_group(struct sched_entity *se, struct sched_entity *pse)  in is_same_group()  [argument]
  457  if (se->cfs_rq == pse->cfs_rq)  in is_same_group()
  458  return se->cfs_rq;  in is_same_group()
  463  static inline struct sched_entity *parent_entity(struct sched_entity *se)  in parent_entity()  [argument]
  465  return se->parent;  in parent_entity()
  469  find_matching_se(struct sched_entity **se, struct sched_entity **pse)  in find_matching_se()  [argument]
  481  se_depth = (*se)->depth;  in find_matching_se()
  486  *se = parent_entity(*se);  in find_matching_se()
  [all …]
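The fair.c hits are the CONFIG_FAIR_GROUP_SCHED plumbing: for_each_sched_entity() starts at a task's entity and follows se->parent up through every enclosing group entity, which is what find_matching_se() relies on when it equalizes the two entities' depths before walking both up to a common cfs_rq. Below is a minimal userspace sketch of that upward walk; struct toy_se and the three-level hierarchy are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for struct sched_entity: only the parent link matters here. */
struct toy_se {
	const char *name;
	struct toy_se *parent;		/* NULL at the root, like the kernel's se->parent */
};

/* Same shape as the kernel macro: start at se, follow ->parent until NULL. */
#define for_each_toy_se(se) \
	for (; (se); (se) = (se)->parent)

int main(void)
{
	struct toy_se root  = { "root cfs_rq",  NULL  };
	struct toy_se group = { "group entity", &root };
	struct toy_se task  = { "task entity",  &group };

	struct toy_se *se = &task;
	for_each_toy_se(se)
		printf("visiting %s\n", se->name);	/* task -> group -> root */
	return 0;
}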
D | pelt.c
  301  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)  in __update_load_avg_blocked_se()  [argument]
  303  if (___update_load_sum(now, &se->avg, 0, 0, 0)) {  in __update_load_avg_blocked_se()
  304  ___update_load_avg(&se->avg, se_weight(se));  in __update_load_avg_blocked_se()
  305  trace_pelt_se_tp(se);  in __update_load_avg_blocked_se()
  313  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)  in __update_load_avg_se()  [argument]
  315  if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),  in __update_load_avg_se()
  316  cfs_rq->curr == se)) {  in __update_load_avg_se()
  318  ___update_load_avg(&se->avg, se_weight(se));  in __update_load_avg_se()
  319  cfs_se_util_change(&se->avg);  in __update_load_avg_se()
  320  trace_pelt_se_tp(se);  in __update_load_avg_se()
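Both pelt.c entry points share one pattern: ___update_load_sum() accumulates (and decays) the raw per-entity sums and reports back whether at least one full PELT period elapsed, and only then does ___update_load_avg() turn the sums back into averages. A rough sketch of that accumulate-then-recompute-on-rollover shape follows; the toy_* names are assumptions for illustration, and the toy keeps a plain running sum where the kernel decays past contributions with its y^n table.

#include <stdio.h>
#include <stdint.h>

#define TOY_PERIOD 1024			/* stand-in for the 1024 us PELT period */

struct toy_avg {
	uint64_t last_update_time;
	uint64_t sum;			/* running contribution (kernel: decayed sums) */
	uint64_t avg;			/* derived value, recomputed on period rollover */
};

/* Accumulate running time; return 1 only when a period boundary was crossed. */
static int toy_update_sum(uint64_t now, struct toy_avg *a, int running)
{
	uint64_t prev = a->last_update_time;

	a->last_update_time = now;
	if (running)
		a->sum += now - prev;	/* the kernel also decays older contributions here */

	/* "averages need recomputing" only when a full period completed */
	return (now / TOY_PERIOD) != (prev / TOY_PERIOD);
}

static void toy_update_avg(struct toy_avg *a)
{
	a->avg = a->sum / TOY_PERIOD;	/* kernel divides by a decayed maximum instead */
}

int main(void)
{
	struct toy_avg a = { 0 };

	for (uint64_t now = 256; now <= 4096; now += 256) {
		if (toy_update_sum(now, &a, 1))		/* mirrors __update_load_avg_se() */
			toy_update_avg(&a);
		printf("now=%llu sum=%llu avg=%llu\n",
		       (unsigned long long)now,
		       (unsigned long long)a.sum,
		       (unsigned long long)a.avg);
	}
	return 0;
}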
D | debug.c
  451  struct sched_entity *se = tg->se[cpu];  in print_cfs_group_stats()  [local]
  460  if (!se)  in print_cfs_group_stats()
  463  PN(se->exec_start);  in print_cfs_group_stats()
  464  PN(se->vruntime);  in print_cfs_group_stats()
  465  PN(se->sum_exec_runtime);  in print_cfs_group_stats()
  469  stats = __schedstats_from_se(se);  in print_cfs_group_stats()
  483  P(se->load.weight);  in print_cfs_group_stats()
  485  P(se->avg.load_avg);  in print_cfs_group_stats()
  486  P(se->avg.util_avg);  in print_cfs_group_stats()
  487  P(se->avg.runnable_avg);  in print_cfs_group_stats()
  [all …]
D | sched.h
  377  struct sched_entity **se;  [member]
  472  struct sched_entity *se, int cpu,
  503  extern void set_task_rq_fair(struct sched_entity *se,
  506  static inline void set_task_rq_fair(struct sched_entity *se,  in set_task_rq_fair()  [argument]
  773  #define entity_is_task(se) (!se->my_q)  [argument]
  775  static inline void se_update_runnable(struct sched_entity *se)  in se_update_runnable()  [argument]
  777  if (!entity_is_task(se))  in se_update_runnable()
  778  se->runnable_weight = se->my_q->h_nr_running;  in se_update_runnable()
  781  static inline long se_runnable(struct sched_entity *se)  in se_runnable()  [argument]
  783  if (entity_is_task(se))  in se_runnable()
  [all …]
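The sched.h hits show how the two kinds of sched_entity are told apart: entity_is_task() is simply "has no queue of its own" (my_q == NULL), and a group entity's runnable weight tracks its cfs_rq's h_nr_running. A toy version of that test follows; the simplified structs are illustrative, and the truncated branches of se_runnable() are assumed to return !!se->on_rq for a task and runnable_weight for a group.

#include <stdio.h>
#include <stddef.h>

struct toy_cfs_rq {
	unsigned int h_nr_running;	/* runnable tasks in this hierarchy */
};

struct toy_se {
	struct toy_cfs_rq *my_q;	/* NULL for a task, owned queue for a group */
	long runnable_weight;
	int on_rq;
};

/* Same idea as the kernel's entity_is_task(se): a task owns no queue. */
#define toy_entity_is_task(se)	(!(se)->my_q)

static long toy_se_runnable(struct toy_se *se)
{
	if (toy_entity_is_task(se))
		return !!se->on_rq;	/* a task contributes 0 or 1 (assumed branch) */
	return se->runnable_weight;	/* a group contributes its runnable task count */
}

int main(void)
{
	struct toy_cfs_rq grp_q = { .h_nr_running = 3 };
	struct toy_se task  = { .my_q = NULL,  .on_rq = 1 };
	struct toy_se group = { .my_q = &grp_q, .runnable_weight = grp_q.h_nr_running };

	printf("task runnable=%ld group runnable=%ld\n",
	       toy_se_runnable(&task), toy_se_runnable(&group));
	return 0;
}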
D | stats.h
  94  struct sched_entity se;  [member]
  100  __schedstats_from_se(struct sched_entity *se)  in __schedstats_from_se()  [argument]
  103  if (!entity_is_task(se))  in __schedstats_from_se()
  104  return &container_of(se, struct sched_entity_stats, se)->stats;  in __schedstats_from_se()
  106  return &task_of(se)->stats;  in __schedstats_from_se()
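stats.h is the container_of() trick behind __schedstats_from_se(): a group entity's sched_entity is embedded inside struct sched_entity_stats, so the statistics can be recovered by stepping from the embedded member back to its container, while a task's statistics live in task_struct. A small userspace demonstration of the same pattern with toy structs (not the kernel definitions):

#include <stdio.h>
#include <stddef.h>

/* Step from a pointer to an embedded member back to the containing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_stats {
	unsigned long wait_sum;
};

struct toy_entity {
	int on_rq;
};

/* Mirrors struct sched_entity_stats: the entity is embedded, stats sit beside it. */
struct toy_entity_stats {
	struct toy_entity se;
	struct toy_stats stats;
};

static struct toy_stats *stats_from_entity(struct toy_entity *se)
{
	return &container_of(se, struct toy_entity_stats, se)->stats;
}

int main(void)
{
	struct toy_entity_stats es = { .stats = { .wait_sum = 42 } };

	/* given only the embedded entity pointer, recover its statistics */
	printf("wait_sum=%lu\n", stats_from_entity(&es.se)->wait_sum);
	return 0;
}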
D | stop_task.c
  33  stop->se.exec_start = rq_clock_task(rq);  in set_next_task_stop()
  77  delta_exec = now - curr->se.exec_start;  in put_prev_task_stop()
D | pelt.h
  4  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
  5  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
D | cputime.c
  302  return t->se.sum_exec_runtime;  in read_sum_exec_runtime()
  312  ns = t->se.sum_exec_runtime;  in read_sum_exec_runtime()
  652  .sum_exec_runtime = p->se.sum_exec_runtime,  in task_cputime_adjusted()
D | core.c
  1293  struct load_weight *load = &p->se.load;  in set_load_weight()
  3388  p->se.nr_migrations++;  in set_task_cpu()
  4526  p->se.on_rq = 0;  in __sched_fork()
  4527  p->se.exec_start = 0;  in __sched_fork()
  4528  p->se.sum_exec_runtime = 0;  in __sched_fork()
  4529  p->se.prev_sum_exec_runtime = 0;  in __sched_fork()
  4530  p->se.nr_migrations = 0;  in __sched_fork()
  4531  p->se.vruntime = 0;  in __sched_fork()
  4532  INIT_LIST_HEAD(&p->se.group_node);  in __sched_fork()
  4535  p->se.cfs_rq = NULL;  in __sched_fork()
  [all …]
D | rt.c
  1069  delta_exec = now - curr->se.exec_start;  in update_curr_rt()
  1839  p->se.exec_start = rq_clock_task(rq);  in set_next_task_rt()
  2733  p->se.sum_exec_runtime);  in watchdog()
D | deadline.c
  1329  delta_exec = now - curr->se.exec_start;  in update_curr_dl()
  1988  p->se.exec_start = rq_clock_task(rq);  in set_next_task_dl()
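The stop_task.c, rt.c and deadline.c hits are all the same runtime-accounting idiom shared by those scheduling classes: set_next_task_*() stamps se.exec_start with the runqueue clock, and update_curr_*()/put_prev_task_*() charges delta_exec = now - exec_start to se.sum_exec_runtime and re-stamps. A compressed sketch of the idiom with a simulated clock (toy names, not the scheduler-class callbacks themselves):

#include <stdio.h>
#include <stdint.h>

struct toy_se {
	uint64_t exec_start;		/* rq clock at the last (re)stamp */
	uint64_t sum_exec_runtime;	/* total CPU time charged so far */
};

/* set_next_task_*(): stamp the start of the new run. */
static void toy_set_next_task(struct toy_se *se, uint64_t rq_clock)
{
	se->exec_start = rq_clock;
}

/* update_curr_*(): charge the time since the last stamp, then re-stamp. */
static void toy_update_curr(struct toy_se *se, uint64_t rq_clock)
{
	uint64_t delta_exec = rq_clock - se->exec_start;

	if ((int64_t)delta_exec <= 0)
		return;				/* clock went sideways; nothing to charge */

	se->sum_exec_runtime += delta_exec;
	se->exec_start = rq_clock;
}

int main(void)
{
	struct toy_se se = { 0 };

	toy_set_next_task(&se, 1000);
	toy_update_curr(&se, 1700);		/* charges 700 */
	toy_update_curr(&se, 2500);		/* charges 800 */
	printf("sum_exec_runtime=%llu\n",
	       (unsigned long long)se.sum_exec_runtime);	/* 1500 */
	return 0;
}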
/kernel/trace/

D | trace_events_synth.c
  352  struct synth_event *se;  in print_synth_event()  [local]
  358  se = container_of(event, struct synth_event, call.event);  in print_synth_event()
  360  trace_seq_printf(s, "%s: ", se->name);  in print_synth_event()
  362  for (i = 0, n_u64 = 0; i < se->n_fields; i++) {  in print_synth_event()
  366  fmt = synth_field_fmt(se->fields[i]->type);  in print_synth_event()
  375  if (se->fields[i]->is_string) {  in print_synth_event()
  376  if (se->fields[i]->is_dynamic) {  in print_synth_event()
  385  trace_seq_printf(s, print_fmt, se->fields[i]->name,  in print_synth_event()
  388  i == se->n_fields - 1 ? "" : " ");  in print_synth_event()
  391  trace_seq_printf(s, print_fmt, se->fields[i]->name,  in print_synth_event()
  [all …]
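Here print_synth_event() walks the synthetic event's field array, picks a print format from each field's type, and emits name=value pairs, with string (and dynamic-string) fields handled separately. A toy version of that loop follows; the simplified field descriptor and the wakeup_latency example event are illustrative only.

#include <stdio.h>

/* Simplified field descriptor; the real one also tracks dynamic strings. */
struct toy_field {
	const char *name;
	int is_string;
};

struct toy_event {
	const char *name;
	int n_fields;
	struct toy_field fields[4];
	unsigned long long vals[4];
	const char *strs[4];
};

/* Same loop shape as print_synth_event(): one "name=value" pair per field. */
static void toy_print_event(const struct toy_event *ev)
{
	printf("%s: ", ev->name);
	for (int i = 0; i < ev->n_fields; i++) {
		if (ev->fields[i].is_string)
			printf("%s=%s", ev->fields[i].name, ev->strs[i]);
		else
			printf("%s=%llu", ev->fields[i].name, ev->vals[i]);
		printf(i == ev->n_fields - 1 ? "\n" : " ");
	}
}

int main(void)
{
	struct toy_event ev = {
		.name = "wakeup_latency", .n_fields = 2,
		.fields = { { "lat", 0 }, { "comm", 1 } },
		.vals = { 125 }, .strs = { NULL, "bash" },
	};
	toy_print_event(&ev);
	return 0;
}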
D | trace_events_hist.c
  6410  struct synth_event *se;  in hist_unreg_all()  [local]
  6425  se = find_synth_event(se_name);  in hist_unreg_all()
  6426  if (se)  in hist_unreg_all()
  6427  se->ref--;  in hist_unreg_all()
  6448  struct synth_event *se;  in event_hist_trigger_parse()  [local]
  6544  se = find_synth_event(se_name);  in event_hist_trigger_parse()
  6545  if (se)  in event_hist_trigger_parse()
  6546  se->ref--;  in event_hist_trigger_parse()
  6580  se = find_synth_event(se_name);  in event_hist_trigger_parse()
  6581  if (se)  in event_hist_trigger_parse()
  [all …]
/kernel/

D | delayacct.c
  155  t3 = tsk->se.sum_exec_runtime;  in delayacct_add_tsk()
D | exit.c
  177  add_device_randomness((const void*) &tsk->se.sum_exec_runtime,  in __exit_signal()
  198  sig->sum_sched_runtime += tsk->se.sum_exec_runtime;  in __exit_signal()
/kernel/events/

D | core.c
  8824  struct perf_switch_event *se = data;  in perf_event_switch_output()  [local]
  8834  se->event_id.header.type = PERF_RECORD_SWITCH;  in perf_event_switch_output()
  8835  se->event_id.header.size = sizeof(se->event_id.header);  in perf_event_switch_output()
  8837  se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;  in perf_event_switch_output()
  8838  se->event_id.header.size = sizeof(se->event_id);  in perf_event_switch_output()
  8839  se->event_id.next_prev_pid =  in perf_event_switch_output()
  8840  perf_event_pid(event, se->next_prev);  in perf_event_switch_output()
  8841  se->event_id.next_prev_tid =  in perf_event_switch_output()
  8842  perf_event_tid(event, se->next_prev);  in perf_event_switch_output()
  8845  perf_event_header__init_id(&se->event_id.header, &sample, event);  in perf_event_switch_output()
  [all …]
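In /kernel/events/core.c the local se is a struct perf_switch_event being serialized: per-task events get a bare PERF_RECORD_SWITCH header, while CPU-wide events get PERF_RECORD_SWITCH_CPU_WIDE and additionally carry the next/previous pid and tid. A simplified sketch of that choice follows; the toy record layout omits the real perf_event_header's misc field and the sample-id trailer, and the cpu_wide flag is a stand-in for the event attribute check.

#include <stdio.h>
#include <stdint.h>

/* Record types as defined in the perf ABI (include/uapi/linux/perf_event.h). */
#define PERF_RECORD_SWITCH		14
#define PERF_RECORD_SWITCH_CPU_WIDE	15

struct toy_switch_record {
	struct {
		uint32_t type;
		uint16_t size;
	} header;
	uint32_t next_prev_pid;		/* only filled for the CPU-wide variant */
	uint32_t next_prev_tid;
};

static void toy_fill_switch(struct toy_switch_record *rec, int cpu_wide,
			    uint32_t pid, uint32_t tid)
{
	if (!cpu_wide) {
		rec->header.type = PERF_RECORD_SWITCH;
		rec->header.size = sizeof(rec->header);	/* header only */
		return;
	}
	rec->header.type = PERF_RECORD_SWITCH_CPU_WIDE;
	rec->header.size = sizeof(*rec);		/* header + pid/tid */
	rec->next_prev_pid = pid;
	rec->next_prev_tid = tid;
}

int main(void)
{
	struct toy_switch_record rec = { { 0, 0 }, 0, 0 };

	toy_fill_switch(&rec, 1, 1234, 1234);
	printf("type=%u size=%u\n", rec.header.type, rec.header.size);
	return 0;
}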
/kernel/time/

D | posix-cpu-timers.c
  226  store_samples(samples, stime, utime, p->se.sum_exec_runtime);  in task_sample_cputime()