
Searched refs:se (Results 1 – 17 of 17) sorted by relevance

/kernel/sched/
fair.c
279 #define for_each_sched_entity(se) \ argument
280 for (; se; se = se->parent)
395 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
397 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
398 return se->cfs_rq; in is_same_group()
403 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
405 return se->parent; in parent_entity()
409 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
421 se_depth = (*se)->depth; in find_matching_se()
426 *se = parent_entity(*se); in find_matching_se()
[all …]
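The fair.c hits above are the group-scheduling hierarchy helpers: for_each_sched_entity() climbs the se->parent chain, and find_matching_se() first equalizes the depths of two entities, then walks both up until they sit in the same group. Below is a minimal user-space sketch of that walk; "struct entity" and main() are illustrative stand-ins, not kernel code, and "sharing a parent" stands in for is_same_group()'s cfs_rq comparison.

    #include <stdio.h>

    struct entity {
        int depth;
        struct entity *parent;
    };

    static void find_matching(struct entity **se, struct entity **pse)
    {
        /* Equalize depths first, as find_matching_se() does. */
        while ((*se)->depth > (*pse)->depth)
            *se = (*se)->parent;
        while ((*pse)->depth > (*se)->depth)
            *pse = (*pse)->parent;

        /* Same depth now: climb in lockstep until both entities
         * hang off the same parent, i.e. share a group. */
        while ((*se)->parent != (*pse)->parent) {
            *se = (*se)->parent;
            *pse = (*pse)->parent;
        }
    }

    int main(void)
    {
        struct entity root = { 0, NULL };
        struct entity a = { 1, &root };
        struct entity b = { 2, &a };
        struct entity c = { 1, &root };
        struct entity *se = &b, *pse = &c;

        find_matching(&se, &pse);
        printf("matched at depth %d\n", se->depth); /* prints 1 */
        return 0;
    }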
pelt.c
302 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se) in __update_load_avg_blocked_se() argument
304 if (___update_load_sum(now, &se->avg, 0, 0, 0)) { in __update_load_avg_blocked_se()
305 ___update_load_avg(&se->avg, se_weight(se)); in __update_load_avg_blocked_se()
306 trace_pelt_se_tp(se); in __update_load_avg_blocked_se()
314 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
316 if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se), in __update_load_avg_se()
317 cfs_rq->curr == se)) { in __update_load_avg_se()
319 ___update_load_avg(&se->avg, se_weight(se)); in __update_load_avg_se()
320 cfs_se_util_change(&se->avg); in __update_load_avg_se()
321 trace_pelt_se_tp(se); in __update_load_avg_se()
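Both pelt.c entry points funnel into ___update_load_sum()/___update_load_avg(), which keep a geometrically decayed average of the entity's recent activity; the !!se->on_rq, se_runnable(se), and cfs_rq->curr == se arguments say whether the entity was contributing load, runnable, and running over the elapsed time. A toy floating-point sketch of that decay, assuming PELT's documented ~1 ms periods and 32-period half-life; the kernel uses fixed-point arithmetic instead.

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double y = pow(0.5, 1.0 / 32.0); /* y^32 == 0.5 */
        double load_sum = 0.0;

        /* An entity runnable for 100 consecutive periods: older periods
         * decay by y each step, so the sum saturates at 1 / (1 - y). */
        for (int period = 0; period < 100; period++)
            load_sum = load_sum * y + 1.0;

        printf("load_sum = %.2f of max %.2f\n", load_sum, 1.0 / (1.0 - y));
        return 0;
    }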
debug.c
450 struct sched_entity *se = tg->se[cpu]; in print_cfs_group_stats() local
459 if (!se) in print_cfs_group_stats()
462 PN(se->exec_start); in print_cfs_group_stats()
463 PN(se->vruntime); in print_cfs_group_stats()
464 PN(se->sum_exec_runtime); in print_cfs_group_stats()
468 stats = __schedstats_from_se(se); in print_cfs_group_stats()
482 P(se->load.weight); in print_cfs_group_stats()
484 P(se->avg.load_avg); in print_cfs_group_stats()
485 P(se->avg.util_avg); in print_cfs_group_stats()
486 P(se->avg.runnable_avg); in print_cfs_group_stats()
[all …]
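The P()/PN() calls in print_cfs_group_stats() are stringifying print wrappers: #F turns the expression into its own label, so each field prints under its source name (PN() additionally splits nanosecond values for readability). A hedged user-space reduction of the same macro idiom; the kernel versions write to a seq_file rather than stdout.

    #include <stdio.h>

    #define P(F) printf("  .%-30s: %lld\n", #F, (long long)(F))

    struct { long long weight; long long vruntime; } se = { 1024, 123456789 };

    int main(void)
    {
        P(se.weight);   /* label "se.weight" comes from #F */
        P(se.vruntime);
        return 0;
    }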
sched.h
395 struct sched_entity **se; member
490 struct sched_entity *se, int cpu,
524 extern void set_task_rq_fair(struct sched_entity *se,
527 static inline void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
746 #define entity_is_task(se) (!se->my_q) argument
748 static inline void se_update_runnable(struct sched_entity *se) in se_update_runnable() argument
750 if (!entity_is_task(se)) in se_update_runnable()
751 se->runnable_weight = se->my_q->h_nr_running; in se_update_runnable()
754 static inline long se_runnable(struct sched_entity *se) in se_runnable() argument
756 if (entity_is_task(se)) in se_runnable()
[all …]
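The sched.h excerpt is cut off, but the shape is clear: entity_is_task() tests my_q (only a group entity owns a run-queue of its own), and se_update_runnable() mirrors the group queue's h_nr_running into runnable_weight. A self-contained sketch with simplified types; the tail of se_runnable() is an assumption based on that pattern, since the search result truncates it.

    #include <stdio.h>

    struct cfs_rq { unsigned int h_nr_running; };

    struct sched_entity {
        int on_rq;
        long runnable_weight;
        struct cfs_rq *my_q; /* NULL for a plain task */
    };

    #define entity_is_task(se) (!(se)->my_q)

    static void se_update_runnable(struct sched_entity *se)
    {
        if (!entity_is_task(se))
            se->runnable_weight = se->my_q->h_nr_running;
    }

    /* Assumed tail of the truncated helper: a task counts as 0 or 1
     * runnable, a group entity contributes its cached queue count. */
    static long se_runnable(struct sched_entity *se)
    {
        if (entity_is_task(se))
            return !!se->on_rq;
        return se->runnable_weight;
    }

    int main(void)
    {
        struct cfs_rq q = { .h_nr_running = 3 };
        struct sched_entity task = { .on_rq = 1, .my_q = NULL };
        struct sched_entity group = { .my_q = &q };

        se_update_runnable(&group);
        printf("task %ld, group %ld\n", se_runnable(&task), se_runnable(&group));
        return 0;
    }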
stats.h
62 struct sched_entity se; member
68 __schedstats_from_se(struct sched_entity *se) in __schedstats_from_se() argument
71 if (!entity_is_task(se)) in __schedstats_from_se()
72 return &container_of(se, struct sched_entity_stats, se)->stats; in __schedstats_from_se()
74 return &task_of(se)->stats; in __schedstats_from_se()
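__schedstats_from_se() is the classic container_of() trick: given a pointer to the embedded se member, step back to the enclosing sched_entity_stats and return its stats field. A runnable user-space sketch of the same pointer arithmetic, with simplified stand-in types and a bare-bones container_of().

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct stats { unsigned long wait_sum; };

    struct entity_stats {
        struct entity { int depth; } se; /* embedded member */
        struct stats stats;
    };

    int main(void)
    {
        struct entity_stats es = { .stats = { .wait_sum = 42 } };
        struct entity *se = &es.se;

        /* Recover the container from the embedded member pointer. */
        struct entity_stats *owner = container_of(se, struct entity_stats, se);
        printf("wait_sum = %lu\n", owner->stats.wait_sum);
        return 0;
    }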
stop_task.c
34 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
77 delta_exec = rq_clock_task(rq) - curr->se.exec_start; in put_prev_task_stop()
84 curr->se.sum_exec_runtime += delta_exec; in put_prev_task_stop()
87 curr->se.exec_start = rq_clock_task(rq); in put_prev_task_stop()
pelt.h
4 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
cputime.c
293 return t->se.sum_exec_runtime; in read_sum_exec_runtime()
303 ns = t->se.sum_exec_runtime; in read_sum_exec_runtime()
638 .sum_exec_runtime = p->se.sum_exec_runtime, in task_cputime_adjusted()
core.c
1236 struct load_weight *load = &p->se.load; in set_load_weight()
3161 p->se.nr_migrations++; in set_task_cpu()
4356 p->se.on_rq = 0; in __sched_fork()
4357 p->se.exec_start = 0; in __sched_fork()
4358 p->se.sum_exec_runtime = 0; in __sched_fork()
4359 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4360 p->se.nr_migrations = 0; in __sched_fork()
4361 p->se.vruntime = 0; in __sched_fork()
4362 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4365 p->se.cfs_rq = NULL; in __sched_fork()
[all …]
rt.c
1029 delta_exec = now - curr->se.exec_start; in update_curr_rt()
1036 curr->se.sum_exec_runtime += delta_exec; in update_curr_rt()
1039 curr->se.exec_start = now; in update_curr_rt()
1687 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
2570 p->se.sum_exec_runtime); in watchdog()
deadline.c
1263 delta_exec = now - curr->se.exec_start; in update_curr_dl()
1273 curr->se.sum_exec_runtime += delta_exec; in update_curr_dl()
1276 curr->se.exec_start = now; in update_curr_dl()
1840 p->se.exec_start = rq_clock_task(rq); in set_next_task_dl()
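stop_task.c, rt.c, and deadline.c above all repeat the same accounting idiom on p->se: measure the slice since exec_start, add it to sum_exec_runtime, and restart the window. A compact sketch of that idiom with a stand-in clock; the real code reads rq_clock_task(rq) and guards against unlikely negative deltas.

    #include <stdio.h>

    typedef unsigned long long u64;

    static u64 fake_clock; /* stand-in for rq_clock_task(rq) */

    struct sched_entity { u64 exec_start, sum_exec_runtime; };

    static void update_curr(struct sched_entity *curr)
    {
        u64 now = fake_clock;
        u64 delta_exec = now - curr->exec_start;

        curr->sum_exec_runtime += delta_exec; /* charge the elapsed slice */
        curr->exec_start = now;               /* open the next window */
    }

    int main(void)
    {
        struct sched_entity curr = { 0, 0 };

        fake_clock = 1000; update_curr(&curr);
        fake_clock = 2500; update_curr(&curr);
        printf("sum_exec_runtime = %llu\n", curr.sum_exec_runtime); /* 2500 */
        return 0;
    }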
/kernel/trace/
trace_events_synth.c
333 struct synth_event *se; in print_synth_event() local
339 se = container_of(event, struct synth_event, call.event); in print_synth_event()
341 trace_seq_printf(s, "%s: ", se->name); in print_synth_event()
343 for (i = 0, n_u64 = 0; i < se->n_fields; i++) { in print_synth_event()
347 fmt = synth_field_fmt(se->fields[i]->type); in print_synth_event()
356 if (se->fields[i]->is_string) { in print_synth_event()
357 if (se->fields[i]->is_dynamic) { in print_synth_event()
366 trace_seq_printf(s, print_fmt, se->fields[i]->name, in print_synth_event()
369 i == se->n_fields - 1 ? "" : " "); in print_synth_event()
372 trace_seq_printf(s, print_fmt, se->fields[i]->name, in print_synth_event()
[all …]
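print_synth_event() walks the event's field descriptors and picks a format per field type, separating entries with spaces except after the last one. A simplified sketch of that loop; the struct names and the string/integer split are reduced stand-ins for the kernel types.

    #include <stdio.h>

    struct field { const char *name; int is_string; };

    static void print_event(const char *name, const struct field *f, int n,
                            const unsigned long long *vals, const char **strs)
    {
        printf("%s: ", name);
        for (int i = 0; i < n; i++) {
            if (f[i].is_string)
                printf("%s=%s", f[i].name, strs[i]);
            else
                printf("%s=%llu", f[i].name, vals[i]);
            fputs(i == n - 1 ? "\n" : " ", stdout);
        }
    }

    int main(void)
    {
        const struct field fields[] = { { "pid", 0 }, { "comm", 1 } };
        const unsigned long long vals[] = { 42, 0 };
        const char *strs[] = { NULL, "bash" };

        print_event("wakeup_synth", fields, 2, vals, strs);
        return 0;
    }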
trace_events_hist.c
6136 struct synth_event *se; in hist_unreg_all() local
6151 se = find_synth_event(se_name); in hist_unreg_all()
6152 if (se) in hist_unreg_all()
6153 se->ref--; in hist_unreg_all()
6173 struct synth_event *se; in event_hist_trigger_func() local
6282 se = find_synth_event(se_name); in event_hist_trigger_func()
6283 if (se) in event_hist_trigger_func()
6284 se->ref--; in event_hist_trigger_func()
6324 se = find_synth_event(se_name); in event_hist_trigger_func()
6325 if (se) in event_hist_trigger_func()
[all …]
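Both hist_unreg_all() and event_hist_trigger_func() drop a synthetic-event reference the same way: look the event up by name and decrement ref only if the lookup succeeds. A sketch of that pattern; the event table and lookup are illustrative stand-ins.

    #include <stdio.h>
    #include <string.h>

    struct synth_event { const char *name; int ref; };

    static struct synth_event events[] = { { "wakeup_latency", 2 } };

    static struct synth_event *find_synth_event(const char *name)
    {
        for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++)
            if (!strcmp(events[i].name, name))
                return &events[i];
        return NULL;
    }

    int main(void)
    {
        struct synth_event *se = find_synth_event("wakeup_latency");

        if (se)
            se->ref--; /* drop the trigger's reference */
        printf("ref = %d\n", events[0].ref);
        return 0;
    }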
/kernel/
delayacct.c
144 t3 = tsk->se.sum_exec_runtime; in delayacct_add_tsk()
exit.c
173 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, in __exit_signal()
194 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; in __exit_signal()
/kernel/events/
core.c
8876 struct perf_switch_event *se = data; in perf_event_switch_output() local
8886 se->event_id.header.type = PERF_RECORD_SWITCH; in perf_event_switch_output()
8887 se->event_id.header.size = sizeof(se->event_id.header); in perf_event_switch_output()
8889 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; in perf_event_switch_output()
8890 se->event_id.header.size = sizeof(se->event_id); in perf_event_switch_output()
8891 se->event_id.next_prev_pid = in perf_event_switch_output()
8892 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
8893 se->event_id.next_prev_tid = in perf_event_switch_output()
8894 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
8897 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
[all …]
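perf_event_switch_output() sizes the record header differently for the two switch record types: a plain PERF_RECORD_SWITCH carries only the header, while PERF_RECORD_SWITCH_CPU_WIDE also carries the next/previous pid and tid. A sketch of that header setup with types reduced from the perf ABI; the record-type values below are assumed to match include/uapi/linux/perf_event.h.

    #include <stdio.h>
    #include <stdint.h>

    struct perf_event_header { uint32_t type; uint16_t misc; uint16_t size; };

    struct switch_event {
        struct perf_event_header header;
        uint32_t next_prev_pid; /* only present in CPU-wide records */
        uint32_t next_prev_tid;
    };

    enum { PERF_RECORD_SWITCH = 14, PERF_RECORD_SWITCH_CPU_WIDE = 15 };

    static void fill_header(struct switch_event *se, int cpu_wide,
                            uint32_t pid, uint32_t tid)
    {
        if (!cpu_wide) {
            se->header.type = PERF_RECORD_SWITCH;
            se->header.size = sizeof(se->header);
        } else {
            se->header.type = PERF_RECORD_SWITCH_CPU_WIDE;
            se->header.size = sizeof(*se); /* header plus pid/tid */
            se->next_prev_pid = pid;
            se->next_prev_tid = tid;
        }
    }

    int main(void)
    {
        struct switch_event se = { { 0, 0, 0 }, 0, 0 };

        fill_header(&se, 1, 4242, 4242);
        printf("type=%u size=%u\n", se.header.type, se.header.size);
        return 0;
    }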
/kernel/time/
posix-cpu-timers.c
219 store_samples(samples, stime, utime, p->se.sum_exec_runtime); in task_sample_cputime()