/kernel/
D | tsacct.c |
       22  struct taskstats *stats, struct task_struct *tsk)  in bacct_add_tsk() argument
       37  stats->ac_tgetime = delta;  in bacct_add_tsk()
       40  stats->ac_etime = delta;  in bacct_add_tsk()
       43  stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);  in bacct_add_tsk()
       44  stats->ac_btime64 = btime;  in bacct_add_tsk()
       47  stats->ac_exitcode = tsk->exit_code;  in bacct_add_tsk()
       49  stats->ac_flag |= AFORK;  in bacct_add_tsk()
       51  stats->ac_flag |= ASU;  in bacct_add_tsk()
       53  stats->ac_flag |= ACORE;  in bacct_add_tsk()
       55  stats->ac_flag |= AXSIG;  in bacct_add_tsk()
      [all …]
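Note: lines 43-44 are the compatibility idiom for the accounting begin time: the legacy 32-bit ac_btime saturates at U32_MAX while ac_btime64 keeps the full value. A minimal sketch of that idiom (fill_btime() is a hypothetical helper, not a kernel function):

    #include <linux/taskstats.h>   /* struct taskstats */
    #include <linux/minmax.h>      /* clamp_t() */
    #include <linux/limits.h>      /* U32_MAX */
    #include <linux/time64.h>      /* time64_t */

    static void fill_btime(struct taskstats *stats, time64_t btime)
    {
            /* old userspace reads a u32: saturate rather than truncate */
            stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);
            /* the newer field carries the unclamped 64-bit value */
            stats->ac_btime64 = btime;
    }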
D | taskstats.c |
      157  static void exe_add_tsk(struct taskstats *stats, struct task_struct *tsk)  in exe_add_tsk() argument
      164  stats->ac_exe_dev =  in exe_add_tsk()
      166  stats->ac_exe_inode = exe_file->f_inode->i_ino;  in exe_add_tsk()
      169  stats->ac_exe_dev = 0;  in exe_add_tsk()
      170  stats->ac_exe_inode = 0;  in exe_add_tsk()
      176  struct task_struct *tsk, struct taskstats *stats)  in fill_stats() argument
      178  memset(stats, 0, sizeof(*stats));  in fill_stats()
      186  delayacct_add_tsk(stats, tsk);  in fill_stats()
      189  stats->version = TASKSTATS_VERSION;  in fill_stats()
      190  stats->nvcsw = tsk->nvcsw;  in fill_stats()
      [all …]
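Note: fill_stats() shows the fill discipline for a versioned netlink ABI struct: zero everything, stamp the ABI version, then copy fields. A reduced sketch (fill_basic_stats() is a hypothetical condensation of the above):

    #include <linux/string.h>      /* memset() */
    #include <linux/sched.h>       /* struct task_struct */
    #include <linux/taskstats.h>   /* struct taskstats, TASKSTATS_VERSION */

    static void fill_basic_stats(struct task_struct *tsk,
                                 struct taskstats *stats)
    {
            /* zero first so every unfilled field has a defined value */
            memset(stats, 0, sizeof(*stats));
            /* userspace checks this to know which fields are present */
            stats->version = TASKSTATS_VERSION;
            stats->nvcsw = tsk->nvcsw;     /* voluntary context switches */
            stats->nivcsw = tsk->nivcsw;   /* involuntary context switches */
    }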
/kernel/sched/
D | stats.c |
        7  struct sched_statistics *stats)  in __update_stats_wait_start() argument
       12  prev_wait_start = schedstat_val(stats->wait_start);  in __update_stats_wait_start()
       17  __schedstat_set(stats->wait_start, wait_start);  in __update_stats_wait_start()
       21  struct sched_statistics *stats)  in __update_stats_wait_end() argument
       23  u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);  in __update_stats_wait_end()
       32  __schedstat_set(stats->wait_start, delta);  in __update_stats_wait_end()
       40  __schedstat_set(stats->wait_max,  in __update_stats_wait_end()
       41  max(schedstat_val(stats->wait_max), delta));  in __update_stats_wait_end()
       42  __schedstat_inc(stats->wait_count);  in __update_stats_wait_end()
       43  __schedstat_add(stats->wait_sum, delta);  in __update_stats_wait_end()
      [all …]
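Note: wait-start opens a timestamp window, wait-end folds the elapsed delta into a running max/count/sum. Stripped of the rq plumbing and the __schedstat_*() wrappers (which compile away without CONFIG_SCHEDSTATS), the bookkeeping reduces to roughly the sketch below; struct wait_window is a stand-in, and the early return mirrors the wait_start guard seen later in fair.c:

    #include <linux/types.h>    /* u64 */
    #include <linux/minmax.h>   /* max() */

    struct wait_window {
            u64 wait_start;   /* clock value when the wait began; 0 = closed */
            u64 wait_max;     /* longest single wait observed */
            u64 wait_count;   /* completed waits */
            u64 wait_sum;     /* total time spent waiting */
    };

    static void wait_end(struct wait_window *w, u64 now)
    {
            u64 delta;

            if (!w->wait_start)     /* no window open: nothing to account */
                    return;
            delta = now - w->wait_start;
            w->wait_max = max(w->wait_max, delta);
            w->wait_count++;
            w->wait_sum += delta;
            w->wait_start = 0;      /* close the window */
    }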
D | stats.h |
       48  struct sched_statistics *stats);
       51  struct sched_statistics *stats);
       53  struct sched_statistics *stats);
       85  # define __update_stats_wait_start(rq, p, stats) do { } while (0)  argument
       86  # define __update_stats_wait_end(rq, p, stats) do { } while (0)  argument
       87  # define __update_stats_enqueue_sleeper(rq, p, stats) do { } while (0)  argument
       95  struct sched_statistics stats;  member
      104  return &container_of(se, struct sched_entity_stats, se)->stats;  in __schedstats_from_se()
      106  return &task_of(se)->stats;  in __schedstats_from_se()
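Note: two idioms meet here: the do { } while (0) stubs that replace the update helpers when schedstats is configured out, and the container_of() lookup that recovers a group entity's statistics from its embedded sched_entity. A self-contained sketch of the latter, with stand-in type names:

    #include <linux/container_of.h>

    struct stats_blob { unsigned long wait_sum; };
    struct entity { int weight; };

    /* mirrors struct sched_entity_stats: the entity with stats beside it */
    struct entity_stats {
            struct entity     se;     /* member callers hold a pointer to */
            struct stats_blob stats;  /* sibling member we want back */
    };

    static struct stats_blob *stats_from(struct entity *se)
    {
            /* step from &se back to the enclosing struct, then sideways */
            return &container_of(se, struct entity_stats, se)->stats;
    }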
D | debug.c |
      455  #F, (long long)schedstat_val(stats->F))  in print_cfs_group_stats()
      458  #F, SPLIT_NS((long long)schedstat_val(stats->F)))  in print_cfs_group_stats()
      468  struct sched_statistics *stats;  in print_cfs_group_stats() local
      469  stats = __schedstats_from_se(se);  in print_cfs_group_stats()
      547  SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),  in print_task()
      549  SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),  in print_task()
      550  SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));  in print_task()
      959  #define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->stats.F))  in proc_sched_show_task()
      960  #define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))  in proc_sched_show_task()
     1070  memset(&p->stats, 0, sizeof(p->stats));  in proc_sched_set_task()
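Note: P_SCHEDSTAT()/PN_SCHEDSTAT() lean on the preprocessor stringify operator: one macro argument both names the field ('#F') and reads it ('p->stats.F'), so label and value can never drift apart. A hedged approximation (__PS/__PSN are helpers local to debug.c; SHOW_STAT below is illustrative):

    #include <linux/seq_file.h>

    #define SHOW_STAT(m, p, F) \
            seq_printf(m, "%-32s %lld\n", #F, (long long)(p)->stats.F)

    /* usage, assuming m is a seq_file and p embeds a stats struct:
     *         SHOW_STAT(m, p, wait_sum);
     *         SHOW_STAT(m, p, wait_max);
     */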
D | rt.c |
     1073  schedstat_set(curr->stats.exec_max,  in update_curr_rt()
     1074  max(curr->stats.exec_max, delta_exec));  in update_curr_rt()
     1348  return &rt_task_of(rt_se)->stats;  in __schedstats_from_rt_se()
     1354  struct sched_statistics *stats;  in update_stats_wait_start_rt() local
     1363  stats = __schedstats_from_rt_se(rt_se);  in update_stats_wait_start_rt()
     1364  if (!stats)  in update_stats_wait_start_rt()
     1367  __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);  in update_stats_wait_start_rt()
     1373  struct sched_statistics *stats;  in update_stats_enqueue_sleeper_rt() local
     1382  stats = __schedstats_from_rt_se(rt_se);  in update_stats_enqueue_sleeper_rt()
     1383  if (!stats)  in update_stats_enqueue_sleeper_rt()
      [all …]
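Note: the exec_max update at lines 1073-1074 is a high-watermark: keep the longest single execution slice ever seen. The identical idiom recurs in stop_task.c, deadline.c and fair.c below, so one sketch covers all four (track_exec_max() is a hypothetical name):

    #include <linux/types.h>    /* u64 */
    #include <linux/minmax.h>   /* max() */

    static inline void track_exec_max(u64 *exec_max, u64 delta_exec)
    {
            /* monotone: the watermark only ever moves up */
            *exec_max = max(*exec_max, delta_exec);
    }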
D | stop_task.c |
       81  schedstat_set(curr->stats.exec_max,  in put_prev_task_stop()
       82  max(curr->stats.exec_max, delta_exec));  in put_prev_task_stop()
D | deadline.c |
     1336  schedstat_set(curr->stats.exec_max,  in update_curr_dl()
     1337  max(curr->stats.exec_max, delta_exec));  in update_curr_dl()
     1543  return &dl_task_of(dl_se)->stats;  in __schedstats_from_dl_se()
     1549  struct sched_statistics *stats;  in update_stats_wait_start_dl() local
     1554  stats = __schedstats_from_dl_se(dl_se);  in update_stats_wait_start_dl()
     1555  __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);  in update_stats_wait_start_dl()
     1561  struct sched_statistics *stats;  in update_stats_wait_end_dl() local
     1566  stats = __schedstats_from_dl_se(dl_se);  in update_stats_wait_end_dl()
     1567  __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);  in update_stats_wait_end_dl()
     1573  struct sched_statistics *stats;  in update_stats_enqueue_sleeper_dl() local
      [all …]
D | fair.c |
      910  struct sched_statistics *stats;  in update_curr() local
      912  stats = __schedstats_from_se(curr);  in update_curr()
      913  __schedstat_set(stats->exec_max,  in update_curr()
      914  max(delta_exec, stats->exec_max));  in update_curr()
      942  struct sched_statistics *stats;  in update_stats_wait_start_fair() local
      948  stats = __schedstats_from_se(se);  in update_stats_wait_start_fair()
      953  __update_stats_wait_start(rq_of(cfs_rq), p, stats);  in update_stats_wait_start_fair()
      959  struct sched_statistics *stats;  in update_stats_wait_end_fair() local
      965  stats = __schedstats_from_se(se);  in update_stats_wait_end_fair()
      973  if (unlikely(!schedstat_val(stats->wait_start)))  in update_stats_wait_end_fair()
      [all …]
D | core.c |
     3742  __schedstat_inc(p->stats.nr_wakeups_local);  in ttwu_stat()
     3746  __schedstat_inc(p->stats.nr_wakeups_remote);  in ttwu_stat()
     3758  __schedstat_inc(p->stats.nr_wakeups_migrate);  in ttwu_stat()
     3762  __schedstat_inc(p->stats.nr_wakeups);  in ttwu_stat()
     3765  __schedstat_inc(p->stats.nr_wakeups_sync);  in ttwu_stat()
     4542  memset(&p->stats, 0, sizeof(p->stats));  in __sched_fork()
    10250  schedstat_set(p->stats.wait_start, 0);  in normalize_rt_tasks()
    10251  schedstat_set(p->stats.sleep_start, 0);  in normalize_rt_tasks()
    10252  schedstat_set(p->stats.block_start, 0);  in normalize_rt_tasks()
    11110  struct sched_statistics *stats;  in cpu_cfs_stat_show() local
      [all …]
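Note: ttwu_stat() classifies each wakeup and bumps one counter per property: local vs. remote relative to the waking CPU, migrated or not, sync or not, plus an unconditional total. A stand-in sketch of that classification (struct and helper names are illustrative):

    #include <linux/types.h>   /* bool */

    struct wakeup_counters {
            unsigned long nr_wakeups;          /* every wakeup */
            unsigned long nr_wakeups_local;    /* woke on the waker's CPU */
            unsigned long nr_wakeups_remote;   /* woke on another CPU */
            unsigned long nr_wakeups_migrate;  /* changed CPU to be woken */
            unsigned long nr_wakeups_sync;     /* sync wakeup flag was set */
    };

    static void count_wakeup(struct wakeup_counters *wc, int waker_cpu,
                             int wakee_cpu, bool migrated, bool sync)
    {
            if (wakee_cpu == waker_cpu)
                    wc->nr_wakeups_local++;
            else
                    wc->nr_wakeups_remote++;
            if (migrated)
                    wc->nr_wakeups_migrate++;
            wc->nr_wakeups++;
            if (sync)
                    wc->nr_wakeups_sync++;
    }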
D | cputime.c |
      253  __schedstat_add(p->stats.core_forceidle_sum, delta);  in __account_forceidle_time()
/kernel/locking/
D | lockdep_proc.c |
      398  struct lock_class_stats stats;  member
      403  struct lock_stat_data stats[MAX_LOCKDEP_KEYS];  member
      414  nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;  in lock_stat_cmp()
      415  nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;  in lock_stat_cmp()
      461  struct lock_class_stats *stats;  in seq_stats() local
      468  stats = &data->stats;  in seq_stats()
      505  if (stats->write_holdtime.nr) {  in seq_stats()
      506  if (stats->read_holdtime.nr)  in seq_stats()
      511  seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);  in seq_stats()
      512  seq_lock_time(m, &stats->write_waittime);  in seq_stats()
      [all …]
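Note: lock_stat_cmp() orders /proc/lock_stat entries by total contention events, i.e. read waits plus write waits. A reduced comparator with stand-in types (the real one compares struct lock_stat_data entries):

    struct wait_counts { unsigned long read_nr, write_nr; };

    /* sort descending: busier locks first */
    static int contention_cmp(const struct wait_counts *l,
                              const struct wait_counts *r)
    {
            unsigned long nl = l->read_nr + l->write_nr;
            unsigned long nr = r->read_nr + r->write_nr;

            if (nl == nr)
                    return 0;
            return nl > nr ? -1 : 1;
    }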
D | lockdep.c |
      296  struct lock_class_stats stats;  in lock_stats() local
      299  memset(&stats, 0, sizeof(struct lock_class_stats));  in lock_stats()
      304  for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)  in lock_stats()
      305  stats.contention_point[i] += pcs->contention_point[i];  in lock_stats()
      307  for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)  in lock_stats()
      308  stats.contending_point[i] += pcs->contending_point[i];  in lock_stats()
      310  lock_time_add(&pcs->read_waittime, &stats.read_waittime);  in lock_stats()
      311  lock_time_add(&pcs->write_waittime, &stats.write_waittime);  in lock_stats()
      313  lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);  in lock_stats()
      314  lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);  in lock_stats()
      [all …]
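Note: lock_stats() is the standard per-CPU fold: zero an accumulator, then add every CPU's private copy into it. The shape, self-contained and with the fields reduced to two counters:

    #include <linux/percpu.h>    /* per_cpu_ptr() */
    #include <linux/cpumask.h>   /* for_each_possible_cpu() */
    #include <linux/string.h>    /* memset() */

    struct class_stats { unsigned long bounces, contentions; };

    static void fold_stats(struct class_stats __percpu *pcs,
                           struct class_stats *sum)
    {
            int cpu;

            memset(sum, 0, sizeof(*sum));
            for_each_possible_cpu(cpu) {
                    struct class_stats *s = per_cpu_ptr(pcs, cpu);

                    sum->bounces     += s->bounces;
                    sum->contentions += s->contentions;
            }
    }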
/kernel/bpf/
D | cpumap.c |
      183  struct xdp_cpumap_stats *stats)  in cpu_map_bpf_prog_run_skb() argument
      201  stats->drop++;  in cpu_map_bpf_prog_run_skb()
      203  stats->redirect++;  in cpu_map_bpf_prog_run_skb()
      215  stats->drop++;  in cpu_map_bpf_prog_run_skb()
      223  struct xdp_cpumap_stats *stats)  in cpu_map_bpf_prog_run_xdp() argument
      249  stats->drop++;  in cpu_map_bpf_prog_run_xdp()
      252  stats->pass++;  in cpu_map_bpf_prog_run_xdp()
      260  stats->drop++;  in cpu_map_bpf_prog_run_xdp()
      262  stats->redirect++;  in cpu_map_bpf_prog_run_xdp()
      270  stats->drop++;  in cpu_map_bpf_prog_run_xdp()
      [all …]
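Note: the scattered increments map each XDP program verdict onto one of three counters, with anything unrecognized counted as a drop. The counting skeleton, using a stand-in counters struct in place of struct xdp_cpumap_stats:

    #include <linux/bpf.h>     /* XDP_DROP / XDP_PASS / XDP_REDIRECT */
    #include <linux/types.h>   /* u32 */

    struct xdp_counters { unsigned int drop, pass, redirect; };

    static void count_verdict(struct xdp_counters *stats, u32 act)
    {
            switch (act) {
            case XDP_PASS:
                    stats->pass++;
                    break;
            case XDP_REDIRECT:
                    stats->redirect++;
                    break;
            case XDP_DROP:
            default:           /* unknown verdicts are dropped too */
                    stats->drop++;
                    break;
            }
    }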
D | trampoline.c |
      898  struct bpf_prog_stats *stats;  in update_prog_stats() local
      909  stats = this_cpu_ptr(prog->stats);  in update_prog_stats()
      910  flags = u64_stats_update_begin_irqsave(&stats->syncp);  in update_prog_stats()
      911  u64_stats_inc(&stats->cnt);  in update_prog_stats()
      912  u64_stats_add(&stats->nsecs, sched_clock() - start);  in update_prog_stats()
      913  u64_stats_update_end_irqrestore(&stats->syncp, flags);  in update_prog_stats()
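Note: update_prog_stats() is the canonical u64_stats writer: take this CPU's slot, open the seqcount with IRQs saved (the hook can fire in interrupt context), update, close. A self-contained sketch against a stand-in struct:

    #include <linux/u64_stats_sync.h>
    #include <linux/percpu.h>
    #include <linux/sched/clock.h>   /* sched_clock() */

    struct run_stats {
            u64_stats_t cnt;              /* number of runs */
            u64_stats_t nsecs;            /* total runtime */
            struct u64_stats_sync syncp;  /* guards 64-bit reads on 32-bit */
    };

    static void record_run(struct run_stats __percpu *pstats, u64 start)
    {
            struct run_stats *stats = this_cpu_ptr(pstats);
            unsigned long flags;

            flags = u64_stats_update_begin_irqsave(&stats->syncp);
            u64_stats_inc(&stats->cnt);
            u64_stats_add(&stats->nsecs, sched_clock() - start);
            u64_stats_update_end_irqrestore(&stats->syncp, flags);
    }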
D | syscall.c |
     2104  struct bpf_prog_stats *stats;  in bpf_prog_inc_misses_counter() local
     2107  stats = this_cpu_ptr(prog->stats);  in bpf_prog_inc_misses_counter()
     2108  flags = u64_stats_update_begin_irqsave(&stats->syncp);  in bpf_prog_inc_misses_counter()
     2109  u64_stats_inc(&stats->misses);  in bpf_prog_inc_misses_counter()
     2110  u64_stats_update_end_irqrestore(&stats->syncp, flags);  in bpf_prog_inc_misses_counter()
     2114  struct bpf_prog_kstats *stats)  in bpf_prog_get_stats() argument
     2124  st = per_cpu_ptr(prog->stats, cpu);  in bpf_prog_get_stats()
     2135  stats->nsecs = nsecs;  in bpf_prog_get_stats()
     2136  stats->cnt = cnt;  in bpf_prog_get_stats()
     2137  stats->misses = misses;  in bpf_prog_get_stats()
      [all …]
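Note: bpf_prog_get_stats() is the matching reader: for each CPU, retry the snapshot until the writer's seqcount is stable, then accumulate into plain u64 totals. A sketch reusing the run_stats stand-in from the trampoline.c note above (the fetch helper's exact name has varied across kernel versions; u64_stats_fetch_begin()/u64_stats_fetch_retry() are the current ones):

    #include <linux/percpu.h>    /* per_cpu_ptr() */
    #include <linux/cpumask.h>   /* for_each_possible_cpu() */

    static u64 total_runs(struct run_stats __percpu *pstats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    const struct run_stats *st = per_cpu_ptr(pstats, cpu);
                    unsigned int start;
                    u64 cnt;

                    do {    /* retry if a writer raced with the read */
                            start = u64_stats_fetch_begin(&st->syncp);
                            cnt = u64_stats_read(&st->cnt);
                    } while (u64_stats_fetch_retry(&st->syncp, start));

                    total += cnt;
            }
            return total;
    }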
D | core.c |
      134  prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);  in bpf_prog_alloc()
      135  if (!prog->stats) {  in bpf_prog_alloc()
      145  pstats = per_cpu_ptr(prog->stats, cpu);  in bpf_prog_alloc()
      253  fp_old->stats = NULL;  in bpf_prog_realloc()
      269  free_percpu(fp->stats);  in __bpf_prog_free()
     1382  fp->stats = NULL;  in bpf_prog_clone_free()
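Note: the lifecycle side of the same stats: alloc_percpu_gfp() allocates the per-CPU area, each CPU's seqcount is initialized before first use, and the realloc/clone paths NULL the pointer so exactly one bpf_prog ever frees it. An allocation sketch for the stand-in struct:

    #include <linux/percpu.h>           /* alloc_percpu_gfp(), free_percpu() */
    #include <linux/cpumask.h>          /* for_each_possible_cpu() */
    #include <linux/u64_stats_sync.h>   /* u64_stats_init() */

    static struct run_stats __percpu *run_stats_alloc(gfp_t gfp)
    {
            struct run_stats __percpu *pstats;
            int cpu;

            pstats = alloc_percpu_gfp(struct run_stats, gfp);
            if (!pstats)
                    return NULL;

            /* seqcounts must be initialized before any writer uses them */
            for_each_possible_cpu(cpu)
                    u64_stats_init(&per_cpu_ptr(pstats, cpu)->syncp);

            return pstats;   /* paired teardown: free_percpu(pstats) */
    }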
/kernel/cgroup/
D | cgroup-v1.c |
      701  int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)  in cgroupstats_build() argument
      730  stats->nr_running++;  in cgroupstats_build()
      733  stats->nr_sleeping++;  in cgroupstats_build()
      736  stats->nr_uninterruptible++;  in cgroupstats_build()
      739  stats->nr_stopped++;  in cgroupstats_build()
      743  stats->nr_io_wait++;  in cgroupstats_build()
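Note: cgroupstats_build() walks the cgroup's tasks and buckets each one by scheduler state into struct cgroupstats (a uapi struct from linux/cgroupstats.h). The per-task classification reduces to a switch; the enum below stands in for the real task-state tests:

    #include <linux/cgroupstats.h>   /* struct cgroupstats */

    enum task_bucket { B_RUNNING, B_SLEEPING, B_UNINTERRUPTIBLE,
                       B_STOPPED, B_IO_WAIT };

    static void count_task(struct cgroupstats *stats, enum task_bucket b)
    {
            switch (b) {
            case B_RUNNING:          stats->nr_running++;         break;
            case B_SLEEPING:         stats->nr_sleeping++;        break;
            case B_UNINTERRUPTIBLE:  stats->nr_uninterruptible++; break;
            case B_STOPPED:          stats->nr_stopped++;         break;
            case B_IO_WAIT:          stats->nr_io_wait++;         break;
            }
    }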
/kernel/rcu/
D | rcutorture.c |
      374  void (*stats)(void);  member
      557  .stats = NULL,
      606  .stats = NULL,
      720  .stats = srcu_torture_stats,
      758  .stats = srcu_torture_stats,
      780  .stats = srcu_torture_stats,
      824  .stats = NULL,
      868  .stats = NULL,
      909  .stats = NULL,
      961  .stats = NULL,
      [all …]
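Note: here stats is an optional hook in the per-flavor ops tables: only the SRCU variants provide one, everything else leaves it NULL, so the caller must check before dispatching. The pattern in miniature (types are stand-ins):

    struct torture_ops_like {
            const char *name;
            void (*stats)(void);   /* optional; NULL when nothing to report */
    };

    static void maybe_show_stats(const struct torture_ops_like *ops)
    {
            if (ops->stats)        /* the NULL check is the contract */
                    ops->stats();
    }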
/kernel/trace/
D | trace_events_hist.c |
     5378  struct hist_val_stat *stats,  in hist_trigger_print_val() argument
     5386  pc = __get_percentage(val, stats[idx].total);  in hist_trigger_print_val()
     5394  __fill_bar_str(bar, 20, val, stats[idx].max));  in hist_trigger_print_val()
     5404  struct hist_val_stat *stats,  in hist_trigger_entry_print() argument
     5415  hist_trigger_print_val(m, i, "hitcount", 0, stats, elt);  in hist_trigger_entry_print()
     5425  hist_trigger_print_val(m, i, field_name, flags, stats, elt);  in hist_trigger_entry_print()
     5439  struct hist_val_stat *stats = NULL;  in print_entries() local
     5453  if (!stats) {  in print_entries()
     5454  stats = kcalloc(hist_data->n_vals, sizeof(*stats),  in print_entries()
     5456  if (!stats) {  in print_entries()
      [all …]
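Note: __get_percentage() and __fill_bar_str() are static helpers local to trace_events_hist.c; the sketch below only approximates their shape: a value scaled against the column total gives the percentage, and against the column maximum gives a fixed-width ASCII bar.

    #include <linux/math64.h>   /* div64_u64() */
    #include <linux/string.h>   /* memset() */

    static unsigned int percentage(u64 val, u64 total)
    {
            /* assumes val * 100 does not overflow u64 */
            return total ? div64_u64(val * 100, total) : 0;
    }

    /* buf must hold width + 1 bytes; assumes val <= max */
    static void fill_bar(char *buf, int width, u64 val, u64 max)
    {
            int len = max ? div64_u64(val * width, max) : 0;

            memset(buf, '#', len);
            buf[len] = '\0';
    }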
D | ring_buffer.c |
     4584  overrun = footer->stats.overrun;  in rb_swap_reader_page_ext()
     5174  local_set(&cpu_buffer->entries, footer->stats.entries);  in ring_buffer_update_view()
     5175  local_set(&cpu_buffer->pages_touched, footer->stats.pages_touched);  in ring_buffer_update_view()
     5176  local_set(&cpu_buffer->overrun, footer->stats.overrun);  in ring_buffer_update_view()
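Note: this path repopulates the per-CPU buffer's local_t bookkeeping from a stats snapshot kept in a shared page footer (the _ext helpers shown are not in every tree; treat them as this tree's extension). The restore itself is plain local_set() adoption of snapshot values; the types below are stand-ins:

    #include <asm/local.h>   /* local_t, local_set() */

    struct buf_snapshot { long entries, pages_touched, overrun; };

    struct buf_counters {
            local_t entries;
            local_t pages_touched;
            local_t overrun;
    };

    static void restore_counters(struct buf_counters *c,
                                 const struct buf_snapshot *snap)
    {
            /* adopt the snapshot wholesale; no read-modify-write needed */
            local_set(&c->entries, snap->entries);
            local_set(&c->pages_touched, snap->pages_touched);
            local_set(&c->overrun, snap->overrun);
    }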
D | Kconfig |
     1014  It will output the stats of each per cpu buffer. What