Lines Matching refs:stats
37 static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
39 stats->flags |= (1 << BFQG_stats_##name); \
41 static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
43 stats->flags &= ~(1 << BFQG_stats_##name); \
45 static int bfqg_stats_##name(struct bfqg_stats *stats) \
47 return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
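Editor's note: the six matches above are pieces of the BFQG_FLAG_FNS() helper macro. Read together with the bfqg_stats_waiting()/bfqg_stats_idling()/bfqg_stats_empty() calls matched further down, the surrounding code most likely looks like the sketch below; the enum member names and the three instantiations are inferred from those calls, not part of the matched lines.

enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,         /* inferred from bfqg_stats_*_waiting() */
        BFQG_stats_idling,              /* inferred from bfqg_stats_*_idling() */
        BFQG_stats_empty,               /* inferred from bfqg_stats_*_empty() */
};

#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)            \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                  \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

Each invocation generates a mark/clear/test triple for one flag bit in bfqg_stats->flags; the timing helpers below use these to open and close their measurement windows.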
56 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) in bfqg_stats_update_group_wait_time() argument
60 if (!bfqg_stats_waiting(stats)) in bfqg_stats_update_group_wait_time()
64 if (time_after64(now, stats->start_group_wait_time)) in bfqg_stats_update_group_wait_time()
65 blkg_stat_add(&stats->group_wait_time, in bfqg_stats_update_group_wait_time()
66 now - stats->start_group_wait_time); in bfqg_stats_update_group_wait_time()
67 bfqg_stats_clear_waiting(stats); in bfqg_stats_update_group_wait_time()
74 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_set_start_group_wait_time() local
76 if (bfqg_stats_waiting(stats)) in bfqg_stats_set_start_group_wait_time()
80 stats->start_group_wait_time = sched_clock(); in bfqg_stats_set_start_group_wait_time()
81 bfqg_stats_mark_waiting(stats); in bfqg_stats_set_start_group_wait_time()
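Editor's note: lines 56-81 implement group-wait accounting: mark a group as waiting when it becomes busy but cannot be served yet, and charge the elapsed time to group_wait_time once it is finally served. A hedged reconstruction of the two helpers follows; the declaration of now, the setter's second parameter, and the comparison against the currently served group are assumptions filled in around the matched lines.

static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_waiting(stats))         /* no window open */
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}

static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))          /* window already open */
                return;
        if (bfqg == curr_bfqg)                  /* already the group in service */
                return;
        stats->start_group_wait_time = sched_clock();
        bfqg_stats_mark_waiting(stats);
}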
85 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) in bfqg_stats_end_empty_time() argument
89 if (!bfqg_stats_empty(stats)) in bfqg_stats_end_empty_time()
93 if (time_after64(now, stats->start_empty_time)) in bfqg_stats_end_empty_time()
94 blkg_stat_add(&stats->empty_time, in bfqg_stats_end_empty_time()
95 now - stats->start_empty_time); in bfqg_stats_end_empty_time()
96 bfqg_stats_clear_empty(stats); in bfqg_stats_end_empty_time()
101 blkg_stat_add(&bfqg->stats.dequeue, 1); in bfqg_stats_update_dequeue()
106 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_set_start_empty_time() local
108 if (blkg_rwstat_total(&stats->queued)) in bfqg_stats_set_start_empty_time()
116 if (bfqg_stats_empty(stats)) in bfqg_stats_set_start_empty_time()
119 stats->start_empty_time = sched_clock(); in bfqg_stats_set_start_empty_time()
120 bfqg_stats_mark_empty(stats); in bfqg_stats_set_start_empty_time()
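Editor's note: lines 85-120 track empty time, i.e. how long a group sits with no request queued. bfqg_stats_set_start_empty_time() opens the window once the last queued request is gone, and bfqg_stats_end_empty_time() closes it when a new request arrives (it is called from bfqg_stats_update_io_add(), matched at line 236). A sketch of the setter with the pieces the matches elide filled in:

static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        /* requests are still queued: the group is not empty */
        if (blkg_rwstat_total(&stats->queued))
                return;

        /* window already open: nothing to do */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        bfqg_stats_mark_empty(stats);
}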
125 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_update_idle_time() local
127 if (bfqg_stats_idling(stats)) { in bfqg_stats_update_idle_time()
130 if (time_after64(now, stats->start_idle_time)) in bfqg_stats_update_idle_time()
131 blkg_stat_add(&stats->idle_time, in bfqg_stats_update_idle_time()
132 now - stats->start_idle_time); in bfqg_stats_update_idle_time()
133 bfqg_stats_clear_idling(stats); in bfqg_stats_update_idle_time()
139 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_set_start_idle_time() local
141 stats->start_idle_time = sched_clock(); in bfqg_stats_set_start_idle_time()
142 bfqg_stats_mark_idling(stats); in bfqg_stats_set_start_idle_time()
147 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_update_avg_queue_size() local
149 blkg_stat_add(&stats->avg_queue_size_sum, in bfqg_stats_update_avg_queue_size()
150 blkg_rwstat_total(&stats->queued)); in bfqg_stats_update_avg_queue_size()
151 blkg_stat_add(&stats->avg_queue_size_samples, 1); in bfqg_stats_update_avg_queue_size()
152 bfqg_stats_update_group_wait_time(stats); in bfqg_stats_update_avg_queue_size()
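Editor's note: bfqg_stats_update_avg_queue_size() (lines 147-152) samples the current queue depth each time the group is set in service and also closes any open group-wait window; dividing avg_queue_size_sum by avg_queue_size_samples later yields the exported average (see line 980 below). The idle-time helpers at lines 125-142 follow the same mark/measure/clear pattern as the wait-time pair above. Reconstructed with the elided signature; the bfq_group parameter is inferred from the &bfqg->stats local at line 147.

static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        /* one sample = number of requests currently queued in this group */
        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        /* the group starts being served, so its group-wait window ends */
        bfqg_stats_update_group_wait_time(stats);
}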
235 blkg_rwstat_add(&bfqg->stats.queued, op, 1); in bfqg_stats_update_io_add()
236 bfqg_stats_end_empty_time(&bfqg->stats); in bfqg_stats_update_io_add()
243 blkg_rwstat_add(&bfqg->stats.queued, op, -1); in bfqg_stats_update_io_remove()
248 blkg_rwstat_add(&bfqg->stats.merged, op, 1); in bfqg_stats_update_io_merged()
254 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_update_completion() local
258 blkg_rwstat_add(&stats->service_time, op, in bfqg_stats_update_completion()
261 blkg_rwstat_add(&stats->wait_time, op, in bfqg_stats_update_completion()
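Editor's note: lines 235-261 are the per-request hooks: queued is incremented and decremented on add/remove, merged on merge, and service_time/wait_time are charged on completion. The completion hook's time arguments and the two deltas are not part of the matches; this sketch assumes the usual parameter pair (queueing time and dispatch time), with the deltas following the convention visible in the other time-tracking helpers.

static void bfqg_stats_update_completion(struct bfq_group *bfqg,
                                         u64 start_time, u64 io_start_time,
                                         unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        u64 now = sched_clock();

        /* service_time: from dispatch to completion */
        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time);
        /* wait_time: from queueing to dispatch */
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time - start_time);
}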
266 static void bfqg_stats_reset(struct bfqg_stats *stats) in bfqg_stats_reset() argument
269 blkg_rwstat_reset(&stats->merged); in bfqg_stats_reset()
270 blkg_rwstat_reset(&stats->service_time); in bfqg_stats_reset()
271 blkg_rwstat_reset(&stats->wait_time); in bfqg_stats_reset()
272 blkg_stat_reset(&stats->time); in bfqg_stats_reset()
273 blkg_stat_reset(&stats->avg_queue_size_sum); in bfqg_stats_reset()
274 blkg_stat_reset(&stats->avg_queue_size_samples); in bfqg_stats_reset()
275 blkg_stat_reset(&stats->dequeue); in bfqg_stats_reset()
276 blkg_stat_reset(&stats->group_wait_time); in bfqg_stats_reset()
277 blkg_stat_reset(&stats->idle_time); in bfqg_stats_reset()
278 blkg_stat_reset(&stats->empty_time); in bfqg_stats_reset()
320 bfqg_stats_add_aux(&parent->stats, &bfqg->stats); in bfqg_stats_xfer_dead()
321 bfqg_stats_reset(&bfqg->stats); in bfqg_stats_xfer_dead()
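Editor's note: lines 320-321 are the tail of bfqg_stats_xfer_dead(): when a group is destroyed, its counters are folded into the parent's auxiliary counters via bfqg_stats_add_aux() and then reset, so the numbers are not lost with the group. A minimal sketch, assuming bfqg_parent() is the lookup used for the parent group:

static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg)                      /* the root group has no parent */
                return;

        parent = bfqg_parent(bfqg);
        if (unlikely(!parent))
                return;

        /* merge this group's counters into the parent, then clear them */
        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}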
343 static void bfqg_stats_exit(struct bfqg_stats *stats) in bfqg_stats_exit() argument
345 blkg_rwstat_exit(&stats->merged); in bfqg_stats_exit()
346 blkg_rwstat_exit(&stats->service_time); in bfqg_stats_exit()
347 blkg_rwstat_exit(&stats->wait_time); in bfqg_stats_exit()
348 blkg_rwstat_exit(&stats->queued); in bfqg_stats_exit()
349 blkg_stat_exit(&stats->time); in bfqg_stats_exit()
350 blkg_stat_exit(&stats->avg_queue_size_sum); in bfqg_stats_exit()
351 blkg_stat_exit(&stats->avg_queue_size_samples); in bfqg_stats_exit()
352 blkg_stat_exit(&stats->dequeue); in bfqg_stats_exit()
353 blkg_stat_exit(&stats->group_wait_time); in bfqg_stats_exit()
354 blkg_stat_exit(&stats->idle_time); in bfqg_stats_exit()
355 blkg_stat_exit(&stats->empty_time); in bfqg_stats_exit()
358 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) in bfqg_stats_init() argument
360 if (blkg_rwstat_init(&stats->merged, gfp) || in bfqg_stats_init()
361 blkg_rwstat_init(&stats->service_time, gfp) || in bfqg_stats_init()
362 blkg_rwstat_init(&stats->wait_time, gfp) || in bfqg_stats_init()
363 blkg_rwstat_init(&stats->queued, gfp) || in bfqg_stats_init()
364 blkg_stat_init(&stats->time, gfp) || in bfqg_stats_init()
365 blkg_stat_init(&stats->avg_queue_size_sum, gfp) || in bfqg_stats_init()
366 blkg_stat_init(&stats->avg_queue_size_samples, gfp) || in bfqg_stats_init()
367 blkg_stat_init(&stats->dequeue, gfp) || in bfqg_stats_init()
368 blkg_stat_init(&stats->group_wait_time, gfp) || in bfqg_stats_init()
369 blkg_stat_init(&stats->idle_time, gfp) || in bfqg_stats_init()
370 blkg_stat_init(&stats->empty_time, gfp)) { in bfqg_stats_init()
371 bfqg_stats_exit(stats); in bfqg_stats_init()
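Editor's note: bfqg_stats_init() (lines 358-371) allocates every per-cpu counter in one short-circuited chain and, on any failure, calls bfqg_stats_exit() to free whatever was already set up. The function's tail is not matched; presumably it just reports the allocation failure and otherwise succeeds, roughly:

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            /* ... the remaining counters listed above ... */
            blkg_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats); /* undo partial initialization */
                return -ENOMEM;
        }

        return 0;
}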
419 if (bfqg_stats_init(&bfqg->stats, gfp)) { in bfq_pd_alloc()
452 bfqg_stats_exit(&bfqg->stats); in bfq_pd_free()
460 bfqg_stats_reset(&bfqg->stats); in bfq_pd_reset_stats()
980 u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples); in bfqg_prfill_avg_queue_size()
984 v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum); in bfqg_prfill_avg_queue_size()
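Editor's note: bfqg_prfill_avg_queue_size() (lines 980-984) turns the two raw counters into the value exported to userspace. The division and the seq_file output are elided by the matches; they presumably use the standard div64_u64()/__blkg_prfill_u64() pattern, with pd_to_bfqg() assumed to be the policy-data-to-group conversion helper.

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);      /* average = sum / samples */
        }
        __blkg_prfill_u64(sf, pd, v);           /* print one value per group */
        return 0;
}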
1038 .private = offsetof(struct bfq_group, stats.time),
1057 .private = offsetof(struct bfq_group, stats.service_time),
1062 .private = offsetof(struct bfq_group, stats.wait_time),
1067 .private = offsetof(struct bfq_group, stats.merged),
1072 .private = offsetof(struct bfq_group, stats.queued),
1079 .private = offsetof(struct bfq_group, stats.time),
1098 .private = offsetof(struct bfq_group, stats.service_time),
1103 .private = offsetof(struct bfq_group, stats.wait_time),
1108 .private = offsetof(struct bfq_group, stats.merged),
1113 .private = offsetof(struct bfq_group, stats.queued),
1122 .private = offsetof(struct bfq_group, stats.group_wait_time),
1127 .private = offsetof(struct bfq_group, stats.idle_time),
1132 .private = offsetof(struct bfq_group, stats.empty_time),
1137 .private = offsetof(struct bfq_group, stats.dequeue),
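Editor's note: the offsetof() matches from line 1038 onward come from the bfq cftype tables: each entry records where one counter lives inside struct bfq_group and pairs that offset with a generic print callback. Two illustrative entries follow; the .name strings and the bfqg_print_stat()/bfqg_print_rwstat() callbacks are assumptions about the surrounding table, not part of the matched lines.

        {
                .name = "bfq.time",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat,    /* single counter */
        },
        {
                .name = "bfq.io_service_time",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat,  /* per-direction counter */
        },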