Searched refs:stats (Results 1 – 9 of 9) sorted by relevance
/block/
D | bfq-cgroup.c
     104  static void bfqg_stats_mark_##name(struct bfqg_stats *stats)  \
     106          stats->flags |= (1 << BFQG_stats_##name);  \
     108  static void bfqg_stats_clear_##name(struct bfqg_stats *stats)  \
     110          stats->flags &= ~(1 << BFQG_stats_##name);  \
     112  static int bfqg_stats_##name(struct bfqg_stats *stats)  \
     114          return (stats->flags & (1 << BFQG_stats_##name)) != 0;  \
     123  static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)  in BFQG_FLAG_FNS()
     127          if (!bfqg_stats_waiting(stats))  in BFQG_FLAG_FNS()
     131          if (now > stats->start_group_wait_time)  in BFQG_FLAG_FNS()
     132                  bfq_stat_add(&stats->group_wait_time,  in BFQG_FLAG_FNS()
     [all …]
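
The bfq-cgroup.c hits all fall inside the BFQG_FLAG_FNS() token-pasting macro, which stamps out a mark/clear/test helper triple for each bit of bfqg_stats->flags. A minimal sketch of the same pattern, with illustrative names in place of the kernel's:

    /* Sketch of the flag-helper macro idiom seen in bfq-cgroup.c; all names
     * here are illustrative, not the kernel's. */
    struct demo_stats {
            unsigned int flags;
    };

    enum demo_stats_flags {
            DEMO_stats_waiting,
            DEMO_stats_idling,
    };

    #define DEMO_FLAG_FNS(name)                                             \
    static void demo_stats_mark_##name(struct demo_stats *stats)            \
    {                                                                       \
            stats->flags |= (1 << DEMO_stats_##name);                       \
    }                                                                       \
    static void demo_stats_clear_##name(struct demo_stats *stats)           \
    {                                                                       \
            stats->flags &= ~(1 << DEMO_stats_##name);                      \
    }                                                                       \
    static int demo_stats_##name(struct demo_stats *stats)                  \
    {                                                                       \
            return (stats->flags & (1 << DEMO_stats_##name)) != 0;          \
    }

    /* One invocation per flag: this generates demo_stats_mark_waiting(),
     * demo_stats_clear_waiting() and the demo_stats_waiting() test, etc. */
    DEMO_FLAG_FNS(waiting)
    DEMO_FLAG_FNS(idling)
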
D | blk-stat.c
      65          list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {  in blk_stat_add()
     151          spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_add_callback()
     152          list_add_tail_rcu(&cb->list, &q->stats->callbacks);  in blk_stat_add_callback()
     154          spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_add_callback()
     162          spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_remove_callback()
     164          if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)  in blk_stat_remove_callback()
     166          spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_remove_callback()
     191          spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_enable_accounting()
     192          q->stats->enable_accounting = true;  in blk_stat_enable_accounting()
     194          spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_enable_accounting()
     [all …]
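
The blk-stat.c matches show the classic spinlock-writer / RCU-reader split: blk_stat_add() walks q->stats->callbacks locklessly with list_for_each_entry_rcu(), while blk_stat_add_callback() and blk_stat_remove_callback() mutate the list with list_add_tail_rcu() under the irq-safe q->stats->lock. A hedged sketch of that split, using stand-in struct names:

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    /* Illustrative stand-ins for struct blk_queue_stats / blk_stat_callback. */
    struct demo_stats {
            struct list_head callbacks;
            spinlock_t lock;
    };

    struct demo_cb {
            struct list_head list;
            void (*fn)(struct demo_cb *cb);
    };

    /* Writer: mutate the list under the spinlock, publish via RCU. */
    static void demo_add_callback(struct demo_stats *stats, struct demo_cb *cb)
    {
            unsigned long flags;

            spin_lock_irqsave(&stats->lock, flags);
            list_add_tail_rcu(&cb->list, &stats->callbacks);
            spin_unlock_irqrestore(&stats->lock, flags);
    }

    /* Reader (hot path): lockless traversal inside an RCU read section. */
    static void demo_run_callbacks(struct demo_stats *stats)
    {
            struct demo_cb *cb;

            rcu_read_lock();
            list_for_each_entry_rcu(cb, &stats->callbacks, list)
                    cb->fn(cb);
            rcu_read_unlock();
    }

Removal of a callback would additionally need synchronize_rcu() or kfree_rcu() before the callback memory is freed; that part is elided here.
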
D | mq-deadline-cgroup.h
      26          struct io_stats_per_prio stats[4];  member
      36          struct blkcg_io_stats __percpu *stats;  member
      45          struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats);  \
      49          local_inc(&io_stats->stats[(prio_class)].event_type);  \
      66                  sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)->  \
      67                             stats[(prio)].event_type);  \
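
The header keeps one io_stats_per_prio block per CPU (one slot per I/O priority class) and bumps counters through get_cpu_ptr() + local_inc(), so the fast path takes no lock. A reduced sketch of that increment macro, with hypothetical type and field names:

    #include <linux/percpu.h>
    #include <asm/local.h>

    /* Hypothetical per-priority counters; one instance of the outer struct
     * exists per CPU. */
    struct demo_io_stats_per_prio {
            local_t inserted;
            local_t completed;
    };

    struct demo_io_stats {
            struct demo_io_stats_per_prio stats[4];
    };

    /*
     * get_cpu_ptr() disables preemption, which makes local_inc() safe against
     * other tasks on the same CPU; put_cpu_ptr() re-enables preemption.
     */
    #define demo_count(pcpu, prio, event)                                   \
    do {                                                                    \
            struct demo_io_stats *io_stats = get_cpu_ptr(pcpu);             \
                                                                            \
            local_inc(&io_stats->stats[(prio)].event);                      \
            put_cpu_ptr(pcpu);                                              \
    } while (0)

Here `event` is pasted in as a field name, mirroring how the kernel macro takes event_type.
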
D | mq-deadline-cgroup.c
      17          pd->stats = alloc_percpu_gfp(typeof(*pd->stats),  in dd_cpd_alloc()
      19          if (!pd->stats) {  in dd_cpd_alloc()
      30          free_percpu(dd_blkcg->stats);  in dd_cpd_free()
      67          for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)  in dd_pd_stat()
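
dd_cpd_alloc()/dd_cpd_free() show the per-CPU lifecycle around those counters: alloc_percpu_gfp() when the cgroup policy data is created, free_percpu() on teardown, with a NULL check unwinding the partially built object. A minimal sketch of that shape, reusing the hypothetical demo_io_stats type from the sketch above:

    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct demo_pd {
            struct demo_io_stats __percpu *stats;
    };

    static struct demo_pd *demo_pd_alloc(gfp_t gfp)
    {
            struct demo_pd *pd;

            pd = kzalloc(sizeof(*pd), gfp);
            if (!pd)
                    return NULL;

            /* typeof(*pd->stats) keeps the percpu allocation's size and
             * alignment in sync with the pointed-to type. */
            pd->stats = alloc_percpu_gfp(typeof(*pd->stats), gfp);
            if (!pd->stats) {
                    kfree(pd);      /* unwind the partial allocation */
                    return NULL;
            }
            return pd;
    }

    static void demo_pd_free(struct demo_pd *pd)
    {
            free_percpu(pd->stats);
            kfree(pd);
    }
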
D | mq-deadline-main.c
      60          struct io_stats_per_prio stats[DD_PRIO_COUNT];  member
      90          struct io_stats __percpu *stats;  member
     108          struct io_stats *io_stats = get_cpu_ptr((dd)->stats);  \
     112          local_inc(&io_stats->stats[(prio)].event_type);  \
     128                  sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->  \
     129                             stats[(prio)].event_type);  \
     580          free_percpu(dd->stats);  in dd_exit_sched()
     611          dd->stats = alloc_percpu_gfp(typeof(*dd->stats),  in dd_init_sched()
     613          if (!dd->stats)  in dd_init_sched()
     646          free_percpu(dd->stats);  in dd_init_sched()
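
mq-deadline-main.c repeats the same counters per scheduler instance, and the read side (lines 128–129) pays the cost the write side avoids: it walks every possible CPU and accumulates local_read() values. A sketch of that summing walk over the hypothetical types above:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <asm/local.h>

    /* Sum one per-priority event counter across all CPUs; this is the read
     * side that pairs with demo_count() above. */
    static long demo_sum(struct demo_io_stats __percpu *pcpu, int prio)
    {
            long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += local_read(&per_cpu_ptr(pcpu, cpu)->stats[prio].inserted);

            return sum;
    }

The result is a momentary snapshot rather than an atomic total, which is the usual trade-off for lock-free per-CPU counters.
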
D | blk-iolatency.c
     140          struct latency_stat __percpu *stats;  member
     221          struct latency_stat *stat = get_cpu_ptr(iolat->stats);  in latency_stat_record_time()
     538                  s = per_cpu_ptr(iolat->stats, cpu);  in iolatency_check_latencies()
     915                  s = per_cpu_ptr(iolat->stats, cpu);  in iolatency_ssd_stat()
     963          iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),  in iolatency_pd_alloc()
     965          if (!iolat->stats) {  in iolatency_pd_alloc()
     988                  stat = per_cpu_ptr(iolat->stats, cpu);  in iolatency_pd_init()
    1029          free_percpu(iolat->stats);  in iolatency_pd_free()
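
blk-iolatency.c applies the same per-CPU shape to latency samples: latency_stat_record_time() records into the local CPU's latency_stat via get_cpu_ptr(), and the slower paths (iolatency_check_latencies(), iolatency_ssd_stat()) walk per_cpu_ptr() across CPUs. A hedged sketch of the record path; the field layout here is a guess at the pattern, not the kernel's exact struct latency_stat:

    #include <linux/percpu.h>
    #include <linux/types.h>

    /* Illustrative flat accumulator; the real struct also has an SSD variant. */
    struct demo_latency_stat {
            u64 total_ns;   /* summed request latency */
            u64 nr;         /* number of samples */
    };

    static void demo_record_time(struct demo_latency_stat __percpu *stats,
                                 u64 req_time_ns)
    {
            struct demo_latency_stat *stat = get_cpu_ptr(stats);

            /* Preemption stays off between get_cpu_ptr() and put_cpu_ptr(). */
            stat->total_ns += req_time_ns;
            stat->nr++;
            put_cpu_ptr(stats);
    }
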
D | blk-core.c
     549          q->stats = blk_alloc_queue_stats();  in blk_alloc_queue()
     550          if (!q->stats)  in blk_alloc_queue()
     601          blk_free_queue_stats(q->stats);  in blk_alloc_queue()
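
blk-core.c allocates the queue-wide stats object early in blk_alloc_queue() and frees it on that function's error path (line 601); the matching release for a successfully created queue happens in blk_release_queue(), per the blk-sysfs.c hit below. A sketch of the goto-unwind allocation idiom, with hypothetical names standing in for the blk_* helpers:

    #include <linux/slab.h>

    struct demo_queue {
            struct demo_queue_stats *stats;
    };

    /* Hypothetical stand-ins for blk_alloc_queue_stats()/blk_free_queue_stats(). */
    struct demo_queue_stats *demo_alloc_queue_stats(void);
    void demo_free_queue_stats(struct demo_queue_stats *stats);
    int demo_init_rest(struct demo_queue *q);

    static struct demo_queue *demo_alloc_queue(void)
    {
            struct demo_queue *q;

            q = kzalloc(sizeof(*q), GFP_KERNEL);
            if (!q)
                    return NULL;

            q->stats = demo_alloc_queue_stats();
            if (!q->stats)
                    goto fail_q;

            if (demo_init_rest(q))          /* any later init failure ... */
                    goto fail_stats;        /* ... unwinds the stats too */

            return q;

    fail_stats:
            demo_free_queue_stats(q->stats);
    fail_q:
            kfree(q);
            return NULL;
    }
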
D | bfq-iosched.h
     921          struct bfqg_stats stats;  member
D | blk-sysfs.c
     793          blk_free_queue_stats(q->stats);  in blk_release_queue()