Searched refs:buckets (Results 1 – 3 of 3) sorted by relevance
block/blk-stat.c
 86  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_timer_fn()
 93  for (bucket = 0; bucket < cb->buckets; bucket++) {  in blk_stat_timer_fn()
105  unsigned int buckets, void *data)  in blk_stat_alloc_callback() argument
113  cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),  in blk_stat_alloc_callback()
119  cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),  in blk_stat_alloc_callback()
130  cb->buckets = buckets;  in blk_stat_alloc_callback()
147  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_add_callback()
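The blk-stat.c hits show the pattern: blk_stat_alloc_callback() sizes two arrays from the caller's bucket count (an aggregated one via kmalloc_array() and a per-CPU one via __alloc_percpu()), and blk_stat_timer_fn() later folds every CPU's buckets into the aggregate. Below is a minimal userspace sketch of that shape, with calloc() standing in for the kernel allocators and a fixed NCPUS array standing in for per-CPU storage; all names here are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4  /* stand-in for the kernel's per-CPU allocation */

struct rq_stat {                /* simplified stand-in for struct blk_rq_stat */
	unsigned long long nr_samples;
	unsigned long long total;
};

struct stat_callback {          /* simplified stand-in for struct blk_stat_callback */
	struct rq_stat *stat;       /* aggregated: buckets entries */
	struct rq_stat *cpu_stat;   /* per-CPU: NCPUS * buckets entries */
	unsigned int buckets;
};

static struct stat_callback *alloc_callback(unsigned int buckets)
{
	struct stat_callback *cb = malloc(sizeof(*cb));

	if (!cb)
		return NULL;
	/* mirrors cb->stat = kmalloc_array(buckets, ...) at line 113 */
	cb->stat = calloc(buckets, sizeof(*cb->stat));
	/* mirrors cb->cpu_stat = __alloc_percpu(buckets * ...) at line 119 */
	cb->cpu_stat = calloc((size_t)NCPUS * buckets, sizeof(*cb->cpu_stat));
	if (!cb->stat || !cb->cpu_stat) {
		free(cb->stat);
		free(cb->cpu_stat);
		free(cb);
		return NULL;
	}
	cb->buckets = buckets;      /* mirrors line 130 */
	return cb;
}

/* mirrors blk_stat_timer_fn(): fold each CPU's buckets into cb->stat */
static void timer_fn(struct stat_callback *cb)
{
	for (unsigned int cpu = 0; cpu < NCPUS; cpu++) {
		for (unsigned int bucket = 0; bucket < cb->buckets; bucket++) {
			struct rq_stat *s = &cb->cpu_stat[cpu * cb->buckets + bucket];

			cb->stat[bucket].nr_samples += s->nr_samples;
			cb->stat[bucket].total += s->total;
			s->nr_samples = 0;
			s->total = 0;
		}
	}
}

int main(void)
{
	struct stat_callback *cb = alloc_callback(8);

	if (!cb)
		return 1;
	/* record a fake 100us sample in bucket 3 on "CPU" 1 */
	cb->cpu_stat[1 * cb->buckets + 3].nr_samples = 1;
	cb->cpu_stat[1 * cb->buckets + 3].total = 100;
	timer_fn(cb);
	printf("bucket 3: %llu sample(s), total %llu\n",
	       cb->stat[3].nr_samples, cb->stat[3].total);
	free(cb->stat);
	free(cb->cpu_stat);
	free(cb);
	return 0;
}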
block/kyber-iosched.c
136  atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];  member
216  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in flush_latency_buckets() local
217  atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];  in flush_latency_buckets()
221  buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);  in flush_latency_buckets()
232  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in calculate_percentile() local
236  samples += buckets[bucket];  in calculate_percentile()
255  if (buckets[bucket] >= percentile_samples)  in calculate_percentile()
257  percentile_samples -= buckets[bucket];  in calculate_percentile()
259  memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));  in calculate_percentile()
634  atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);  in add_latency_sample()
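The Kyber hits outline a two-step scheme: add_latency_sample() bumps a per-CPU atomic counter for the bucket an I/O's latency falls into, flush_latency_buckets() drains those counters into a plain array with an exchange-to-zero, and calculate_percentile() scans the flushed array for the bucket holding the requested percentile, clearing it afterwards. Here is a hedged userspace sketch using C11 atomics in place of the kernel's atomic_t; the kernel version also enforces a minimum sample count and maps the bucket index back to a latency target, which this sketch omits.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 8   /* stand-in for KYBER_LATENCY_BUCKETS */
#define NCPUS 2

static atomic_uint cpu_buckets[NCPUS][NBUCKETS]; /* per-CPU sample counts */
static unsigned int buckets[NBUCKETS];           /* flushed totals */

/* mirrors add_latency_sample() at line 634: bump one per-CPU bucket */
static void add_latency_sample(int cpu, int bucket)
{
	atomic_fetch_add(&cpu_buckets[cpu][bucket], 1);
}

/* mirrors flush_latency_buckets(): drain each CPU's counters to zero,
 * as atomic_xchg(..., 0) does at line 221 */
static void flush_latency_buckets(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		for (int b = 0; b < NBUCKETS; b++)
			buckets[b] += atomic_exchange(&cpu_buckets[cpu][b], 0);
}

/* mirrors calculate_percentile(): return the bucket holding the p-th
 * percentile sample, or -1 if there are no samples */
static int calculate_percentile(unsigned int p)
{
	unsigned int samples = 0;

	for (int b = 0; b < NBUCKETS; b++)
		samples += buckets[b];
	if (!samples)
		return -1;

	/* round up, as the kernel does with DIV_ROUND_UP() */
	unsigned int percentile_samples = (samples * p + 99) / 100;
	int bucket;

	for (bucket = 0; bucket < NBUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(buckets)); /* consumed, as at line 259 */
	return bucket;
}

int main(void)
{
	for (int i = 0; i < 90; i++)
		add_latency_sample(i % NCPUS, 2); /* most I/Os land in bucket 2 */
	for (int i = 0; i < 10; i++)
		add_latency_sample(i % NCPUS, 6); /* a slow tail in bucket 6 */

	flush_latency_buckets();
	printf("p90 bucket: %d\n", calculate_percentile(90)); /* prints 2 */
	return 0;
}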
block/blk-stat.h
45  unsigned int buckets;  member
87  unsigned int buckets, void *data);
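The header hits pin down only the `buckets` member (line 45) and the tail of the blk_stat_alloc_callback() prototype (line 87). The declaration-only sketch below shows how they relate; the other fields and the leading callback parameters are reconstructed from the blk-stat.c hits above and may differ across kernel versions.

struct request;      /* opaque here */
struct blk_rq_stat;  /* opaque here */

struct blk_stat_callback {
	struct blk_rq_stat *cpu_stat; /* per-CPU copy (__percpu in the kernel) */
	struct blk_rq_stat *stat;     /* aggregated copy */
	unsigned int buckets;         /* line 45: sizes both arrays above */
	void *data;                   /* opaque cookie handed back to callbacks */
	/* the real struct also carries a timer, list linkage, and callbacks */
};

/* line 87 is the tail of this prototype; the two callback parameters
 * here are a best-guess reconstruction from blk-stat.c's usage */
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data);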