
Searched refs:depth (Results 1 – 7 of 7) sorted by relevance

/block/
blk-rq-qos.c
118 unsigned int depth; in rq_depth_calc_max_depth() local
143 depth = min_t(unsigned int, rqd->default_depth, in rq_depth_calc_max_depth()
146 depth = 1 + ((depth - 1) >> min(31, rqd->scale_step)); in rq_depth_calc_max_depth()
150 depth = 1 + ((depth - 1) << -rqd->scale_step); in rq_depth_calc_max_depth()
151 if (depth > maxd) { in rq_depth_calc_max_depth()
152 depth = maxd; in rq_depth_calc_max_depth()
157 rqd->max_depth = depth; in rq_depth_calc_max_depth()
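
The hits above are the depth scaling in rq_depth_calc_max_depth(): a positive scale_step throttles by right-shifting (depth - 1), a negative one boosts by left-shifting, and the result is clamped before being stored in rqd->max_depth. A minimal userspace mirror of that arithmetic, assuming calc_max_depth() and the maxd bound are stand-ins rather than kernel API:

    #include <stdio.h>

    /* Userspace mirror of the scale_step math in the hits above;
     * calc_max_depth() and maxd are stand-ins, not kernel API. */
    static unsigned int calc_max_depth(unsigned int default_depth,
                                       int scale_step, unsigned int maxd)
    {
        unsigned int depth = default_depth;

        if (scale_step > 0) {
            /* Positive steps throttle: halve (depth - 1) per step;
             * the shift is capped at 31 to stay well defined. */
            int shift = scale_step < 31 ? scale_step : 31;
            depth = 1 + ((depth - 1) >> shift);
        } else if (scale_step < 0) {
            /* Negative steps boost, then clamp to the upper bound. */
            depth = 1 + ((depth - 1) << -scale_step);
            if (depth > maxd)
                depth = maxd;
        }
        return depth;
    }

    int main(void)
    {
        printf("%u\n", calc_max_depth(64, 2, 64));   /* 1 + (63 >> 2) = 16 */
        printf("%u\n", calc_max_depth(64, -1, 64));  /* 127, clamped to 64 */
        return 0;
    }
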
kyber-iosched.c
269 unsigned int sched_domain, unsigned int depth) in kyber_resize_domain() argument
271 depth = clamp(depth, 1U, kyber_depth[sched_domain]); in kyber_resize_domain()
272 if (depth != kqd->domain_tokens[sched_domain].sb.depth) { in kyber_resize_domain()
273 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth); in kyber_resize_domain()
275 depth); in kyber_resize_domain()
319 unsigned int orig_depth, depth; in kyber_timer_fn() local
352 orig_depth = kqd->domain_tokens[sched_domain].sb.depth; in kyber_timer_fn()
353 depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT; in kyber_timer_fn()
354 kyber_resize_domain(kqd, sched_domain, depth); in kyber_timer_fn()
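
These hits show kyber_timer_fn() rescaling a domain's token depth from its p99 latency bucket, with kyber_resize_domain() clamping the result to [1, kyber_depth[sched_domain]] before resizing the sbitmap. A sketch of just that calculation, assuming KYBER_LATENCY_SHIFT = 2 (four buckets, which matches recent kernels but should be treated as an assumption) and with max_depth standing in for kyber_depth[sched_domain]:

    #include <stdio.h>

    /* Assumed value: four latency buckets as in recent kernels. */
    #define KYBER_LATENCY_SHIFT 2

    /* Mirror of the arithmetic at kyber-iosched.c:353 plus the clamp
     * at :271; the real code then calls sbitmap_queue_resize() when
     * the depth actually changes. */
    static unsigned int resize_from_p99(unsigned int orig_depth,
                                        unsigned int p99,
                                        unsigned int max_depth)
    {
        unsigned int depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;

        /* clamp(depth, 1U, max_depth), as in the hit at :271 */
        if (depth == 0)
            depth = 1;
        if (depth > max_depth)
            depth = max_depth;
        return depth;
    }

    int main(void)
    {
        printf("%u\n", resize_from_p99(256, 1, 256));  /* 128: shrink */
        printf("%u\n", resize_from_p99(256, 3, 256));  /* 256: top bucket, keep */
        return 0;
    }

A p99 in the top bucket leaves the depth alone; lower buckets scale it down proportionally, which is how Kyber throttles a domain whose latencies are beating the target.
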
blk-mq-tag.c
464 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, in bt_alloc() argument
467 return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL, in bt_alloc()
476 unsigned int depth = queue_depth - reserved; in blk_mq_init_bitmaps() local
479 if (bt_alloc(bitmap_tags, depth, round_robin, node)) in blk_mq_init_bitmaps()
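
Here blk_mq_init_bitmaps() splits queue_depth into a normal and a reserved tag space and sizes one bitmap for each through bt_alloc(). A sketch of that split, with init_bitmap() standing in for the sbitmap_queue_init_node() call the real code makes:

    #include <stdio.h>

    /* Stand-in for sbitmap_queue_init_node() via bt_alloc();
     * the real call can fail with -ENOMEM. */
    static int init_bitmap(const char *name, unsigned int depth)
    {
        printf("%s bitmap: %u tags\n", name, depth);
        return 0;
    }

    /* Mirror of the split in blk_mq_init_bitmaps(): reserved tags
     * come out of queue_depth, each share gets its own bitmap. */
    static int init_bitmaps(unsigned int queue_depth, unsigned int reserved)
    {
        unsigned int depth = queue_depth - reserved;

        if (init_bitmap("normal", depth))
            return -1;
        if (reserved && init_bitmap("reserved", reserved))
            return -1;
        return 0;
    }

    int main(void)
    {
        return init_bitmaps(64, 2);  /* 62 normal + 2 reserved tags */
    }
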
blk-mq-tag.h
50 unsigned int depth, bool can_grow);
blk-mq.c
1936 unsigned int depth = 1; in blk_mq_flush_plug_list() local
1943 depth++; in blk_mq_flush_plug_list()
1947 trace_block_unplug(head_rq->q, depth, !from_schedule); in blk_mq_flush_plug_list()
2448 unsigned int hctx_idx, unsigned int depth) in blk_mq_alloc_rqs() argument
2467 left = rq_size * depth; in blk_mq_alloc_rqs()
2469 for (i = 0; i < depth; ) { in blk_mq_alloc_rqs()
2503 to_do = min(entries_per_page, depth - i); in blk_mq_alloc_rqs()
3378 unsigned int depth; in blk_mq_alloc_map_and_requests() local
3381 depth = set->queue_depth; in blk_mq_alloc_map_and_requests()
3399 if (depth != set->queue_depth) in blk_mq_alloc_map_and_requests()
[all …]
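
Among the blk-mq.c hits, the blk_mq_alloc_rqs() lines show requests being carved out of pages in chunks of min(entries_per_page, depth - i). A sketch of just that chunking arithmetic, with illustrative sizes; the real code also rounds rq_size and falls back to smaller page orders, which is omitted here:

    #include <stdio.h>

    #define PAGE_SIZE 4096u  /* illustrative; the kernel's is per-arch */

    /* Mirror of the loop in blk_mq_alloc_rqs(): depth requests of
     * rq_size bytes are packed into pages, min(entries_per_page,
     * depth - i) at a time. */
    static void alloc_rqs(unsigned int rq_size, unsigned int depth)
    {
        unsigned int entries_per_page = PAGE_SIZE / rq_size;
        unsigned int i;

        for (i = 0; i < depth; ) {
            unsigned int to_do = entries_per_page < depth - i
                               ? entries_per_page : depth - i;

            printf("page holds requests %u..%u\n", i, i + to_do - 1);
            i += to_do;
        }
    }

    int main(void)
    {
        alloc_rqs(384, 32);  /* 10 per page -> pages of 10, 10, 10, 2 */
        return 0;
    }
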
blk-settings.c
847 void blk_set_queue_depth(struct request_queue *q, unsigned int depth) in blk_set_queue_depth() argument
849 q->queue_depth = depth; in blk_set_queue_depth()
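
blk_set_queue_depth() simply records the depth on the queue, for drivers that cap outstanding I/O per device. A trivial userspace mirror, with the struct reduced to the one field the hit shows; everything else is scaffolding for illustration:

    #include <stdio.h>

    /* Reduced to the single field visible in the hit above. */
    struct request_queue {
        unsigned int queue_depth;
    };

    static void set_queue_depth(struct request_queue *q, unsigned int depth)
    {
        q->queue_depth = depth;  /* the assignment at blk-settings.c:849 */
    }

    int main(void)
    {
        struct request_queue q = { 0 };

        set_queue_depth(&q, 32);  /* e.g. a driver capping outstanding I/O */
        printf("queue_depth = %u\n", q.queue_depth);
        return 0;
    }
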
blk-mq.h
66 unsigned int hctx_idx, unsigned int depth);