Searched refs:depth (Results 1 – 7 of 7) sorted by relevance

/block/
blk-mq-tag.c
   30  ret = find_first_zero_bit(&bm->word, bm->depth);  in bt_has_free_tags()
   31  if (ret < bm->depth)  in bt_has_free_tags()
  123  unsigned int depth, users;  in hctx_may_queue() (local)
  133  if (bt->depth == 1)  in hctx_may_queue()
  143  depth = max((bt->depth + users - 1) / users, 4U);  in hctx_may_queue()
  144  return atomic_read(&hctx->nr_active) < depth;  in hctx_may_queue()
  153  tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);  in __bt_get_word()
  154  if (unlikely(tag >= bm->depth)) {  in __bt_get_word()
  171  if (last_tag >= bm->depth - 1)  in __bt_get_word()
  234  if (last_tag >= bt->depth - 1)  in __bt_get()
  [all …]
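
The hctx_may_queue() hits above show how a shared tag map's depth is split between its active users: each hardware context gets roughly ceil(depth / users) tags, with a floor of four so that small maps still make progress. A minimal stand-alone sketch of that check, assuming only the arithmetic visible above (the function and parameter names here are illustrative):

#include <stdbool.h>

/*
 * Simplified fair-share check modelled on the hctx_may_queue() lines
 * above: with 'users' contexts sharing a map of 'depth' tags, a context
 * already holding 'nr_active' tags may allocate another one only while
 * it stays under its share.
 */
bool may_queue(unsigned int depth, unsigned int users, unsigned int nr_active)
{
        unsigned int share;

        if (depth == 1 || users == 0)           /* nothing to divide fairly */
                return true;

        share = (depth + users - 1) / users;    /* round up */
        if (share < 4)                          /* allow at least some tags */
                share = 4;

        return nr_active < share;
}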
blk-tag.c
   89  init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)  in init_tag_map() (argument)
   95  if (q && depth > q->nr_requests * 2) {  in init_tag_map()
   96  depth = q->nr_requests * 2;  in init_tag_map()
   98  __func__, depth);  in init_tag_map()
  101  tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);  in init_tag_map()
  105  nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;  in init_tag_map()
  110  tags->real_max_depth = depth;  in init_tag_map()
  111  tags->max_depth = depth;  in init_tag_map()
  122  int depth, int alloc_policy)  in __blk_queue_init_tags() (argument)
  130  if (init_tag_map(q, tags, depth))  in __blk_queue_init_tags()
  [all …]
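
The init_tag_map() hits show the two sizing decisions in the legacy tag map: the requested depth is clamped to twice the queue's nr_requests, and the tag bitmap is allocated in whole unsigned longs. A hedged sketch of just that arithmetic, with ALIGN expanded to its usual round-up meaning (the helper names here are illustrative):

#include <limits.h>

#define BITS_PER_LONG   (sizeof(long) * CHAR_BIT)

/*
 * Clamp a requested tag depth the way init_tag_map() does above:
 * a map deeper than twice the queue's request count could never be
 * fully used.
 */
unsigned int clamp_tag_depth(unsigned int depth, unsigned int nr_requests)
{
        if (depth > nr_requests * 2)
                depth = nr_requests * 2;
        return depth;
}

/*
 * Number of unsigned longs backing a 'depth'-bit tag bitmap, i.e.
 * ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG, rounded up.
 */
unsigned int tag_map_nr_ulongs(unsigned int depth)
{
        return (depth + BITS_PER_LONG - 1) / BITS_PER_LONG;
}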
blk-mq-tag.h
   20  unsigned int depth;  (member)
   59  extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
blk-mq.c
  722  bit = find_next_bit(&bm->word, bm->depth, bit);  in flush_busy_ctxs()
  723  if (bit >= bm->depth)  in flush_busy_ctxs()
 1040  int depth,  in blk_mq_insert_requests() (argument)
 1047  trace_block_unplug(q, depth, !from_schedule);  in blk_mq_insert_requests()
 1092  unsigned int depth;  in blk_mq_flush_plug_list() (local)
 1100  depth = 0;  in blk_mq_flush_plug_list()
 1109  &ctx_list, depth,  in blk_mq_flush_plug_list()
 1115  depth = 0;  in blk_mq_flush_plug_list()
 1118  depth++;  in blk_mq_flush_plug_list()
 1127  blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,  in blk_mq_flush_plug_list()
  [all …]
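
The blk_mq_flush_plug_list() hits trace a batching pattern: walk the plug list, count consecutive requests that belong to the same software-queue context, and hand each run to blk_mq_insert_requests() with that count as depth (the same value trace_block_unplug() reports). A schematic version of that loop, assuming the list is already sorted by context and using hypothetical types in place of the kernel's:

#include <stdio.h>
#include <stddef.h>

/*
 * Hypothetical plugged request: 'ctx' identifies the software queue it
 * was queued on; the list is assumed to be sorted by ctx.
 */
struct plugged_req {
        struct plugged_req *next;
        void *ctx;
};

/* Stand-in for blk_mq_insert_requests(): dispatch one batch of 'depth'. */
static void insert_batch(void *ctx, unsigned int depth)
{
        printf("flush %u request(s) for ctx %p\n", depth, ctx);
}

/*
 * Mirror of the depth = 0 / depth++ pattern in blk_mq_flush_plug_list():
 * reset the count whenever the ctx changes, flushing the previous run.
 */
void flush_plug_list(struct plugged_req *head)
{
        void *this_ctx = NULL;
        unsigned int depth = 0;
        struct plugged_req *rq;

        for (rq = head; rq; rq = rq->next) {
                if (rq->ctx != this_ctx) {
                        if (this_ctx)
                                insert_batch(this_ctx, depth);
                        this_ctx = rq->ctx;
                        depth = 0;
                }
                depth++;
        }
        if (this_ctx)
                insert_batch(this_ctx, depth);
}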
blk-mq.h
   71  unsigned long depth;  (member)
blk-core.c
 3217  static void queue_unplugged(struct request_queue *q, unsigned int depth,  in queue_unplugged() (argument)
 3221  trace_block_unplug(q, depth, !from_schedule);  in queue_unplugged()
 3278  unsigned int depth;  in blk_flush_plug_list() (local)
 3293  depth = 0;  in blk_flush_plug_list()
 3309  queue_unplugged(q, depth, from_schedule);  in blk_flush_plug_list()
 3311  depth = 0;  in blk_flush_plug_list()
 3331  depth++;  in blk_flush_plug_list()
 3338  queue_unplugged(q, depth, from_schedule);  in blk_flush_plug_list()
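
The blk_flush_plug_list() hits are the single-queue counterpart of the blk-mq loop sketched above: the same depth = 0 / depth++ counting, keyed on the request's queue rather than a software-queue context, with each completed run reported by queue_unplugged() through trace_block_unplug().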
cfq-iosched.c
 3449  unsigned int depth;  in cfq_may_dispatch() (local)
 3451  depth = last_sync / cfqd->cfq_slice[1];  in cfq_may_dispatch()
 3452  if (!depth && !cfqq->dispatched)  in cfq_may_dispatch()
 3453  depth = 1;  in cfq_may_dispatch()
 3454  if (depth < max_dispatch)  in cfq_may_dispatch()
 3455  max_dispatch = depth;  in cfq_may_dispatch()
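
The cfq_may_dispatch() hits show CFQ's dispatch-depth throttle: the permitted depth grows with last_sync, which by its name tracks how long synchronous I/O has been quiet, measured in sync-slice (cfq_slice[1]) units, while a queue with nothing dispatched is still allowed one request. A reduced sketch of that calculation alone (the parameter names here are illustrative):

/*
 * Simplified dispatch limit modelled on the cfq_may_dispatch() lines
 * above: scale the allowed depth by how many sync-slice intervals have
 * elapsed since sync I/O was last seen, but let an idle queue send at
 * least one request.
 */
unsigned int dispatch_limit(unsigned int max_dispatch,
                            unsigned long since_last_sync,
                            unsigned long sync_slice,
                            unsigned int dispatched)
{
        unsigned int depth;

        if (!sync_slice)                /* assume a sane, non-zero slice */
                return max_dispatch;

        depth = since_last_sync / sync_slice;
        if (!depth && !dispatched)
                depth = 1;
        if (depth < max_dispatch)
                max_dispatch = depth;

        return max_dispatch;
}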