Searched refs:queue_depth (Results 1 – 10 of 10) sorted by relevance

/block/
blk-rq-qos.c
    128  if (rqd->queue_depth == 1) { in rq_depth_calc_max_depth()
    144  rqd->queue_depth); in rq_depth_calc_max_depth()
    148  unsigned int maxd = 3 * rqd->queue_depth / 4; in rq_depth_calc_max_depth()
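The blk-rq-qos.c hits above center on rq_depth_calc_max_depth(), where the interesting arithmetic is the queue_depth == 1 special case and the cap at 3/4 of queue_depth. Below is a minimal standalone sketch of that kind of depth scaling; the function name, the scale_step parameter, and the clamping are illustrative assumptions, not the kernel code.

#include <stdio.h>

/*
 * Minimal sketch of depth scaling like the hits above hint at: a
 * queue_depth == 1 special case and a cap of 3/4 of queue_depth.
 * Not the kernel implementation; scale_step is a hypothetical knob.
 */
static unsigned int calc_max_depth(unsigned int queue_depth,
                                   unsigned int scale_step)
{
    unsigned int depth;

    /* A single-slot queue cannot be scaled any further. */
    if (queue_depth == 1)
        return 1;

    /* Shrink by the scale step, then cap the result at three
     * quarters of the full queue depth. */
    depth = queue_depth >> scale_step;
    if (depth > 3 * queue_depth / 4)
        depth = 3 * queue_depth / 4;
    if (depth < 1)
        depth = 1;

    return depth;
}

int main(void)
{
    printf("%u\n", calc_max_depth(64, 1));  /* 32 */
    printf("%u\n", calc_max_depth(64, 0));  /* capped to 48 */
    printf("%u\n", calc_max_depth(1, 3));   /* 1 */
    return 0;
}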
blk-mq.c
    2352  for (i = 0; i < set->queue_depth; i++) { in blk_mq_clear_rq_mapping()
    2676  unsigned int queue_depth, struct request *flush_rq) in blk_mq_clear_flush_rq_mapping() argument
    2687  for (i = 0; i < queue_depth; i++) in blk_mq_clear_flush_rq_mapping()
    2711  set->queue_depth, flush_rq); in blk_mq_exit_hctx()
    2887  set->queue_depth, set->reserved_tags, flags); in __blk_mq_alloc_map_and_request()
    2892  set->queue_depth); in __blk_mq_alloc_map_and_request()
    3335  q->nr_requests = set->queue_depth; in blk_mq_init_allocated_queue()
    3400  depth = set->queue_depth; in blk_mq_alloc_map_and_requests()
    3406  set->queue_depth >>= 1; in blk_mq_alloc_map_and_requests()
    3407  if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { in blk_mq_alloc_map_and_requests()
    [all …]
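The blk-mq.c hits at lines 3400-3407 point at a fallback in blk_mq_alloc_map_and_requests(): when tag-map allocation fails, queue_depth is halved and the allocation retried until it would drop below reserved_tags plus a minimum. A self-contained sketch of that retry pattern follows; the allocator, the budget parameter, and the MIN_TAGS floor are hypothetical stand-ins, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

#define MIN_TAGS 1U  /* stands in for a BLK_MQ_TAG_MIN-style floor */

/* Pretend tag-map allocator: succeeds only if depth fits the budget. */
static bool alloc_tag_maps(unsigned int depth, unsigned int budget)
{
    return depth <= budget;
}

static int alloc_with_fallback(unsigned int *queue_depth,
                               unsigned int reserved_tags,
                               unsigned int budget)
{
    unsigned int depth = *queue_depth;

    while (!alloc_tag_maps(depth, budget)) {
        depth >>= 1;                /* halve the requested depth and retry */
        if (depth < reserved_tags + MIN_TAGS)
            return -1;              /* cannot shrink any further */
    }

    *queue_depth = depth;           /* record the depth we actually got */
    return 0;
}

int main(void)
{
    unsigned int depth = 256;

    if (alloc_with_fallback(&depth, 1, 100) == 0)
        printf("allocated with depth %u\n", depth);  /* 64 */
    return 0;
}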
blk-mq-tag.c
    477  unsigned int queue_depth, unsigned int reserved, in blk_mq_init_bitmaps() argument
    480  unsigned int depth = queue_depth - reserved; in blk_mq_init_bitmaps()
    519  set->queue_depth, set->reserved_tags, in blk_mq_init_shared_sbitmap()
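The blk-mq-tag.c hit at line 480 shows the tag space being split as queue_depth - reserved before the bitmaps are set up. The sketch below illustrates that split with a simplified structure; the struct and function are made up for illustration and do not use the sbitmap API.

#include <stdio.h>

struct tag_bitmaps {
    unsigned int normal_tags;    /* tags for regular requests */
    unsigned int reserved_tags;  /* tags held back for special requests */
};

static int init_bitmaps(struct tag_bitmaps *maps,
                        unsigned int queue_depth, unsigned int reserved)
{
    if (reserved > queue_depth)
        return -1;  /* nothing would be left for normal I/O */

    maps->normal_tags = queue_depth - reserved;
    maps->reserved_tags = reserved;
    return 0;
}

int main(void)
{
    struct tag_bitmaps maps;

    if (init_bitmaps(&maps, 128, 2) == 0)
        printf("normal=%u reserved=%u\n",
               maps.normal_tags, maps.reserved_tags);
    return 0;
}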
blk-mq-tag.h
    39  unsigned int queue_depth,
blk-mq-sched.c
    605  q->nr_requests = q->tag_set->queue_depth; in blk_mq_init_sched()
    614  q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, in blk_mq_init_sched()
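The blk-mq-sched.c hits suggest two sizing rules in blk_mq_init_sched(): nr_requests tracks the tag set's queue_depth directly in one path (line 605), and is doubled but capped in the other (line 614, where the cap argument is truncated in the hit). A hedged sketch of that sizing logic, with a made-up DEFAULT_CAP standing in for the truncated constant:

#include <stdio.h>

#define DEFAULT_CAP 128U  /* hypothetical cap, not the kernel constant */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

static unsigned int sched_nr_requests(unsigned int queue_depth,
                                      int has_scheduler)
{
    if (!has_scheduler)
        return queue_depth;                      /* direct, as in the line 605 hit */

    return 2 * min_uint(queue_depth, DEFAULT_CAP);  /* doubled but capped, as in the line 614 hit */
}

int main(void)
{
    printf("%u\n", sched_nr_requests(64, 0));   /* 64 */
    printf("%u\n", sched_nr_requests(256, 1));  /* 256 (2 * 128) */
    return 0;
}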
blk-rq-qos.h
    58  unsigned int queue_depth; member
blk-iolatency.c
    999  iolat->rq_depth.queue_depth = blkg->q->nr_requests; in iolatency_pd_init()
    1001  iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth; in iolatency_pd_init()
bsg-lib.c
    384  set->queue_depth = 128; in bsg_setup_queue()
blk-wbt.c
    686  RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q); in wbt_queue_depth_changed()
blk-settings.c
    779  q->queue_depth = depth; in blk_set_queue_depth()