Home
last modified time | relevance | path

Searched refs:set (Results 1 – 9 of 9) sorted by relevance

/block/
blk-mq.c:2340 static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set, in blk_mq_clear_rq_mapping() argument
2343 struct blk_mq_tags *drv_tags = set->tags[hctx_idx]; in blk_mq_clear_rq_mapping()
2352 for (i = 0; i < set->queue_depth; i++) { in blk_mq_clear_rq_mapping()
2373 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, in blk_mq_free_rqs() argument
2378 if (tags->rqs && set->ops->exit_request) { in blk_mq_free_rqs()
2386 set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
2391 blk_mq_clear_rq_mapping(set, tags, hctx_idx); in blk_mq_free_rqs()
2415 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, in blk_mq_alloc_rq_map() argument
2424 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); in blk_mq_alloc_rq_map()
2426 node = set->numa_node; in blk_mq_alloc_rq_map()
[all …]
blk-mq-tag.c:29 struct blk_mq_tag_set *set = q->tag_set; in __blk_mq_tag_busy() local
33 atomic_inc(&set->active_queues_shared_sbitmap); in __blk_mq_tag_busy()
61 struct blk_mq_tag_set *set = q->tag_set; in __blk_mq_tag_idle() local
67 atomic_dec(&set->active_queues_shared_sbitmap); in __blk_mq_tag_idle()
513 int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set) in blk_mq_init_shared_sbitmap() argument
515 int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); in blk_mq_init_shared_sbitmap()
518 ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags, in blk_mq_init_shared_sbitmap()
519 set->queue_depth, set->reserved_tags, in blk_mq_init_shared_sbitmap()
520 set->numa_node, alloc_policy); in blk_mq_init_shared_sbitmap()
524 for (i = 0; i < set->nr_hw_queues; i++) { in blk_mq_init_shared_sbitmap()
[all …]
bsg-lib.c:294 static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req, in bsg_init_rq() argument
317 static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req, in bsg_exit_rq() argument
370 struct blk_mq_tag_set *set; in bsg_setup_queue() local
381 set = &bset->tag_set; in bsg_setup_queue()
382 set->ops = &bsg_mq_ops; in bsg_setup_queue()
383 set->nr_hw_queues = 1; in bsg_setup_queue()
384 set->queue_depth = 128; in bsg_setup_queue()
385 set->numa_node = NUMA_NO_NODE; in bsg_setup_queue()
386 set->cmd_size = sizeof(struct bsg_job) + dd_job_size; in bsg_setup_queue()
387 set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING; in bsg_setup_queue()
[all …]
blk-mq-sched.c:528 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_sched_alloc_tags() local
531 hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, in blk_mq_sched_alloc_tags()
532 set->reserved_tags, set->flags); in blk_mq_sched_alloc_tags()
536 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); in blk_mq_sched_alloc_tags()
538 blk_mq_free_rq_map(hctx->sched_tags, set->flags); in blk_mq_sched_alloc_tags()
561 struct blk_mq_tag_set *set = queue->tag_set; in blk_mq_init_sched_shared_sbitmap() local
562 int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); in blk_mq_init_sched_shared_sbitmap()
572 MAX_SCHED_RQ, set->reserved_tags, in blk_mq_init_sched_shared_sbitmap()
573 set->numa_node, alloc_policy); in blk_mq_init_sched_shared_sbitmap()
585 queue->nr_requests - set->reserved_tags); in blk_mq_init_sched_shared_sbitmap()
blk-mq-tag.h:43 extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set);
44 extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set);
51 extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set,
blk-mq.h:57 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
60 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
65 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
340 struct blk_mq_tag_set *set = q->tag_set; in hctx_may_queue() local
344 users = atomic_read(&set->active_queues_shared_sbitmap); in hctx_may_queue()
blk-timeout.c:43 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); in part_timeout_show() local
45 return sprintf(buf, "%d\n", set != 0); in part_timeout_show()
blk-sysfs.c:362 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show() local
365 return queue_var_show(set << force, page); in queue_rq_affinity_show()
540 int set = -1; in queue_wc_store() local
543 set = 1; in queue_wc_store()
546 set = 0; in queue_wc_store()
548 if (set == -1) in queue_wc_store()
551 if (set) in queue_wc_store()
disk-events.c:435 .set = disk_events_set_dfl_poll_msecs,