
Searched refs:tag_set (Results 1 – 14 of 14) sorted by relevance
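For orientation: struct blk_mq_tag_set (include/linux/blk-mq.h) is the descriptor every hit below fills in before handing it to blk_mq_alloc_tag_set(). An abridged sketch of the fields these drivers keep setting, paraphrased from memory of the header of this kernel vintage (field order and omissions are mine; see the header for the real definition):

    /* Abridged sketch; see include/linux/blk-mq.h for the full struct. */
    struct blk_mq_tag_set {
        struct blk_mq_ops   *ops;           /* driver callbacks (queue_rq, ...) */
        unsigned int        nr_hw_queues;   /* number of hardware contexts */
        unsigned int        queue_depth;    /* tags (requests) per hw queue */
        unsigned int        reserved_tags;  /* held back for special commands */
        unsigned int        cmd_size;       /* per-request driver payload (pdu) */
        int                 numa_node;      /* node for tag/request allocations */
        unsigned int        timeout;        /* request timeout, in jiffies */
        unsigned int        flags;          /* BLK_MQ_F_* */
        void                *driver_data;   /* handed back to the driver's ops */
        struct blk_mq_tags  **tags;         /* filled by blk_mq_alloc_tag_set() */
        /* ... remaining fields elided ... */
    };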

/drivers/nvme/target/
loop.c
57 struct blk_mq_tag_set tag_set; member
307 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_loop_free_ctrl()
427 blk_mq_tagset_busy_iter(&ctrl->tag_set, in nvme_loop_shutdown_ctrl()
568 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_loop_create_io_queues()
569 ctrl->tag_set.ops = &nvme_loop_mq_ops; in nvme_loop_create_io_queues()
570 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_loop_create_io_queues()
571 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_loop_create_io_queues()
572 ctrl->tag_set.numa_node = NUMA_NO_NODE; in nvme_loop_create_io_queues()
573 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_loop_create_io_queues()
574 ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + in nvme_loop_create_io_queues()
[all …]
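The nvme-loop hits above trace the canonical sequence for an embedded set: zero it, fill in ops, depth, flags, and per-request size, and reserve one tag for the fabric connect command. A minimal sketch of that sequence, with a hypothetical my_ctrl and my_mq_ops standing in for the nvme-loop types:

    #include <linux/blk-mq.h>
    #include <linux/string.h>

    struct my_ctrl {
        struct blk_mq_tag_set tag_set;   /* embedded, like nvme_loop_ctrl */
    };

    extern struct blk_mq_ops my_mq_ops;  /* assumed defined elsewhere */

    static int my_create_io_queues(struct my_ctrl *ctrl, unsigned int depth,
                                   unsigned int pdu_size)
    {
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &my_mq_ops;
        ctrl->tag_set.queue_depth = depth;
        ctrl->tag_set.reserved_tags = 1;          /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = pdu_size;        /* per-request iod */
        ctrl->tag_set.nr_hw_queues = 1;           /* illustrative */
        ctrl->tag_set.driver_data = ctrl;
        return blk_mq_alloc_tag_set(&ctrl->tag_set);
    }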
/drivers/mtd/ubi/
block.c
97 struct blk_mq_tag_set tag_set; member
408 dev->tag_set.ops = &ubiblock_mq_ops; in ubiblock_create()
409 dev->tag_set.queue_depth = 64; in ubiblock_create()
410 dev->tag_set.numa_node = NUMA_NO_NODE; in ubiblock_create()
411 dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in ubiblock_create()
412 dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu); in ubiblock_create()
413 dev->tag_set.driver_data = dev; in ubiblock_create()
414 dev->tag_set.nr_hw_queues = 1; in ubiblock_create()
416 ret = blk_mq_alloc_tag_set(&dev->tag_set); in ubiblock_create()
422 dev->rq = blk_mq_init_queue(&dev->tag_set); in ubiblock_create()
[all …]
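block.c also shows the step after allocation: blk_mq_init_queue() builds a request_queue on top of the set and returns an ERR_PTR on failure, so the set must be freed on the error path. A hedged sketch of that pairing (my_attach_queue is a hypothetical helper):

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    /* set is assumed already filled in, as in ubiblock_create() above */
    static int my_attach_queue(struct blk_mq_tag_set *set,
                               struct request_queue **q)
    {
        int ret = blk_mq_alloc_tag_set(set);

        if (ret)
            return ret;

        *q = blk_mq_init_queue(set);
        if (IS_ERR(*q)) {
            blk_mq_free_tag_set(set);   /* undo the allocation on failure */
            return PTR_ERR(*q);
        }
        return 0;
    }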
/drivers/block/
nbd.c
53 struct blk_mq_tag_set tag_set; member
384 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_read_stat()
385 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_read_stat()
489 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
573 nbd->tag_set.timeout = 0; in nbd_reset()
672 nbd->tag_set.timeout = arg * HZ; in __nbd_ioctl()
836 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); in nbd_dev_dbg_init()
955 nbd_dev[i].tag_set.ops = &nbd_mq_ops; in nbd_init()
956 nbd_dev[i].tag_set.nr_hw_queues = 1; in nbd_init()
957 nbd_dev[i].tag_set.queue_depth = 128; in nbd_init()
[all …]
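nbd uses the set in both directions: blk_mq_tag_to_rq() maps a tag read off the socket back to its request (lines 384-385), and blk_mq_tagset_busy_iter() walks every in-flight request, e.g. to fail them all on disconnect (line 489). A sketch of the iterator side; the callback signature matches busy_tag_iter_fn of this kernel vintage, and the -EIO completion is illustrative:

    #include <linux/blk-mq.h>

    /* Called once for each in-flight request found in the set. */
    static void my_fail_request(struct request *req, void *data, bool reserved)
    {
        /* two-argument completion matches this kernel era; -EIO is illustrative */
        blk_mq_complete_request(req, -EIO);
    }

    /* Fail everything still outstanding, e.g. on disconnect. */
    static void my_clear_queue(struct blk_mq_tag_set *set)
    {
        blk_mq_tagset_busy_iter(set, my_fail_request, NULL);
    }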
virtio_blk.c
38 struct blk_mq_tag_set tag_set; member
618 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); in virtblk_probe()
619 vblk->tag_set.ops = &virtio_mq_ops; in virtblk_probe()
620 vblk->tag_set.queue_depth = virtblk_queue_depth; in virtblk_probe()
621 vblk->tag_set.numa_node = NUMA_NO_NODE; in virtblk_probe()
622 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in virtblk_probe()
623 vblk->tag_set.cmd_size = in virtblk_probe()
626 vblk->tag_set.driver_data = vblk; in virtblk_probe()
627 vblk->tag_set.nr_hw_queues = vblk->num_vqs; in virtblk_probe()
629 err = blk_mq_alloc_tag_set(&vblk->tag_set); in virtblk_probe()
[all …]
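virtio_blk sizes cmd_size as a base structure plus a variable tail (lines 623 onward), and blk-mq allocates that payload alongside every request; blk_mq_rq_to_pdu() returns it. A sketch of the pattern with a hypothetical my_cmd:

    #include <linux/blk-mq.h>
    #include <linux/scatterlist.h>

    /* Hypothetical per-request payload with a variable scatterlist tail. */
    struct my_cmd {
        int status;
        struct scatterlist sg[];
    };

    static void my_size_cmds(struct blk_mq_tag_set *set, unsigned int max_segs)
    {
        set->cmd_size = sizeof(struct my_cmd) +
                        max_segs * sizeof(struct scatterlist);
    }

    /* Inside the driver's ops: the pdu is allocated alongside the request. */
    static struct my_cmd *my_cmd_from_rq(struct request *rq)
    {
        return blk_mq_rq_to_pdu(rq);
    }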
null_blk.c
38 struct blk_mq_tag_set tag_set; member
595 blk_mq_free_tag_set(&nullb->tag_set); in null_del_dev()
718 nullb->tag_set.ops = &null_mq_ops; in null_add_dev()
719 nullb->tag_set.nr_hw_queues = submit_queues; in null_add_dev()
720 nullb->tag_set.queue_depth = hw_queue_depth; in null_add_dev()
721 nullb->tag_set.numa_node = home_node; in null_add_dev()
722 nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); in null_add_dev()
723 nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in null_add_dev()
724 nullb->tag_set.driver_data = nullb; in null_add_dev()
726 rv = blk_mq_alloc_tag_set(&nullb->tag_set); in null_add_dev()
[all …]
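The null_del_dev() hit at line 595 shows the teardown half of the lifecycle: the request queue built on a set must be released before the set itself is freed. A sketch of that ordering (blk_cleanup_queue() is the era-appropriate teardown call; my_del_dev is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Teardown mirrors setup in reverse: queue first, then the tag set. */
    static void my_del_dev(struct request_queue *q, struct blk_mq_tag_set *set)
    {
        blk_cleanup_queue(q);        /* drain and release the queue */
        blk_mq_free_tag_set(set);    /* then free tags and preallocated requests */
    }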
xen-blkfront.c
210 struct blk_mq_tag_set tag_set; member
957 memset(&info->tag_set, 0, sizeof(info->tag_set)); in xlvbd_init_blk_queue()
958 info->tag_set.ops = &blkfront_mq_ops; in xlvbd_init_blk_queue()
959 info->tag_set.nr_hw_queues = info->nr_rings; in xlvbd_init_blk_queue()
967 info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2; in xlvbd_init_blk_queue()
969 info->tag_set.queue_depth = BLK_RING_SIZE(info); in xlvbd_init_blk_queue()
970 info->tag_set.numa_node = NUMA_NO_NODE; in xlvbd_init_blk_queue()
971 info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in xlvbd_init_blk_queue()
972 info->tag_set.cmd_size = 0; in xlvbd_init_blk_queue()
973 info->tag_set.driver_data = info; in xlvbd_init_blk_queue()
[all …]
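xen-blkfront derives queue_depth from the shared ring size rather than a constant, halving it in one branch (line 967), and sets cmd_size = 0 because it keeps no per-request pdu. A hedged sketch; the "two slots per request" condition is my guess at why the depth is halved:

    #include <linux/blk-mq.h>

    /* Hypothetical helper mirroring the shape of xlvbd_init_blk_queue(). */
    static void my_set_ring_depth(struct blk_mq_tag_set *set,
                                  unsigned int ring_size,
                                  bool two_slots_per_req)
    {
        set->queue_depth = two_slots_per_req ? ring_size / 2 : ring_size;
        set->cmd_size = 0;   /* this driver keeps no per-request pdu */
    }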
loop.c
1756 lo->tag_set.ops = &loop_mq_ops; in loop_add()
1757 lo->tag_set.nr_hw_queues = 1; in loop_add()
1758 lo->tag_set.queue_depth = 128; in loop_add()
1759 lo->tag_set.numa_node = NUMA_NO_NODE; in loop_add()
1760 lo->tag_set.cmd_size = sizeof(struct loop_cmd); in loop_add()
1761 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in loop_add()
1762 lo->tag_set.driver_data = lo; in loop_add()
1764 err = blk_mq_alloc_tag_set(&lo->tag_set); in loop_add()
1768 lo->lo_queue = blk_mq_init_queue(&lo->tag_set); in loop_add()
1824 blk_mq_free_tag_set(&lo->tag_set); in loop_add()
[all …]
loop.h
64 struct blk_mq_tag_set tag_set; member
rbd.c
403 struct blk_mq_tag_set tag_set; member
4251 blk_mq_free_tag_set(&rbd_dev->tag_set); in rbd_free_disk()
4489 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); in rbd_init_disk()
4490 rbd_dev->tag_set.ops = &rbd_mq_ops; in rbd_init_disk()
4491 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; in rbd_init_disk()
4492 rbd_dev->tag_set.numa_node = NUMA_NO_NODE; in rbd_init_disk()
4493 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in rbd_init_disk()
4494 rbd_dev->tag_set.nr_hw_queues = 1; in rbd_init_disk()
4495 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); in rbd_init_disk()
4497 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); in rbd_init_disk()
[all …]
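rbd's cmd_size is just sizeof(struct work_struct) (line 4495): the per-request pdu is a work item, so ->queue_rq() can defer the real I/O to a workqueue. A hedged sketch of that pattern; the return code matches the pre-blk_status_t API this listing reflects, and all names are hypothetical:

    #include <linux/blk-mq.h>
    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
        /* the work item is the pdu, so the request is recoverable from it */
        struct request *rq = blk_mq_rq_from_pdu(work);

        blk_mq_start_request(rq);
        /* ... perform the I/O, then blk_mq_end_request(rq, err) ... */
    }

    static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
    {
        struct work_struct *work = blk_mq_rq_to_pdu(bd->rq);

        INIT_WORK(work, my_work_fn);
        queue_work(system_wq, work);
        return BLK_MQ_RQ_QUEUE_OK;   /* pre-blk_status_t return code */
    }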
/drivers/md/
dm-rq.c
969 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); in dm_mq_init_request_queue()
970 if (!md->tag_set) in dm_mq_init_request_queue()
973 md->tag_set->ops = &dm_mq_ops; in dm_mq_init_request_queue()
974 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); in dm_mq_init_request_queue()
975 md->tag_set->numa_node = md->numa_node_id; in dm_mq_init_request_queue()
976 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in dm_mq_init_request_queue()
977 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); in dm_mq_init_request_queue()
978 md->tag_set->driver_data = md; in dm_mq_init_request_queue()
980 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); in dm_mq_init_request_queue()
984 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; in dm_mq_init_request_queue()
[all …]
dm-core.h
126 struct blk_mq_tag_set *tag_set; member
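dm-rq differs from every other hit here: as the dm-core.h member shows, the tag_set is a pointer, heap-allocated on the device's NUMA node rather than embedded, and cmd_size is grown by the target's per-I/O data (lines 980-984). A sketch of that shape; per_io_size stands in for immutable_tgt->per_io_data_size:

    #include <linux/blk-mq.h>
    #include <linux/slab.h>

    static struct blk_mq_tag_set *my_alloc_set(int node, unsigned int base_size,
                                               unsigned int per_io_size)
    {
        struct blk_mq_tag_set *set;

        set = kzalloc_node(sizeof(*set), GFP_KERNEL, node);
        if (!set)
            return NULL;

        set->numa_node = node;
        set->cmd_size = base_size + per_io_size;  /* driver pdu + target data */
        return set;
    }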
/drivers/nvme/host/
rdma.c
114 struct blk_mq_tag_set tag_set; member
716 ret = blk_mq_reinit_tagset(&ctrl->tag_set); in nvme_rdma_reconnect_ctrl_work()
799 blk_mq_tagset_busy_iter(&ctrl->tag_set, in nvme_rdma_error_recovery_work()
1121 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1657 blk_mq_tagset_busy_iter(&ctrl->tag_set, in nvme_rdma_shutdown_ctrl()
1679 blk_mq_free_tag_set(&ctrl->tag_set); in __nvme_rdma_remove_ctrl()
1748 ret = blk_mq_reinit_tagset(&ctrl->tag_set); in nvme_rdma_reset_ctrl_work()
1824 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_rdma_create_io_queues()
1825 ctrl->tag_set.ops = &nvme_rdma_mq_ops; in nvme_rdma_create_io_queues()
1826 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_rdma_create_io_queues()
[all …]
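nvme-rdma reuses one set across controller resets: blk_mq_tagset_busy_iter() fails in-flight commands during error recovery (line 799), and blk_mq_reinit_tagset() (an API of this kernel vintage, since removed) refreshes per-request driver state before reconnecting (lines 716 and 1748). A hedged sketch of the recovery ordering with hypothetical names:

    #include <linux/blk-mq.h>

    static void my_cancel_request(struct request *req, void *data, bool reserved)
    {
        blk_mq_complete_request(req, -EIO);   /* illustrative error */
    }

    static int my_recover(struct blk_mq_tag_set *set)
    {
        /* fail whatever is still in flight on the broken transport */
        blk_mq_tagset_busy_iter(set, my_cancel_request, NULL);

        /* refresh per-request driver state before reusing the set */
        return blk_mq_reinit_tagset(set);
    }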
/drivers/scsi/
scsi_lib.c
2092 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); in scsi_mq_alloc_queue()
2113 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); in scsi_mq_setup_tags()
2114 shost->tag_set.ops = &scsi_mq_ops; in scsi_mq_setup_tags()
2115 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1; in scsi_mq_setup_tags()
2116 shost->tag_set.queue_depth = shost->can_queue; in scsi_mq_setup_tags()
2117 shost->tag_set.cmd_size = cmd_size; in scsi_mq_setup_tags()
2118 shost->tag_set.numa_node = NUMA_NO_NODE; in scsi_mq_setup_tags()
2119 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in scsi_mq_setup_tags()
2120 shost->tag_set.flags |= in scsi_mq_setup_tags()
2122 shost->tag_set.driver_data = shost; in scsi_mq_setup_tags()
[all …]
hosts.c
360 if (shost->tag_set.tags) in scsi_host_dev_release()
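SCSI is the one-to-many case: a single tag_set lives in the Scsi_Host and every scsi_device's queue is built from it, so tags are shared across all devices of the host; the hosts.c hit checks tag_set.tags (set by blk_mq_alloc_tag_set()) to know whether the set was ever allocated before freeing it. A minimal sketch of that relationship, with hypothetical types:

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    struct my_host {
        struct blk_mq_tag_set tag_set;   /* one set per host, as in Scsi_Host */
    };

    /* Every device queue on the host draws tags from the same shared set. */
    static struct request_queue *my_alloc_dev_queue(struct my_host *host)
    {
        struct request_queue *q = blk_mq_init_queue(&host->tag_set);

        return IS_ERR(q) ? NULL : q;
    }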