
Searched refs:tag_set (Results 1 – 25 of 48) sorted by relevance

/drivers/nvme/target/
loop.c:35 struct blk_mq_tag_set tag_set; member
90 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_loop_tagset()
212 (set == &ctrl->tag_set) ? hctx_idx + 1 : 0); in nvme_loop_init_request()
275 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_loop_free_ctrl()
411 blk_mq_tagset_busy_iter(&ctrl->tag_set, in nvme_loop_shutdown_ctrl()
413 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in nvme_loop_shutdown_ctrl()
472 blk_mq_update_nr_hw_queues(&ctrl->tag_set, in nvme_loop_reset_ctrl_work()
513 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_loop_create_io_queues()
514 ctrl->tag_set.ops = &nvme_loop_mq_ops; in nvme_loop_create_io_queues()
515 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_loop_create_io_queues()
[all …]
/drivers/md/
dm-rq.c:543 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); in dm_mq_init_request_queue()
544 if (!md->tag_set) in dm_mq_init_request_queue()
547 md->tag_set->ops = &dm_mq_ops; in dm_mq_init_request_queue()
548 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); in dm_mq_init_request_queue()
549 md->tag_set->numa_node = md->numa_node_id; in dm_mq_init_request_queue()
550 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE; in dm_mq_init_request_queue()
551 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); in dm_mq_init_request_queue()
552 md->tag_set->driver_data = md; in dm_mq_init_request_queue()
554 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); in dm_mq_init_request_queue()
558 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; in dm_mq_init_request_queue()
[all …]
dm-core.h:112 struct blk_mq_tag_set *tag_set; member
/drivers/mmc/core/
queue.c:427 memset(&mq->tag_set, 0, sizeof(mq->tag_set)); in mmc_init_queue()
428 mq->tag_set.ops = &mmc_mq_ops; in mmc_init_queue()
434 mq->tag_set.queue_depth = in mmc_init_queue()
437 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; in mmc_init_queue()
438 mq->tag_set.numa_node = NUMA_NO_NODE; in mmc_init_queue()
439 mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in mmc_init_queue()
440 mq->tag_set.nr_hw_queues = 1; in mmc_init_queue()
441 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); in mmc_init_queue()
442 mq->tag_set.driver_data = mq; in mmc_init_queue()
456 ret = blk_mq_alloc_tag_set(&mq->tag_set); in mmc_init_queue()
[all …]
queue.h:77 struct blk_mq_tag_set tag_set; member
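
The mmc hits above show the pattern that recurs through most of these results: the driver embeds a struct blk_mq_tag_set, fills in ops, queue_depth, numa_node, flags, nr_hw_queues, cmd_size and driver_data, allocates the set with blk_mq_alloc_tag_set(), and then builds a request queue on top of it with blk_mq_init_queue(). A minimal sketch of that pattern follows; my_dev, my_mq_ops and the literal depth are hypothetical placeholders, not taken from any driver listed here.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/string.h>

struct my_dev {
        struct blk_mq_tag_set tag_set;          /* embedded, like the "member" hits above */
        struct request_queue *queue;
};

static const struct blk_mq_ops my_mq_ops = {
        /* .queue_rq and the other callbacks are omitted from this sketch */
};

static int my_dev_init_queue(struct my_dev *dev)
{
        int ret;

        memset(&dev->tag_set, 0, sizeof(dev->tag_set));
        dev->tag_set.ops = &my_mq_ops;
        dev->tag_set.nr_hw_queues = 1;
        dev->tag_set.queue_depth = 64;
        dev->tag_set.numa_node = NUMA_NO_NODE;
        dev->tag_set.cmd_size = 0;              /* size of per-request driver data, if any */
        dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        dev->tag_set.driver_data = dev;

        ret = blk_mq_alloc_tag_set(&dev->tag_set);
        if (ret)
                return ret;

        dev->queue = blk_mq_init_queue(&dev->tag_set);
        if (IS_ERR(dev->queue)) {
                blk_mq_free_tag_set(&dev->tag_set);
                return PTR_ERR(dev->queue);
        }
        return 0;
}

Tear-down runs in the opposite order, which is why the blk_mq_free_tag_set() hits sit in *_free_ctrl(), *_remove() and *_exit() paths: the request queue is torn down first, then the tag set is released.
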
/drivers/mtd/ubi/
block.c:89 struct blk_mq_tag_set tag_set; member
419 dev->tag_set.ops = &ubiblock_mq_ops; in ubiblock_create()
420 dev->tag_set.queue_depth = 64; in ubiblock_create()
421 dev->tag_set.numa_node = NUMA_NO_NODE; in ubiblock_create()
422 dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in ubiblock_create()
423 dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu); in ubiblock_create()
424 dev->tag_set.driver_data = dev; in ubiblock_create()
425 dev->tag_set.nr_hw_queues = 1; in ubiblock_create()
427 ret = blk_mq_alloc_tag_set(&dev->tag_set); in ubiblock_create()
433 dev->rq = blk_mq_init_queue(&dev->tag_set); in ubiblock_create()
[all …]
/drivers/block/paride/
pd.c:239 struct blk_mq_tag_set tag_set; member
904 memset(&disk->tag_set, 0, sizeof(disk->tag_set)); in pd_probe_drive()
905 disk->tag_set.ops = &pd_mq_ops; in pd_probe_drive()
906 disk->tag_set.cmd_size = sizeof(struct pd_req); in pd_probe_drive()
907 disk->tag_set.nr_hw_queues = 1; in pd_probe_drive()
908 disk->tag_set.nr_maps = 1; in pd_probe_drive()
909 disk->tag_set.queue_depth = 2; in pd_probe_drive()
910 disk->tag_set.numa_node = NUMA_NO_NODE; in pd_probe_drive()
911 disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in pd_probe_drive()
913 if (blk_mq_alloc_tag_set(&disk->tag_set)) in pd_probe_drive()
[all …]
pcd.c:203 struct blk_mq_tag_set tag_set; member
314 disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, in pcd_init_units()
761 blk_mq_free_tag_set(&cd->tag_set); in pcd_detect()
1024 blk_mq_free_tag_set(&cd->tag_set); in pcd_init()
1056 blk_mq_free_tag_set(&cd->tag_set); in pcd_exit()
pf.c:242 struct blk_mq_tag_set tag_set; member
300 disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops, in pf_init_units()
770 blk_mq_free_tag_set(&pf->tag_set); in pf_detect()
1039 blk_mq_free_tag_set(&pf->tag_set); in pf_init()
1069 blk_mq_free_tag_set(&pf->tag_set); in pf_exit()
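
For the simple single-queue case, pcd.c and pf.c above (and later ps3disk.c, z2ram.c, gdrom.c and mtd_blkdevs.c) skip the field-by-field setup and use the blk_mq_init_sq_queue() convenience wrapper, which fills in the tag set and allocates the queue in one call. A sketch under the same assumptions, with a hypothetical my_dev embedding the set:

#include <linux/blk-mq.h>
#include <linux/err.h>

struct my_dev {
        struct blk_mq_tag_set tag_set;
        struct request_queue *queue;
};

static const struct blk_mq_ops my_mq_ops = { /* .queue_rq omitted */ };

static int my_dev_init_sq(struct my_dev *dev)
{
        /* one hw queue, depth 2, merging allowed: the shape pcd/pf use above */
        dev->queue = blk_mq_init_sq_queue(&dev->tag_set, &my_mq_ops,
                                          2, BLK_MQ_F_SHOULD_MERGE);
        if (IS_ERR(dev->queue))
                return PTR_ERR(dev->queue);
        return 0;
}

The wrapper only hides the setup; the set still has to be released explicitly, which is what the blk_mq_free_tag_set() calls in pcd_detect()/pcd_exit() and pf_detect()/pf_exit() above are doing.
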
/drivers/s390/block/
scm_blk.c:453 bdev->tag_set.ops = &scm_mq_ops; in scm_blk_dev_setup()
454 bdev->tag_set.cmd_size = sizeof(blk_status_t); in scm_blk_dev_setup()
455 bdev->tag_set.nr_hw_queues = nr_requests; in scm_blk_dev_setup()
456 bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; in scm_blk_dev_setup()
457 bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in scm_blk_dev_setup()
458 bdev->tag_set.numa_node = NUMA_NO_NODE; in scm_blk_dev_setup()
460 ret = blk_mq_alloc_tag_set(&bdev->tag_set); in scm_blk_dev_setup()
464 rq = blk_mq_init_queue(&bdev->tag_set); in scm_blk_dev_setup()
509 blk_mq_free_tag_set(&bdev->tag_set); in scm_blk_dev_setup()
519 blk_mq_free_tag_set(&bdev->tag_set); in scm_blk_dev_cleanup()
scm_blk.h:21 struct blk_mq_tag_set tag_set; member
/drivers/block/
virtio_blk.c:42 struct blk_mq_tag_set tag_set; member
789 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); in virtblk_probe()
790 vblk->tag_set.ops = &virtio_mq_ops; in virtblk_probe()
791 vblk->tag_set.queue_depth = virtblk_queue_depth; in virtblk_probe()
792 vblk->tag_set.numa_node = NUMA_NO_NODE; in virtblk_probe()
793 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in virtblk_probe()
794 vblk->tag_set.cmd_size = in virtblk_probe()
797 vblk->tag_set.driver_data = vblk; in virtblk_probe()
798 vblk->tag_set.nr_hw_queues = vblk->num_vqs; in virtblk_probe()
800 err = blk_mq_alloc_tag_set(&vblk->tag_set); in virtblk_probe()
[all …]
sx8.c:278 struct blk_mq_tag_set tag_set; member
926 rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx); in carm_handle_resp()
1363 q = blk_mq_init_queue(&host->tag_set); in carm_init_disk()
1459 memset(&host->tag_set, 0, sizeof(host->tag_set)); in carm_init_one()
1460 host->tag_set.ops = &carm_mq_ops; in carm_init_one()
1461 host->tag_set.cmd_size = sizeof(struct carm_request); in carm_init_one()
1462 host->tag_set.nr_hw_queues = 1; in carm_init_one()
1463 host->tag_set.nr_maps = 1; in carm_init_one()
1464 host->tag_set.queue_depth = max_queue; in carm_init_one()
1465 host->tag_set.numa_node = NUMA_NO_NODE; in carm_init_one()
[all …]
nbd.c:107 struct blk_mq_tag_set tag_set; member
229 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
429 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
695 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_read_stat()
696 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_read_stat()
811 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
1087 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1088 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1213 nbd->tag_set.timeout = 0; in nbd_config_put()
1249 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); in nbd_start_device()
[all …]
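
nbd_clear_que() above, like nvme_loop_shutdown_ctrl() earlier, tears down outstanding I/O by walking every busy request on the tag set and then waiting for the completions to land. A sketch of that shutdown idiom follows; the callback name is hypothetical, and the three-argument bool-returning callback matches the blk_mq_tagset_busy_iter() prototype of the kernel these results come from.

#include <linux/blk-mq.h>

struct my_dev {
        struct blk_mq_tag_set tag_set;
};

/* called once for each request the block layer still considers in flight */
static bool my_cancel_request(struct request *rq, void *data, bool reserved)
{
        /* a real driver records an error status in its per-request data here */
        blk_mq_complete_request(rq);
        return true;            /* keep iterating over busy requests */
}

static void my_dev_shutdown(struct my_dev *dev)
{
        blk_mq_tagset_busy_iter(&dev->tag_set, my_cancel_request, NULL);
        blk_mq_tagset_wait_completed_request(&dev->tag_set);
}
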
null_blk_main.c:72 static struct blk_mq_tag_set tag_set; variable
1425 nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1426 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1677 nullb->tag_set = &tag_set; in null_add_dev()
1680 nullb->tag_set = &nullb->__tag_set; in null_add_dev()
1681 rv = null_init_tag_set(nullb, nullb->tag_set); in null_add_dev()
1690 nullb->tag_set->timeout = 5 * HZ; in null_add_dev()
1691 nullb->q = blk_mq_init_queue(nullb->tag_set); in null_add_dev()
1761 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) in null_add_dev()
1762 blk_mq_free_tag_set(nullb->tag_set); in null_add_dev()
[all …]
z2ram.c:340 static struct blk_mq_tag_set tag_set; variable
363 z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16, in z2_init()
399 blk_mq_free_tag_set(&tag_set); in z2_exit()
xen-blkfront.c:216 struct blk_mq_tag_set tag_set; member
966 memset(&info->tag_set, 0, sizeof(info->tag_set)); in xlvbd_init_blk_queue()
967 info->tag_set.ops = &blkfront_mq_ops; in xlvbd_init_blk_queue()
968 info->tag_set.nr_hw_queues = info->nr_rings; in xlvbd_init_blk_queue()
976 info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2; in xlvbd_init_blk_queue()
978 info->tag_set.queue_depth = BLK_RING_SIZE(info); in xlvbd_init_blk_queue()
979 info->tag_set.numa_node = NUMA_NO_NODE; in xlvbd_init_blk_queue()
980 info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in xlvbd_init_blk_queue()
981 info->tag_set.cmd_size = sizeof(struct blkif_req); in xlvbd_init_blk_queue()
982 info->tag_set.driver_data = info; in xlvbd_init_blk_queue()
[all …]
skd_main.c:217 struct blk_mq_tag_set tag_set; member
396 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count); in skd_in_flight()
1520 WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], in skd_isr_completion_posted()
1526 rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag); in skd_isr_completion_posted()
1917 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev); in skd_recover_requests()
2835 memset(&skdev->tag_set, 0, sizeof(skdev->tag_set)); in skd_cons_disk()
2836 skdev->tag_set.ops = &skd_mq_ops; in skd_cons_disk()
2837 skdev->tag_set.nr_hw_queues = 1; in skd_cons_disk()
2838 skdev->tag_set.queue_depth = skd_max_queue_depth; in skd_cons_disk()
2839 skdev->tag_set.cmd_size = sizeof(struct skd_request_context) + in skd_cons_disk()
[all …]
ps3disk.c:33 struct blk_mq_tag_set tag_set; member
444 queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1, in ps3disk_probe()
496 blk_mq_free_tag_set(&priv->tag_set); in ps3disk_probe()
522 blk_mq_free_tag_set(&priv->tag_set); in ps3disk_remove()
loop.c:2007 lo->tag_set.ops = &loop_mq_ops; in loop_add()
2008 lo->tag_set.nr_hw_queues = 1; in loop_add()
2009 lo->tag_set.queue_depth = 128; in loop_add()
2010 lo->tag_set.numa_node = NUMA_NO_NODE; in loop_add()
2011 lo->tag_set.cmd_size = sizeof(struct loop_cmd); in loop_add()
2012 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in loop_add()
2013 lo->tag_set.driver_data = lo; in loop_add()
2015 err = blk_mq_alloc_tag_set(&lo->tag_set); in loop_add()
2019 lo->lo_queue = blk_mq_init_queue(&lo->tag_set); in loop_add()
2078 blk_mq_free_tag_set(&lo->tag_set); in loop_add()
[all …]
loop.h:63 struct blk_mq_tag_set tag_set; member
/drivers/mtd/
mtd_blkdevs.c:35 blk_mq_free_tag_set(dev->tag_set); in blktrans_dev_release()
36 kfree(dev->tag_set); in blktrans_dev_release()
429 new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL); in add_mtd_blktrans_dev()
430 if (!new->tag_set) in add_mtd_blktrans_dev()
433 new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2, in add_mtd_blktrans_dev()
469 kfree(new->tag_set); in add_mtd_blktrans_dev()
/drivers/nvme/host/
fc.c:149 struct blk_mq_tag_set tag_set; member
1768 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_fc_init_request()
2024 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_ctrl_free()
2463 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_fc_create_io_queues()
2464 ctrl->tag_set.ops = &nvme_fc_mq_ops; in nvme_fc_create_io_queues()
2465 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_fc_create_io_queues()
2466 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_fc_create_io_queues()
2467 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_fc_create_io_queues()
2468 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_fc_create_io_queues()
2469 ctrl->tag_set.cmd_size = in nvme_fc_create_io_queues()
[all …]
/drivers/scsi/
scsi_lib.c:1775 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); in scsi_map_queues()
1872 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); in scsi_mq_alloc_queue()
1893 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); in scsi_mq_setup_tags()
1895 shost->tag_set.ops = &scsi_mq_ops; in scsi_mq_setup_tags()
1897 shost->tag_set.ops = &scsi_mq_ops_no_commit; in scsi_mq_setup_tags()
1898 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1; in scsi_mq_setup_tags()
1899 shost->tag_set.queue_depth = shost->can_queue; in scsi_mq_setup_tags()
1900 shost->tag_set.cmd_size = cmd_size; in scsi_mq_setup_tags()
1901 shost->tag_set.numa_node = NUMA_NO_NODE; in scsi_mq_setup_tags()
1902 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in scsi_mq_setup_tags()
[all …]
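
Two things stand out in the scsi_lib.c block above: scsi_map_queues() recovers its Scsi_Host from the tag set with container_of(), and scsi_mq_alloc_queue() builds every scsi_device request queue on the one host-wide tag set, so the set lives in the Scsi_Host rather than per device. The container_of() idiom works for any driver that embeds the set; a hypothetical sketch:

#include <linux/blk-mq.h>
#include <linux/kernel.h>

struct my_dev {
        struct blk_mq_tag_set tag_set;
};

/* recover the owning device from a tag_set pointer handed to a callback */
static inline struct my_dev *set_to_my_dev(struct blk_mq_tag_set *set)
{
        return container_of(set, struct my_dev, tag_set);
}
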
/drivers/cdrom/
gdrom.c:105 struct blk_mq_tag_set tag_set; member
781 gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1, in probe_gdrom()
805 blk_mq_free_tag_set(&gd.tag_set); in probe_gdrom()
824 blk_mq_free_tag_set(&gd.tag_set); in remove_gdrom()
