/kernel/linux/linux-5.10/drivers/md/ |
D | dm-rq.c |
    542  md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);  in dm_mq_init_request_queue()
    543  if (!md->tag_set)  in dm_mq_init_request_queue()
    546  md->tag_set->ops = &dm_mq_ops;  in dm_mq_init_request_queue()
    547  md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();  in dm_mq_init_request_queue()
    548  md->tag_set->numa_node = md->numa_node_id;  in dm_mq_init_request_queue()
    549  md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;  in dm_mq_init_request_queue()
    550  md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();  in dm_mq_init_request_queue()
    551  md->tag_set->driver_data = md;  in dm_mq_init_request_queue()
    553  md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);  in dm_mq_init_request_queue()
    557  md->tag_set->cmd_size += immutable_tgt->per_io_data_size;  in dm_mq_init_request_queue()
    [all …]
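The dm-rq.c hits above are the canonical heap-allocated tag-set setup: zero-allocate a struct blk_mq_tag_set on the device's home NUMA node, fill in ops, depth, flags and per-command size, then register it with blk_mq_alloc_tag_set(). A minimal sketch of that sequence against the 5.10 API follows; the my_* names, the fixed depth of 128 and the per-request struct are placeholders, not taken from dm-rq.c.

    #include <linux/blk-mq.h>
    #include <linux/slab.h>

    struct my_per_rq_data {
        blk_status_t status;            /* handed back via blk_mq_rq_to_pdu() */
    };

    struct my_dev {
        struct blk_mq_tag_set *tag_set;
        int numa_node_id;
    };

    static const struct blk_mq_ops my_mq_ops;   /* .queue_rq etc. elided */

    static int my_init_tag_set(struct my_dev *md)
    {
        int err;

        /* Allocate the set itself on the device's NUMA node, as dm-rq does. */
        md->tag_set = kzalloc_node(sizeof(*md->tag_set), GFP_KERNEL,
                                   md->numa_node_id);
        if (!md->tag_set)
            return -ENOMEM;

        md->tag_set->ops = &my_mq_ops;
        md->tag_set->queue_depth = 128;         /* dm reads a tunable here */
        md->tag_set->numa_node = md->numa_node_id;
        /* BLK_MQ_F_STACKING marks a queue stacked on other block devices. */
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
        md->tag_set->nr_hw_queues = 1;
        md->tag_set->driver_data = md;
        md->tag_set->cmd_size = sizeof(struct my_per_rq_data);

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err) {
            kfree(md->tag_set);
            md->tag_set = NULL;
        }
        return err;
    }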
|
/kernel/linux/linux-5.10/drivers/nvme/target/ |
D | loop.c |
    35   struct blk_mq_tag_set tag_set;  member
    88   return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_loop_tagset()
    211  (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);  in nvme_loop_init_request()
    285  blk_mq_free_tag_set(&ctrl->tag_set);  in nvme_loop_free_ctrl()
    425  blk_mq_tagset_busy_iter(&ctrl->tag_set,  in nvme_loop_shutdown_ctrl()
    427  blk_mq_tagset_wait_completed_request(&ctrl->tag_set);  in nvme_loop_shutdown_ctrl()
    487  blk_mq_update_nr_hw_queues(&ctrl->tag_set,  in nvme_loop_reset_ctrl_work()
    527  memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));  in nvme_loop_create_io_queues()
    528  ctrl->tag_set.ops = &nvme_loop_mq_ops;  in nvme_loop_create_io_queues()
    529  ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;  in nvme_loop_create_io_queues()
    [all …]
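loop.c also shows the teardown half of a tag set's life: nvme_loop_shutdown_ctrl() walks every in-flight request with blk_mq_tagset_busy_iter() and then blocks in blk_mq_tagset_wait_completed_request() until each has completed. A hedged sketch of that pattern; the iterator is simplified, whereas nvme's real nvme_cancel_request() also records an error status in the request before completing it.

    #include <linux/blk-mq.h>

    /* 5.10 iterator signature: the last argument flags reserved tags. */
    static bool my_cancel_request(struct request *rq, void *data, bool reserved)
    {
        if (blk_mq_request_started(rq))
            blk_mq_complete_request(rq);
        return true;                        /* keep iterating */
    }

    static void my_shutdown_io_queues(struct blk_mq_tag_set *set)
    {
        /* Fail every request the driver still owns... */
        blk_mq_tagset_busy_iter(set, my_cancel_request, NULL);
        /* ...and wait until the completions have actually run. */
        blk_mq_tagset_wait_completed_request(set);
    }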
|
/kernel/linux/linux-5.10/drivers/mmc/core/ |
D | queue.c |
    437  memset(&mq->tag_set, 0, sizeof(mq->tag_set));  in mmc_init_queue()
    438  mq->tag_set.ops = &mmc_mq_ops;  in mmc_init_queue()
    444  mq->tag_set.queue_depth =  in mmc_init_queue()
    447  mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;  in mmc_init_queue()
    448  mq->tag_set.numa_node = NUMA_NO_NODE;  in mmc_init_queue()
    449  mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;  in mmc_init_queue()
    450  mq->tag_set.nr_hw_queues = 1;  in mmc_init_queue()
    451  mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);  in mmc_init_queue()
    452  mq->tag_set.driver_data = mq;  in mmc_init_queue()
    466  ret = blk_mq_alloc_tag_set(&mq->tag_set);  in mmc_init_queue()
    [all …]
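mmc embeds the tag set directly in its queue object and sets BLK_MQ_F_BLOCKING because its ->queue_rq() issues commands synchronously and may sleep. A sketch of that embedded-set variant with the usual allocate/create/unwind sequence; the my_* names and the fixed depth are assumptions, not mmc's values.

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    struct my_queue_req {
        int result;                         /* hypothetical per-request state */
    };

    struct my_queue {
        struct blk_mq_tag_set tag_set;      /* embedded, like struct mmc_queue */
        struct request_queue *queue;
    };

    static const struct blk_mq_ops my_mq_ops;   /* .queue_rq may sleep */

    static int my_init_queue(struct my_queue *mq)
    {
        int ret;

        memset(&mq->tag_set, 0, sizeof(mq->tag_set));
        mq->tag_set.ops = &my_mq_ops;
        mq->tag_set.queue_depth = 64;
        mq->tag_set.numa_node = NUMA_NO_NODE;
        /* BLOCKING: blk-mq must call ->queue_rq() from process context. */
        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        mq->tag_set.nr_hw_queues = 1;
        mq->tag_set.cmd_size = sizeof(struct my_queue_req);
        mq->tag_set.driver_data = mq;

        ret = blk_mq_alloc_tag_set(&mq->tag_set);
        if (ret)
            return ret;

        mq->queue = blk_mq_init_queue(&mq->tag_set);
        if (IS_ERR(mq->queue)) {
            ret = PTR_ERR(mq->queue);
            blk_mq_free_tag_set(&mq->tag_set);
        }
        return ret;
    }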
|
/kernel/linux/linux-5.10/block/ |
D | bsg-lib.c |
    21   struct blk_mq_tag_set tag_set;  member
    271  container_of(q->tag_set, struct bsg_set, tag_set);  in bsg_queue_rq()
    328  container_of(q->tag_set, struct bsg_set, tag_set);  in bsg_remove_queue()
    332  blk_mq_free_tag_set(&bset->tag_set);  in bsg_remove_queue()
    341  container_of(rq->q->tag_set, struct bsg_set, tag_set);  in bsg_timeout()
    380  set = &bset->tag_set;  in bsg_setup_queue()
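bsg-lib never stores a back-pointer: since q->tag_set points at the blk_mq_tag_set embedded in struct bsg_set, container_of() recovers the wrapper from any queue or request. The same trick, sketched with a hypothetical wrapper type:

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>

    struct my_set {
        struct blk_mq_tag_set tag_set;      /* must be the embedded member */
        void *private;
    };

    static struct my_set *to_my_set(struct request_queue *q)
    {
        /* q->tag_set aims into a my_set, so step back to the container. */
        return container_of(q->tag_set, struct my_set, tag_set);
    }

    static struct my_set *rq_to_my_set(struct request *rq)
    {
        return to_my_set(rq->q);            /* bsg_timeout() does exactly this */
    }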
|
D | blk-mq-sched.c |
    521  struct blk_mq_tag_set *set = q->tag_set;  in blk_mq_sched_alloc_tags()
    566  q->nr_requests = q->tag_set->queue_depth;  in blk_mq_init_sched()
    575  q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,  in blk_mq_init_sched()
    624  blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);  in blk_mq_sched_free_requests()
|
D | elevator.c |
    619  (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))  in elv_support_iosched()
    630  if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)  in elevator_get_default()
|
/kernel/linux/linux-5.10/drivers/mtd/ubi/ |
D | block.c |
    89   struct blk_mq_tag_set tag_set;  member
    419  dev->tag_set.ops = &ubiblock_mq_ops;  in ubiblock_create()
    420  dev->tag_set.queue_depth = 64;  in ubiblock_create()
    421  dev->tag_set.numa_node = NUMA_NO_NODE;  in ubiblock_create()
    422  dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in ubiblock_create()
    423  dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);  in ubiblock_create()
    424  dev->tag_set.driver_data = dev;  in ubiblock_create()
    425  dev->tag_set.nr_hw_queues = 1;  in ubiblock_create()
    427  ret = blk_mq_alloc_tag_set(&dev->tag_set);  in ubiblock_create()
    433  dev->rq = blk_mq_init_queue(&dev->tag_set);  in ubiblock_create()
    [all …]
|
/kernel/linux/linux-5.10/drivers/block/paride/ |
D | pd.c |
    239  struct blk_mq_tag_set tag_set;  member
    905  memset(&disk->tag_set, 0, sizeof(disk->tag_set));  in pd_probe_drive()
    906  disk->tag_set.ops = &pd_mq_ops;  in pd_probe_drive()
    907  disk->tag_set.cmd_size = sizeof(struct pd_req);  in pd_probe_drive()
    908  disk->tag_set.nr_hw_queues = 1;  in pd_probe_drive()
    909  disk->tag_set.nr_maps = 1;  in pd_probe_drive()
    910  disk->tag_set.queue_depth = 2;  in pd_probe_drive()
    911  disk->tag_set.numa_node = NUMA_NO_NODE;  in pd_probe_drive()
    912  disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;  in pd_probe_drive()
    914  if (blk_mq_alloc_tag_set(&disk->tag_set))  in pd_probe_drive()
    [all …]
|
/kernel/linux/linux-5.10/drivers/s390/block/ |
D | scm_blk.c |
    454  bdev->tag_set.ops = &scm_mq_ops;  in scm_blk_dev_setup()
    455  bdev->tag_set.cmd_size = sizeof(blk_status_t);  in scm_blk_dev_setup()
    456  bdev->tag_set.nr_hw_queues = nr_requests;  in scm_blk_dev_setup()
    457  bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;  in scm_blk_dev_setup()
    458  bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in scm_blk_dev_setup()
    459  bdev->tag_set.numa_node = NUMA_NO_NODE;  in scm_blk_dev_setup()
    461  ret = blk_mq_alloc_tag_set(&bdev->tag_set);  in scm_blk_dev_setup()
    465  rq = blk_mq_init_queue(&bdev->tag_set);  in scm_blk_dev_setup()
    510  blk_mq_free_tag_set(&bdev->tag_set);  in scm_blk_dev_setup()
    520  blk_mq_free_tag_set(&bdev->tag_set);  in scm_blk_dev_cleanup()
|
/kernel/linux/linux-5.10/include/scsi/ |
D | scsi_tcq.h |
    32   if (hwq < shost->tag_set.nr_hw_queues) {  in scsi_host_find_tag()
    33   req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],  in scsi_host_find_tag()
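scsi_host_find_tag() decodes a "unique tag" that packs the hardware-queue index into its upper 16 bits, bounds-checks the queue, then resolves the per-queue tag with blk_mq_tag_to_rq(). A sketch of the same decode sequence; my_find_request is illustrative, not the kernel helper:

    #include <linux/blk-mq.h>

    static struct request *my_find_request(struct blk_mq_tag_set *set,
                                           u32 unique_tag)
    {
        u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);

        /* Reject stale tags that name a queue this set never had. */
        if (hwq >= set->nr_hw_queues)
            return NULL;
        return blk_mq_tag_to_rq(set->tags[hwq],
                                blk_mq_unique_tag_to_tag(unique_tag));
    }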
|
/kernel/linux/linux-5.10/drivers/block/ |
D | virtio_blk.c |
    51   struct blk_mq_tag_set tag_set;  member
    767  memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));  in virtblk_probe()
    768  vblk->tag_set.ops = &virtio_mq_ops;  in virtblk_probe()
    769  vblk->tag_set.queue_depth = virtblk_queue_depth;  in virtblk_probe()
    770  vblk->tag_set.numa_node = NUMA_NO_NODE;  in virtblk_probe()
    771  vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in virtblk_probe()
    772  vblk->tag_set.cmd_size =  in virtblk_probe()
    775  vblk->tag_set.driver_data = vblk;  in virtblk_probe()
    776  vblk->tag_set.nr_hw_queues = vblk->num_vqs;  in virtblk_probe()
    778  err = blk_mq_alloc_tag_set(&vblk->tag_set);  in virtblk_probe()
    [all …]
|
D | sx8.c |
    278   struct blk_mq_tag_set tag_set;  member
    926   rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);  in carm_handle_resp()
    1363  q = blk_mq_init_queue(&host->tag_set);  in carm_init_disk()
    1459  memset(&host->tag_set, 0, sizeof(host->tag_set));  in carm_init_one()
    1460  host->tag_set.ops = &carm_mq_ops;  in carm_init_one()
    1461  host->tag_set.cmd_size = sizeof(struct carm_request);  in carm_init_one()
    1462  host->tag_set.nr_hw_queues = 1;  in carm_init_one()
    1463  host->tag_set.nr_maps = 1;  in carm_init_one()
    1464  host->tag_set.queue_depth = max_queue;  in carm_init_one()
    1465  host->tag_set.numa_node = NUMA_NO_NODE;  in carm_init_one()
    [all …]
|
D | nbd.c |
    106   struct blk_mq_tag_set tag_set;  member
    234   blk_mq_free_tag_set(&nbd->tag_set);  in nbd_dev_remove()
    411   (config->num_connections == 1 && nbd->tag_set.timeout)) {  in nbd_xmit_timeout()
    444   if (!nbd->tag_set.timeout) {  in nbd_xmit_timeout()
    728   if (hwq < nbd->tag_set.nr_hw_queues)  in nbd_handle_reply()
    729   req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],  in nbd_handle_reply()
    886   blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);  in nbd_clear_que()
    1182  if (nbd->tag_set.timeout)  in nbd_reconnect_socket()
    1183  sock->sk->sk_sndtimeo = nbd->tag_set.timeout;  in nbd_reconnect_socket()
    1308  nbd->tag_set.timeout = 0;  in nbd_config_put()
    [all …]
|
D | z2ram.c |
    340  static struct blk_mq_tag_set tag_set;  variable
    363  z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16,  in z2_init()
    399  blk_mq_free_tag_set(&tag_set);  in z2_exit()
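blk_mq_init_sq_queue(), used here and by several other small drivers in this list (mtd_blkdevs, ps3disk, gdrom), is the 5.10 convenience wrapper for the single-hardware-queue case: it zeroes the tag set, fills in ops, depth and flags, calls blk_mq_alloc_tag_set() and creates the queue in one step. Roughly, with hypothetical my_* names:

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    static struct blk_mq_tag_set tag_set;       /* file-scope, as in z2ram */
    static const struct blk_mq_ops my_mq_ops;

    static struct request_queue *my_create_queue(void)
    {
        /* Depth 16 matches the z2_init() call above; returns ERR_PTR. */
        return blk_mq_init_sq_queue(&tag_set, &my_mq_ops, 16,
                                    BLK_MQ_F_SHOULD_MERGE);
    }

    static void my_destroy_queue(struct request_queue *q)
    {
        blk_cleanup_queue(q);
        blk_mq_free_tag_set(&tag_set);  /* pairs with the helper's alloc */
    }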
|
D | null_blk_main.c |
    73    static struct blk_mq_tag_set tag_set;  variable
    338   set = nullb->tag_set;  in nullb_apply_submit_queues()
    1593  nullb->tag_set == &nullb->__tag_set)  in null_del_dev()
    1594  blk_mq_free_tag_set(nullb->tag_set);  in null_del_dev()
    1828  nullb->tag_set = &tag_set;  in null_add_dev()
    1831  nullb->tag_set = &nullb->__tag_set;  in null_add_dev()
    1832  rv = null_init_tag_set(nullb, nullb->tag_set);  in null_add_dev()
    1841  nullb->tag_set->timeout = 5 * HZ;  in null_add_dev()
    1842  nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb);  in null_add_dev()
    1912  if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)  in null_add_dev()
    [all …]
|
D | xen-blkfront.c |
    224   struct blk_mq_tag_set tag_set;  member
    996   memset(&info->tag_set, 0, sizeof(info->tag_set));  in xlvbd_init_blk_queue()
    997   info->tag_set.ops = &blkfront_mq_ops;  in xlvbd_init_blk_queue()
    998   info->tag_set.nr_hw_queues = info->nr_rings;  in xlvbd_init_blk_queue()
    1006  info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;  in xlvbd_init_blk_queue()
    1008  info->tag_set.queue_depth = BLK_RING_SIZE(info);  in xlvbd_init_blk_queue()
    1009  info->tag_set.numa_node = NUMA_NO_NODE;  in xlvbd_init_blk_queue()
    1010  info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in xlvbd_init_blk_queue()
    1011  info->tag_set.cmd_size = sizeof(struct blkif_req);  in xlvbd_init_blk_queue()
    1012  info->tag_set.driver_data = info;  in xlvbd_init_blk_queue()
    [all …]
|
D | skd_main.c |
    216   struct blk_mq_tag_set tag_set;  member
    395   blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);  in skd_in_flight()
    1521  WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],  in skd_isr_completion_posted()
    1527  rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);  in skd_isr_completion_posted()
    1919  blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);  in skd_recover_requests()
    2837  memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));  in skd_cons_disk()
    2838  skdev->tag_set.ops = &skd_mq_ops;  in skd_cons_disk()
    2839  skdev->tag_set.nr_hw_queues = 1;  in skd_cons_disk()
    2840  skdev->tag_set.queue_depth = skd_max_queue_depth;  in skd_cons_disk()
    2841  skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +  in skd_cons_disk()
    [all …]
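skd_in_flight() shows blk_mq_tagset_busy_iter() used for plain accounting rather than teardown: the callback just bumps a counter passed through the priv argument. A self-contained version of that idea:

    #include <linux/blk-mq.h>

    static bool my_count_one(struct request *rq, void *data, bool reserved)
    {
        unsigned int *count = data;

        (*count)++;
        return true;                        /* visit every busy request */
    }

    static unsigned int my_in_flight(struct blk_mq_tag_set *set)
    {
        unsigned int count = 0;

        blk_mq_tagset_busy_iter(set, my_count_one, &count);
        return count;
    }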
|
D | ps3disk.c |
    33   struct blk_mq_tag_set tag_set;  member
    444  queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1,  in ps3disk_probe()
    495  blk_mq_free_tag_set(&priv->tag_set);  in ps3disk_probe()
    521  blk_mq_free_tag_set(&priv->tag_set);  in ps3disk_remove()
|
/kernel/linux/linux-5.10/drivers/mtd/ |
D | mtd_blkdevs.c |
    35   blk_mq_free_tag_set(dev->tag_set);  in blktrans_dev_release()
    36   kfree(dev->tag_set);  in blktrans_dev_release()
    426  new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);  in add_mtd_blktrans_dev()
    427  if (!new->tag_set)  in add_mtd_blktrans_dev()
    430  new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,  in add_mtd_blktrans_dev()
    467  kfree(new->tag_set);  in add_mtd_blktrans_dev()
|
/kernel/linux/linux-5.10/drivers/block/rnbd/ |
D | rnbd-clt.c |
    746   if (sess->tag_set.tags)  in destroy_mq_tags()
    747   blk_mq_free_tag_set(&sess->tag_set);  in destroy_mq_tags()
    1170  struct blk_mq_tag_set *tag_set = &sess->tag_set;  in setup_mq_tags()  local
    1172  memset(tag_set, 0, sizeof(*tag_set));  in setup_mq_tags()
    1173  tag_set->ops = &rnbd_mq_ops;  in setup_mq_tags()
    1174  tag_set->queue_depth = sess->queue_depth;  in setup_mq_tags()
    1175  tag_set->numa_node = NUMA_NO_NODE;  in setup_mq_tags()
    1176  tag_set->flags = BLK_MQ_F_SHOULD_MERGE |  in setup_mq_tags()
    1178  tag_set->cmd_size = sizeof(struct rnbd_iu);  in setup_mq_tags()
    1179  tag_set->nr_hw_queues = num_online_cpus();  in setup_mq_tags()
    [all …]
|
/kernel/linux/linux-5.10/drivers/scsi/ |
D | scsi_lib.c |
    1784  struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);  in scsi_map_queues()
    1881  sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);  in scsi_mq_alloc_queue()
    1894  struct blk_mq_tag_set *tag_set = &shost->tag_set;  in scsi_mq_setup_tags()  local
    1903  memset(tag_set, 0, sizeof(*tag_set));  in scsi_mq_setup_tags()
    1905  tag_set->ops = &scsi_mq_ops;  in scsi_mq_setup_tags()
    1907  tag_set->ops = &scsi_mq_ops_no_commit;  in scsi_mq_setup_tags()
    1908  tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;  in scsi_mq_setup_tags()
    1909  tag_set->queue_depth = shost->can_queue;  in scsi_mq_setup_tags()
    1910  tag_set->cmd_size = cmd_size;  in scsi_mq_setup_tags()
    1911  tag_set->numa_node = NUMA_NO_NODE;  in scsi_mq_setup_tags()
    [all …]
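The SCSI pattern is worth calling out: the host owns a single tag set sized by can_queue, and each attached device then creates its request_queue from that same set in scsi_mq_alloc_queue(), so every LUN on the host shares one tag space. A compressed sketch of the two halves; the my_* helpers are placeholders, not the kernel functions:

    #include <linux/blk-mq.h>
    #include <linux/string.h>

    /* Host side: one set for the whole adapter, as scsi_mq_setup_tags(). */
    static int my_host_setup_tags(struct blk_mq_tag_set *set,
                                  const struct blk_mq_ops *ops,
                                  unsigned int can_queue,
                                  unsigned int cmd_size)
    {
        memset(set, 0, sizeof(*set));
        set->ops = ops;
        set->nr_hw_queues = 1;          /* shost->nr_hw_queues ?: 1 upstream */
        set->queue_depth = can_queue;
        set->cmd_size = cmd_size;
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_SHOULD_MERGE;
        return blk_mq_alloc_tag_set(set);
    }

    /* Device side: every queue built from the shared set draws on its tags. */
    static struct request_queue *my_device_queue(struct blk_mq_tag_set *set)
    {
        return blk_mq_init_queue(set);
    }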
|
D | hosts.c |
    347  if (shost->tag_set.tags)  in scsi_host_dev_release()
    591  blk_mq_tagset_busy_iter(&shost->tag_set,  in scsi_host_busy()
    690  blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,  in scsi_host_complete_all_commands()
    727  blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,  in scsi_host_busy_iter()
|
/kernel/linux/linux-5.10/arch/um/drivers/ |
D | ubd_kern.c |
    169  struct blk_mq_tag_set tag_set;  member
    867  blk_mq_free_tag_set(&ubd_dev->tag_set);  in ubd_device_release()
    930  ubd_dev->tag_set.ops = &ubd_mq_ops;  in ubd_add()
    931  ubd_dev->tag_set.queue_depth = 64;  in ubd_add()
    932  ubd_dev->tag_set.numa_node = NUMA_NO_NODE;  in ubd_add()
    933  ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;  in ubd_add()
    934  ubd_dev->tag_set.driver_data = ubd_dev;  in ubd_add()
    935  ubd_dev->tag_set.nr_hw_queues = 1;  in ubd_add()
    937  err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);  in ubd_add()
    941  ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);  in ubd_add()
    [all …]
|
/kernel/linux/linux-5.10/drivers/nvme/host/ |
D | fc.c |
    167   struct blk_mq_tag_set tag_set;  member
    2121  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_fc_init_request()
    2376  blk_mq_free_tag_set(&ctrl->tag_set);  in nvme_fc_ctrl_free()
    2490  blk_mq_tagset_busy_iter(&ctrl->tag_set,  in __nvme_fc_abort_outstanding_ios()
    2492  blk_mq_tagset_wait_completed_request(&ctrl->tag_set);  in __nvme_fc_abort_outstanding_ios()
    2877  memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));  in nvme_fc_create_io_queues()
    2878  ctrl->tag_set.ops = &nvme_fc_mq_ops;  in nvme_fc_create_io_queues()
    2879  ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;  in nvme_fc_create_io_queues()
    2880  ctrl->tag_set.reserved_tags = 1; /* fabric connect */  in nvme_fc_create_io_queues()
    2881  ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;  in nvme_fc_create_io_queues()
    [all …]
|
/kernel/linux/linux-5.10/drivers/cdrom/ |
D | gdrom.c |
    105  struct blk_mq_tag_set tag_set;  member
    791  gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,  in probe_gdrom()
    815  blk_mq_free_tag_set(&gd.tag_set);  in probe_gdrom()
    834  blk_mq_free_tag_set(&gd.tag_set);  in remove_gdrom()
|