/drivers/target/ |
D | target_core_file.c |
    263  struct se_device *se_dev = cmd->se_dev;  in fd_do_prot_rw()  local
    264  struct fd_dev *dev = FD_DEV(se_dev);  in fd_do_prot_rw()
    266  loff_t pos = (cmd->t_task_lba * se_dev->prot_length);  in fd_do_prot_rw()
    271  prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *  in fd_do_prot_rw()
    272          se_dev->prot_length;  in fd_do_prot_rw()
    321  struct se_device *se_dev = cmd->se_dev;  in fd_do_rw()  local
    322  struct fd_dev *dev = FD_DEV(se_dev);  in fd_do_rw()
    327  loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);  in fd_do_rw()
    388  struct se_device *dev = cmd->se_dev;  in fd_execute_sync_cache()
    434  struct se_device *se_dev = cmd->se_dev;  in fd_setup_write_same_buf()  local
    [all …]
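The fd_do_prot_rw() lines above turn the command's starting LBA into a byte offset for the protection data and size the PI buffer from the data transfer length. A minimal, self-contained sketch of that arithmetic follows; the struct and field names are simplified stand-ins, not the real se_cmd/se_device definitions.

#include <stdint.h>
#include <stdio.h>

struct toy_dev {
	uint32_t block_size;   /* logical block size in bytes */
	uint32_t prot_length;  /* protection-information bytes per block */
};

struct toy_cmd {
	uint64_t t_task_lba;   /* starting LBA of the command */
	uint32_t data_length;  /* data payload length in bytes */
};

int main(void)
{
	struct toy_dev dev = { .block_size = 512, .prot_length = 8 };
	struct toy_cmd cmd = { .t_task_lba = 100, .data_length = 4096 };

	/* byte offset of this LBA's PI tuples in the backing protection store */
	uint64_t pos = cmd.t_task_lba * dev.prot_length;

	/* one prot_length-sized tuple per logical block being transferred */
	uint64_t prot_size =
		(uint64_t)(cmd.data_length / dev.block_size) * dev.prot_length;

	printf("pos=%llu prot_size=%llu\n",
	       (unsigned long long)pos, (unsigned long long)prot_size);
	return 0;
}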
|
D | target_core_xcopy.c |
    68   struct se_device *se_dev;  in target_xcopy_locate_se_dev_e4()  local
    79   list_for_each_entry(se_dev, &g_device_list, g_dev_node) {  in target_xcopy_locate_se_dev_e4()
    81   if (!se_dev->dev_attrib.emulate_3pc)  in target_xcopy_locate_se_dev_e4()
    85   target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);  in target_xcopy_locate_se_dev_e4()
    92   xop->dst_dev = se_dev;  in target_xcopy_locate_se_dev_e4()
    96   xop->src_dev = se_dev;  in target_xcopy_locate_se_dev_e4()
    102  &se_dev->dev_group.cg_item);  in target_xcopy_locate_se_dev_e4()
    105  " %d for se_dev: %p\n", rc, se_dev);  in target_xcopy_locate_se_dev_e4()
    111  " se_dev->se_dev_group: %p\n", subsys, se_dev,  in target_xcopy_locate_se_dev_e4()
    112  &se_dev->dev_group);  in target_xcopy_locate_se_dev_e4()
    [all …]
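target_xcopy_locate_se_dev_e4() above walks the global device list, skips devices that do not advertise third-party copy (emulate_3pc), generates each device's NAA IEEE WWN and compares it against the WWN from the EXTENDED COPY target descriptor before recording it as the copy source or destination and taking a reference. A rough userspace sketch of that matching loop; the types, the 16-byte WWN length, and the plain array standing in for the kernel list are illustrative assumptions.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define TOY_WWN_LEN 16  /* assumed length of the NAA WWN being compared */

struct toy_dev {
	bool emulate_3pc;                /* third-party copy enabled? */
	unsigned char wwn[TOY_WWN_LEN];  /* device's generated NAA WWN */
};

/* return the first eligible device whose WWN matches the descriptor's WWN */
struct toy_dev *locate_dev_e4(struct toy_dev *devs, size_t count,
			      const unsigned char *wanted_wwn)
{
	for (size_t i = 0; i < count; i++) {
		if (!devs[i].emulate_3pc)
			continue;        /* 3PC disabled: not eligible */
		if (memcmp(devs[i].wwn, wanted_wwn, TOY_WWN_LEN) == 0)
			return &devs[i]; /* the kernel takes a reference here */
	}
	return NULL;                     /* no matching device found */
}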
|
D | target_core_spc.c |
    73   struct se_device *dev = cmd->se_dev;  in spc_emulate_inquiry_std()
    131  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_80()
    180  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_83()
    474  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_86()
    496  spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);  in spc_emulate_evpd_86()
    499  spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);  in spc_emulate_evpd_86()
    507  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_b0()
    594  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_b1()
    607  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_b2()
    659  struct se_device *dev = cmd->se_dev;  in spc_emulate_evpd_b3()
    [all …]
|
D | target_core_pr.c |
    86    struct se_device *dev = cmd->se_dev;  in target_scsi2_reservation_check()
    119   struct se_device *dev = cmd->se_dev;  in target_check_scsi2_reservation_conflict()
    124   pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,  in target_check_scsi2_reservation_conflict()
    190   struct se_device *dev = cmd->se_dev;  in target_scsi2_reservation_release()
    235   struct se_device *dev = cmd->se_dev;  in target_scsi2_reservation_reserve()
    546   struct se_device *dev = cmd->se_dev;  in target_scsi3_pr_reservation_check()
    1421  struct se_device *dev = cmd->se_dev;  in core_scsi3_decode_spec_i_port()
    1459  local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,  in core_scsi3_decode_spec_i_port()
    1696  dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,  in core_scsi3_decode_spec_i_port()
    1744  __core_scsi3_add_registration(cmd->se_dev, dest_node_acl,  in core_scsi3_decode_spec_i_port()
    [all …]
|
D | target_core_sbc.c |
    42   struct se_device *dev = cmd->se_dev;  in sbc_emulate_readcapacity()
    91   struct se_device *dev = cmd->se_dev;  in sbc_emulate_readcapacity_16()
    162  return cmd->se_dev->transport->get_blocks(cmd->se_dev) -  in sbc_get_write_same_sectors()
    176  return cmd->se_dev->dev_attrib.block_size * sectors;  in sbc_get_size()
    254  struct se_device *dev = cmd->se_dev;  in sbc_setup_write_same()
    264  if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {  in sbc_setup_write_same()
    266  sectors, cmd->se_dev->dev_attrib.max_write_same_len);  in sbc_setup_write_same()
    370  struct se_device *dev = cmd->se_dev;  in compare_and_write_post()
    396  struct se_device *dev = cmd->se_dev;  in compare_and_write_callback()
    546  struct se_device *dev = cmd->se_dev;  in sbc_compare_and_write()
    [all …]
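The sbc_get_size() and sbc_setup_write_same() lines above are straight block arithmetic: a transfer's byte size is block_size times its sector count, and a WRITE SAME is rejected when its sector count exceeds the device's max_write_same_len attribute. A small sketch under those assumptions, using toy types rather than the kernel structs:

#include <stdbool.h>
#include <stdint.h>

struct toy_attrib {
	uint32_t block_size;          /* bytes per logical block */
	uint32_t max_write_same_len;  /* largest allowed WRITE SAME, in blocks */
};

/* sbc_get_size()-style helper: sectors -> bytes */
uint64_t toy_get_size(const struct toy_attrib *attrib, uint32_t sectors)
{
	return (uint64_t)attrib->block_size * sectors;
}

/* mirror of the bounds check visible at line 264 above */
bool toy_write_same_ok(const struct toy_attrib *attrib, uint32_t sectors)
{
	return sectors <= attrib->max_write_same_len;
}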
|
D | target_core_rd.c |
    388  struct se_device *se_dev = cmd->se_dev;  in rd_execute_rw()  local
    389  struct rd_dev *dev = RD_DEV(se_dev);  in rd_execute_rw()
    405  tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;  in rd_execute_rw()
    424  u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;  in rd_execute_rw()
    427  tmp = cmd->t_task_lba * se_dev->prot_length;  in rd_execute_rw()
    507  u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;  in rd_execute_rw()
    510  tmp = cmd->t_task_lba * se_dev->prot_length;  in rd_execute_rw()
|
D | target_core_transport.c |
    520   struct se_device *dev = cmd->se_dev;  in target_remove_from_state_list()
    643   struct se_device *dev = cmd->se_dev;  in transport_get_sense_buffer()
    662   struct se_device *dev = cmd->se_dev;  in target_complete_cmd()
    740   struct se_device *dev = cmd->se_dev;  in target_add_to_state_list()
    1080  struct se_device *dev = cmd->se_dev;  in target_cmd_size_check()
    1164  struct se_device *dev = cmd->se_dev;  in transport_check_alloc_task_attr()
    1192  struct se_device *dev = cmd->se_dev;  in target_setup_cmd_from_cdb()
    1671  cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)  in transport_generic_request_failure()
    1700  transport_handle_queue_full(cmd, cmd->se_dev);  in transport_generic_request_failure()
    1722  struct se_device *dev = cmd->se_dev;  in target_handle_task_attr()
    [all …]
|
D | target_core_iblock.c |
    337  struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);  in iblock_get_bio()
    396  struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);  in iblock_execute_sync_cache()
    435  struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;  in iblock_execute_unmap()
    443  struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;  in iblock_execute_write_same_unmap()
    469  sg->length != cmd->se_dev->dev_attrib.block_size) {  in iblock_execute_write_same()
    472  cmd->se_dev->dev_attrib.block_size);  in iblock_execute_write_same()
    626  struct se_device *dev = cmd->se_dev;  in iblock_alloc_bip()
    672  struct se_device *dev = cmd->se_dev;  in iblock_execute_rw()
|
D | target_core_user.c |
    82    struct se_device se_dev;  member
    118   #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
    162   struct se_device *se_dev = se_cmd->se_dev;  in tcmu_alloc_cmd()  local
    163   struct tcmu_dev *udev = TCMU_DEV(se_dev);  in tcmu_alloc_cmd()
    429   struct se_device *se_dev = se_cmd->se_dev;  in tcmu_queue_cmd()  local
    430   struct tcmu_dev *udev = TCMU_DEV(se_dev);  in tcmu_queue_cmd()
    660   return &udev->se_dev;  in tcmu_alloc_device()
    1052  struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);  in tcmu_parse_cdb()
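The TCMU_DEV() macro above is the same accessor pattern behind FD_DEV(), RD_DEV(), IBLOCK_DEV() and PSCSI_DEV() elsewhere in this listing: the backend embeds the generic struct se_device as a member and recovers its own private structure with container_of(). A self-contained userspace sketch of that pattern, with toy types and a local container_of definition:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_device_toy {
	unsigned int block_size;
};

struct tcmu_dev_toy {
	int ring_pages;               /* backend-private state */
	struct se_device_toy se_dev;  /* embedded generic device */
};

#define TCMU_DEV_TOY(_se_dev) \
	container_of(_se_dev, struct tcmu_dev_toy, se_dev)

int main(void)
{
	struct tcmu_dev_toy udev = { .ring_pages = 64,
				     .se_dev = { .block_size = 512 } };
	/* the core only ever sees a pointer to the embedded member ... */
	struct se_device_toy *se_dev = &udev.se_dev;

	/* ... and the backend recovers its private struct from that pointer */
	struct tcmu_dev_toy *back = TCMU_DEV_TOY(se_dev);
	printf("ring_pages=%d block_size=%u\n",
	       back->ring_pages, back->se_dev.block_size);
	return 0;
}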
|
D | target_core_alua.c |
    68   struct se_device *dev = cmd->se_dev;  in target_emulate_report_referrals()
    148  struct se_device *dev = cmd->se_dev;  in target_emulate_report_target_port_groups()
    287  struct se_device *dev = cmd->se_dev;  in target_emulate_set_target_port_groups()
    490  struct se_device *dev = cmd->se_dev;  in core_alua_state_lba_dependent()
    697  struct se_device *dev = cmd->se_dev;  in target_alua_state_check()
|
D | target_core_pscsi.c |
    599   struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);  in pscsi_transport_complete()
    853   struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);  in pscsi_map_sg()
    1019  struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);  in pscsi_execute_cmd()
|
D | target_core_ua.c |
    200  struct se_device *dev = cmd->se_dev;  in core_scsi3_ua_for_check_condition()
|
D | target_core_tmr.c |
    177  if (dev != se_cmd->se_dev)  in core_tmr_abort_task()
|
D | target_core_device.c |
    131  se_cmd->se_dev = se_lun->lun_se_dev;  in transport_lookup_cmd_lun()
    177  se_cmd->se_dev = se_lun->lun_se_dev;  in transport_lookup_tmr_lun()
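The two assignments above are where cmd->se_dev gets its value in the first place: when the fabric resolves a command's LUN, the LUN's backing device is cached in the command, and every cmd->se_dev dereference in the files listed earlier reads that pointer. A toy illustration under simplified, hypothetical types:

#include <stddef.h>

struct toy_device { unsigned int block_size; };

struct toy_lun {
	struct toy_device *lun_se_dev;  /* device backing this LUN */
};

struct toy_cmd {
	struct toy_lun    *se_lun;
	struct toy_device *se_dev;      /* filled in at LUN lookup time */
};

/* simplified shape of the lookup path: resolve the LUN, cache its device */
int toy_lookup_cmd_lun(struct toy_cmd *cmd, struct toy_lun *lun)
{
	if (!lun || !lun->lun_se_dev)
		return -1;              /* no such LUN / no backing device */
	cmd->se_lun = lun;
	cmd->se_dev = lun->lun_se_dev;
	return 0;
}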
|
/drivers/scsi/qla2xxx/ |
D | tcm_qla2xxx.c |
    494  cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;  in tcm_qla2xxx_write_pending()
    673  cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;  in tcm_qla2xxx_queue_data_in()
|
/drivers/infiniband/ulp/isert/ |
D | ib_isert.c |
    1840  u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;  in isert_check_pi_status()
    2759  domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;  in isert_set_dif_domain()
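The extra 8 bytes added to block_size in isert_check_pi_status() above correspond to the 8-byte T10 protection-information tuple (2-byte guard tag, 2-byte application tag, 4-byte reference tag) that accompanies each logical block when DIF is interleaved with the data, and isert_set_dif_domain() sets the PI interval to the block size for the same reason. A tiny sketch of the resulting on-the-wire sizes; the concrete numbers are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define PI_TUPLE_SIZE 8u  /* bytes of protection information per block */

int main(void)
{
	uint32_t block_size = 512;
	uint32_t blocks = 8;

	/* bytes per block when the PI tuple travels with the data */
	uint32_t protected_block = block_size + PI_TUPLE_SIZE;

	printf("protected block: %u bytes, %u blocks: %u bytes\n",
	       protected_block, blocks, blocks * protected_block);
	return 0;
}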
|