Lines Matching +full:usb +full:- +full:sdp
9 * - Drew Eckhardt <drew@colorado.edu> original
10 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
12 * Support loadable low-level scsi drivers.
13 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
15 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
16 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
18 * - Alex Davis <letmein@erols.com> Fix problem where partition info
21 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
22 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
27 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
28 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
29 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1
30 * - entering other commands: SCSI_LOG_HLQUEUE level 3
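These SCSI_LOG_* categories are selected through the single scsi_logging_level bitfield exported by scsi_mod. A minimal user-space sketch of how the levels listed above could be packed into that word, assuming the usual 3-bits-per-facility layout and the HLQUEUE/HLCOMPLETE/IOCTL shift values from the SCSI logging header (neither of which appears in this listing):

/* Hedged sketch: compose a scsi_logging_level value covering the sd trace
 * points listed above.  The *_SHIFT constants are assumptions (3 bits per
 * facility); verify them against the kernel headers before writing the
 * result to /sys/module/scsi_mod/parameters/scsi_logging_level. */
#include <stdio.h>

#define SCSI_LOG_HLQUEUE_SHIFT    21	/* assumed */
#define SCSI_LOG_HLCOMPLETE_SHIFT 24	/* assumed */
#define SCSI_LOG_IOCTL_SHIFT      27	/* assumed */

int main(void)
{
	unsigned int level =
		(3u << SCSI_LOG_HLQUEUE_SHIFT) |	/* transfer setup + other commands */
		(1u << SCSI_LOG_HLCOMPLETE_SHIFT) |	/* end of transfer */
		(1u << SCSI_LOG_IOCTL_SHIFT);		/* sd_ioctl entry */

	printf("%u\n", level);
	return 0;
}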
53 #include <linux/sed-opal.h>
56 #include <linux/t10-pi.h>
128 /* This semaphore is used to mediate the 0->1 reference get in the
146 if (sdkp->WCE) { in sd_set_flush_flag()
148 if (sdkp->DPOFUA) in sd_set_flush_flag()
152 blk_queue_write_cache(sdkp->disk->queue, wc, fua); in sd_set_flush_flag()
161 struct scsi_device *sdp = sdkp->device; in cache_type_store() local
169 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) in cache_type_store()
173 return -EINVAL; in cache_type_store()
175 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { in cache_type_store()
176 buf += sizeof(temp) - 1; in cache_type_store()
177 sdkp->cache_override = 1; in cache_type_store()
179 sdkp->cache_override = 0; in cache_type_store()
184 return -EINVAL; in cache_type_store()
187 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; in cache_type_store()
189 if (sdkp->cache_override) { in cache_type_store()
190 sdkp->WCE = wce; in cache_type_store()
191 sdkp->RCD = rcd; in cache_type_store()
196 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, in cache_type_store()
198 return -EINVAL; in cache_type_store()
199 len = min_t(size_t, sizeof(buffer), data.length - data.header_length - in cache_type_store()
214 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, in cache_type_store()
218 return -EINVAL; in cache_type_store()
220 revalidate_disk(sdkp->disk); in cache_type_store()
229 struct scsi_device *sdp = sdkp->device; in manage_start_stop_show() local
231 return sprintf(buf, "%u\n", sdp->manage_start_stop); in manage_start_stop_show()
239 struct scsi_device *sdp = sdkp->device; in manage_start_stop_store() local
243 return -EACCES; in manage_start_stop_store()
246 return -EINVAL; in manage_start_stop_store()
248 sdp->manage_start_stop = v; in manage_start_stop_store()
259 return sprintf(buf, "%u\n", sdkp->device->allow_restart); in allow_restart_show()
268 struct scsi_device *sdp = sdkp->device; in allow_restart_store() local
271 return -EACCES; in allow_restart_store()
273 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) in allow_restart_store()
274 return -EINVAL; in allow_restart_store()
277 return -EINVAL; in allow_restart_store()
279 sdp->allow_restart = v; in allow_restart_store()
289 int ct = sdkp->RCD + 2*sdkp->WCE; in cache_type_show()
300 return sprintf(buf, "%u\n", sdkp->DPOFUA); in FUA_show()
310 return sprintf(buf, "%u\n", sdkp->protection_type); in protection_type_show()
322 return -EACCES; in protection_type_store()
330 sdkp->protection_type = val; in protection_type_store()
341 struct scsi_device *sdp = sdkp->device; in protection_mode_show() local
344 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); in protection_mode_show()
345 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); in protection_mode_show()
347 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) { in protection_mode_show()
364 return sprintf(buf, "%u\n", sdkp->ATO); in app_tag_own_show()
374 return sprintf(buf, "%u\n", sdkp->lbpme); in thin_provisioning_show()
394 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); in provisioning_mode_show()
402 struct scsi_device *sdp = sdkp->device; in provisioning_mode_store() local
406 return -EACCES; in provisioning_mode_store()
413 if (sdp->type != TYPE_DISK) in provisioning_mode_store()
414 return -EINVAL; in provisioning_mode_store()
418 return -EINVAL; in provisioning_mode_store()
440 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); in zeroing_mode_show()
451 return -EACCES; in zeroing_mode_store()
455 return -EINVAL; in zeroing_mode_store()
457 sdkp->zeroing_mode = mode; in zeroing_mode_store()
469 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); in max_medium_access_timeouts_show()
481 return -EACCES; in max_medium_access_timeouts_store()
483 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts); in max_medium_access_timeouts_store()
495 return sprintf(buf, "%u\n", sdkp->max_ws_blocks); in max_write_same_blocks_show()
503 struct scsi_device *sdp = sdkp->device; in max_write_same_blocks_store() local
508 return -EACCES; in max_write_same_blocks_store()
510 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) in max_write_same_blocks_store()
511 return -EINVAL; in max_write_same_blocks_store()
519 sdp->no_write_same = 1; in max_write_same_blocks_store()
521 sdp->no_write_same = 0; in max_write_same_blocks_store()
522 sdkp->max_ws_blocks = max; in max_write_same_blocks_store()
582 * Dummy kobj_map->probe function.
583 * The default ->probe function will call modprobe, which is
595 * |............|.............|....|....| <- dev_t
598 * Inside a major, we have 16k disks, however mapped non-
 603 * the well-known SCSI majors 8, 65--71, 136--143.
611 return SCSI_DISK1_MAJOR + major_idx - 1; in sd_major()
613 return SCSI_DISK8_MAJOR + major_idx - 8; in sd_major()
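Only the two arms of sd_major() that reference major_idx match the search; a self-contained sketch of the full index-to-major mapping described by the comment above, with the SCSI_DISK*_MAJOR values taken as assumptions from include/linux/major.h:

/* Hedged reconstruction of the mapping sketched above, not copied verbatim
 * from this file: major_idx 0 maps to major 8, 1..7 to 65..71, and 8..15 to
 * the remaining SCSI disk block majors. */
#include <linux/major.h>
#include <linux/bug.h>

static int sd_major_sketch(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* unreachable; keeps the compiler quiet */
	}
}

sd_probe_async() further down feeds this with (index & 0xf0) >> 4 and builds first_minor from (index & 0xf) << 4, so each major carries sixteen disks with sixteen minors apiece, matching the dev_t diagram above.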
626 if (disk->private_data) { in scsi_disk_get()
628 if (scsi_device_get(sdkp->device) == 0) in scsi_disk_get()
629 get_device(&sdkp->dev); in scsi_disk_get()
639 struct scsi_device *sdev = sdkp->device; in scsi_disk_put()
642 put_device(&sdkp->dev); in scsi_disk_put()
663 return ret <= 0 ? ret : -EIO; in sd_sec_submit()
670 struct bio *bio = scmd->request->bio; in sd_setup_protect_cmnd()
671 unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif); in sd_setup_protect_cmnd()
676 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; in sd_setup_protect_cmnd()
679 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; in sd_setup_protect_cmnd()
683 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; in sd_setup_protect_cmnd()
686 scmd->prot_flags |= SCSI_PROT_REF_CHECK; in sd_setup_protect_cmnd()
690 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI; in sd_setup_protect_cmnd()
700 scmd->prot_flags &= sd_prot_flag_mask(prot_op); in sd_setup_protect_cmnd()
707 struct request_queue *q = sdkp->disk->queue; in sd_config_discard()
708 unsigned int logical_block_size = sdkp->device->sector_size; in sd_config_discard()
711 q->limits.discard_alignment = in sd_config_discard()
712 sdkp->unmap_alignment * logical_block_size; in sd_config_discard()
713 q->limits.discard_granularity = in sd_config_discard()
714 max(sdkp->physical_block_size, in sd_config_discard()
715 sdkp->unmap_granularity * logical_block_size); in sd_config_discard()
716 sdkp->provisioning_mode = mode; in sd_config_discard()
727 max_blocks = min_not_zero(sdkp->max_unmap_blocks, in sd_config_discard()
732 if (sdkp->device->unmap_limit_for_ws) in sd_config_discard()
733 max_blocks = sdkp->max_unmap_blocks; in sd_config_discard()
735 max_blocks = sdkp->max_ws_blocks; in sd_config_discard()
741 if (sdkp->device->unmap_limit_for_ws) in sd_config_discard()
742 max_blocks = sdkp->max_unmap_blocks; in sd_config_discard()
744 max_blocks = sdkp->max_ws_blocks; in sd_config_discard()
750 max_blocks = min_not_zero(sdkp->max_ws_blocks, in sd_config_discard()
761 struct scsi_device *sdp = cmd->device; in sd_setup_unmap_cmnd() local
762 struct request *rq = cmd->request; in sd_setup_unmap_cmnd()
763 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_unmap_cmnd()
764 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_unmap_cmnd()
768 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); in sd_setup_unmap_cmnd()
769 if (!rq->special_vec.bv_page) in sd_setup_unmap_cmnd()
771 clear_highpage(rq->special_vec.bv_page); in sd_setup_unmap_cmnd()
772 rq->special_vec.bv_offset = 0; in sd_setup_unmap_cmnd()
773 rq->special_vec.bv_len = data_len; in sd_setup_unmap_cmnd()
774 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; in sd_setup_unmap_cmnd()
776 cmd->cmd_len = 10; in sd_setup_unmap_cmnd()
777 cmd->cmnd[0] = UNMAP; in sd_setup_unmap_cmnd()
778 cmd->cmnd[8] = 24; in sd_setup_unmap_cmnd()
780 buf = page_address(rq->special_vec.bv_page); in sd_setup_unmap_cmnd()
786 cmd->allowed = SD_MAX_RETRIES; in sd_setup_unmap_cmnd()
787 cmd->transfersize = data_len; in sd_setup_unmap_cmnd()
788 rq->timeout = SD_TIMEOUT; in sd_setup_unmap_cmnd()
789 scsi_req(rq)->resid_len = data_len; in sd_setup_unmap_cmnd()
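The lines that populate the UNMAP parameter list behind buf do not contain "sdp" and are therefore absent from this listing. A sketch of that payload, assuming the standard SBC layout (an 8-byte header followed by a single 16-byte block descriptor, which is why data_len and cmnd[8] are 24):

/* Hedged sketch of the 24-byte UNMAP parameter list pointed to by buf above;
 * offsets follow the SBC UNMAP parameter-list format. */
#include <linux/types.h>
#include <asm/unaligned.h>

static void sd_fill_unmap_param_list(unsigned char *buf, u64 lba, u32 nr_blocks)
{
	put_unaligned_be16(6 + 16, &buf[0]);	  /* UNMAP data length */
	put_unaligned_be16(16, &buf[2]);	  /* block descriptor data length */
	put_unaligned_be64(lba, &buf[8]);	  /* first LBA to unmap */
	put_unaligned_be32(nr_blocks, &buf[16]);  /* number of logical blocks */
}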
796 struct scsi_device *sdp = cmd->device; in sd_setup_write_same16_cmnd() local
797 struct request *rq = cmd->request; in sd_setup_write_same16_cmnd()
798 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_same16_cmnd()
799 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_same16_cmnd()
800 u32 data_len = sdp->sector_size; in sd_setup_write_same16_cmnd()
802 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); in sd_setup_write_same16_cmnd()
803 if (!rq->special_vec.bv_page) in sd_setup_write_same16_cmnd()
805 clear_highpage(rq->special_vec.bv_page); in sd_setup_write_same16_cmnd()
806 rq->special_vec.bv_offset = 0; in sd_setup_write_same16_cmnd()
807 rq->special_vec.bv_len = data_len; in sd_setup_write_same16_cmnd()
808 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; in sd_setup_write_same16_cmnd()
810 cmd->cmd_len = 16; in sd_setup_write_same16_cmnd()
811 cmd->cmnd[0] = WRITE_SAME_16; in sd_setup_write_same16_cmnd()
813 cmd->cmnd[1] = 0x8; /* UNMAP */ in sd_setup_write_same16_cmnd()
814 put_unaligned_be64(sector, &cmd->cmnd[2]); in sd_setup_write_same16_cmnd()
815 put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); in sd_setup_write_same16_cmnd()
817 cmd->allowed = SD_MAX_RETRIES; in sd_setup_write_same16_cmnd()
818 cmd->transfersize = data_len; in sd_setup_write_same16_cmnd()
819 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; in sd_setup_write_same16_cmnd()
820 scsi_req(rq)->resid_len = data_len; in sd_setup_write_same16_cmnd()
827 struct scsi_device *sdp = cmd->device; in sd_setup_write_same10_cmnd() local
828 struct request *rq = cmd->request; in sd_setup_write_same10_cmnd()
829 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_same10_cmnd()
830 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_same10_cmnd()
831 u32 data_len = sdp->sector_size; in sd_setup_write_same10_cmnd()
833 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); in sd_setup_write_same10_cmnd()
834 if (!rq->special_vec.bv_page) in sd_setup_write_same10_cmnd()
836 clear_highpage(rq->special_vec.bv_page); in sd_setup_write_same10_cmnd()
837 rq->special_vec.bv_offset = 0; in sd_setup_write_same10_cmnd()
838 rq->special_vec.bv_len = data_len; in sd_setup_write_same10_cmnd()
839 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; in sd_setup_write_same10_cmnd()
841 cmd->cmd_len = 10; in sd_setup_write_same10_cmnd()
842 cmd->cmnd[0] = WRITE_SAME; in sd_setup_write_same10_cmnd()
844 cmd->cmnd[1] = 0x8; /* UNMAP */ in sd_setup_write_same10_cmnd()
845 put_unaligned_be32(sector, &cmd->cmnd[2]); in sd_setup_write_same10_cmnd()
846 put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); in sd_setup_write_same10_cmnd()
848 cmd->allowed = SD_MAX_RETRIES; in sd_setup_write_same10_cmnd()
849 cmd->transfersize = data_len; in sd_setup_write_same10_cmnd()
850 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; in sd_setup_write_same10_cmnd()
851 scsi_req(rq)->resid_len = data_len; in sd_setup_write_same10_cmnd()
858 struct request *rq = cmd->request; in sd_setup_write_zeroes_cmnd()
859 struct scsi_device *sdp = cmd->device; in sd_setup_write_zeroes_cmnd() local
860 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); in sd_setup_write_zeroes_cmnd()
861 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_zeroes_cmnd()
862 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_zeroes_cmnd()
864 if (!(rq->cmd_flags & REQ_NOUNMAP)) { in sd_setup_write_zeroes_cmnd()
865 switch (sdkp->zeroing_mode) { in sd_setup_write_zeroes_cmnd()
873 if (sdp->no_write_same) in sd_setup_write_zeroes_cmnd()
876 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) in sd_setup_write_zeroes_cmnd()
884 struct request_queue *q = sdkp->disk->queue; in sd_config_write_same()
885 unsigned int logical_block_size = sdkp->device->sector_size; in sd_config_write_same()
887 if (sdkp->device->no_write_same) { in sd_config_write_same()
888 sdkp->max_ws_blocks = 0; in sd_config_write_same()
897 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) in sd_config_write_same()
898 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, in sd_config_write_same()
900 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) in sd_config_write_same()
901 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, in sd_config_write_same()
904 sdkp->device->no_write_same = 1; in sd_config_write_same()
905 sdkp->max_ws_blocks = 0; in sd_config_write_same()
908 if (sdkp->lbprz && sdkp->lbpws) in sd_config_write_same()
909 sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; in sd_config_write_same()
910 else if (sdkp->lbprz && sdkp->lbpws10) in sd_config_write_same()
911 sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; in sd_config_write_same()
912 else if (sdkp->max_ws_blocks) in sd_config_write_same()
913 sdkp->zeroing_mode = SD_ZERO_WS; in sd_config_write_same()
915 sdkp->zeroing_mode = SD_ZERO_WRITE; in sd_config_write_same()
917 if (sdkp->max_ws_blocks && in sd_config_write_same()
918 sdkp->physical_block_size > logical_block_size) { in sd_config_write_same()
929 * sequential write required zones of host-managed ZBC disks. in sd_config_write_same()
931 sdkp->max_ws_blocks = in sd_config_write_same()
932 round_down(sdkp->max_ws_blocks, in sd_config_write_same()
933 bytes_to_logical(sdkp->device, in sd_config_write_same()
934 sdkp->physical_block_size)); in sd_config_write_same()
938 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks * in sd_config_write_same()
940 blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * in sd_config_write_same()
945 * sd_setup_write_same_cmnd - write the same data to multiple blocks
953 struct request *rq = cmd->request; in sd_setup_write_same_cmnd()
954 struct scsi_device *sdp = cmd->device; in sd_setup_write_same_cmnd() local
955 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); in sd_setup_write_same_cmnd()
956 struct bio *bio = rq->bio; in sd_setup_write_same_cmnd()
962 if (sdkp->device->no_write_same) in sd_setup_write_same_cmnd()
965 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); in sd_setup_write_same_cmnd()
967 sector >>= ilog2(sdp->sector_size) - 9; in sd_setup_write_same_cmnd()
968 nr_sectors >>= ilog2(sdp->sector_size) - 9; in sd_setup_write_same_cmnd()
970 rq->timeout = SD_WRITE_SAME_TIMEOUT; in sd_setup_write_same_cmnd()
972 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { in sd_setup_write_same_cmnd()
973 cmd->cmd_len = 16; in sd_setup_write_same_cmnd()
974 cmd->cmnd[0] = WRITE_SAME_16; in sd_setup_write_same_cmnd()
975 put_unaligned_be64(sector, &cmd->cmnd[2]); in sd_setup_write_same_cmnd()
976 put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); in sd_setup_write_same_cmnd()
978 cmd->cmd_len = 10; in sd_setup_write_same_cmnd()
979 cmd->cmnd[0] = WRITE_SAME; in sd_setup_write_same_cmnd()
980 put_unaligned_be32(sector, &cmd->cmnd[2]); in sd_setup_write_same_cmnd()
981 put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); in sd_setup_write_same_cmnd()
984 cmd->transfersize = sdp->sector_size; in sd_setup_write_same_cmnd()
985 cmd->allowed = SD_MAX_RETRIES; in sd_setup_write_same_cmnd()
997 rq->__data_len = sdp->sector_size; in sd_setup_write_same_cmnd()
999 rq->__data_len = nr_bytes; in sd_setup_write_same_cmnd()
1006 struct request *rq = cmd->request; in sd_setup_flush_cmnd()
1009 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); in sd_setup_flush_cmnd()
1011 cmd->cmnd[0] = SYNCHRONIZE_CACHE; in sd_setup_flush_cmnd()
1012 cmd->cmd_len = 10; in sd_setup_flush_cmnd()
1013 cmd->transfersize = 0; in sd_setup_flush_cmnd()
1014 cmd->allowed = SD_MAX_RETRIES; in sd_setup_flush_cmnd()
1016 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; in sd_setup_flush_cmnd()
1022 struct request *rq = SCpnt->request; in sd_setup_read_write_cmnd()
1023 struct scsi_device *sdp = SCpnt->device; in sd_setup_read_write_cmnd() local
1024 struct gendisk *disk = rq->rq_disk; in sd_setup_read_write_cmnd()
1036 WARN_ON_ONCE(SCpnt != rq->special); in sd_setup_read_write_cmnd()
1047 if (!sdp || !scsi_device_online(sdp) || in sd_setup_read_write_cmnd()
1057 if (sdp->changed) { in sd_setup_read_write_cmnd()
1067 * Some SD card readers can't handle multi-sector accesses which touch in sd_setup_read_write_cmnd()
1070 threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS * in sd_setup_read_write_cmnd()
1071 (sdp->sector_size / 512); in sd_setup_read_write_cmnd()
1073 if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) { in sd_setup_read_write_cmnd()
1076 this_count = threshold - block; in sd_setup_read_write_cmnd()
1079 this_count = sdp->sector_size / 512; in sd_setup_read_write_cmnd()
1088 * 512 byte sectors. In theory we could handle this - in fact in sd_setup_read_write_cmnd()
1092 * with the cdrom, since it is read-only. For performance in sd_setup_read_write_cmnd()
1097 if (sdp->sector_size == 1024) { in sd_setup_read_write_cmnd()
1107 if (sdp->sector_size == 2048) { in sd_setup_read_write_cmnd()
1117 if (sdp->sector_size == 4096) { in sd_setup_read_write_cmnd()
1128 SCpnt->cmnd[0] = WRITE_6; in sd_setup_read_write_cmnd()
1131 t10_pi_prepare(SCpnt->request, sdkp->protection_type); in sd_setup_read_write_cmnd()
1134 SCpnt->cmnd[0] = READ_6; in sd_setup_read_write_cmnd()
1147 dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type); in sd_setup_read_write_cmnd()
1154 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { in sd_setup_read_write_cmnd()
1155 SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC); in sd_setup_read_write_cmnd()
1157 if (unlikely(SCpnt->cmnd == NULL)) { in sd_setup_read_write_cmnd()
1162 SCpnt->cmd_len = SD_EXT_CDB_SIZE; in sd_setup_read_write_cmnd()
1163 memset(SCpnt->cmnd, 0, SCpnt->cmd_len); in sd_setup_read_write_cmnd()
1164 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD; in sd_setup_read_write_cmnd()
1165 SCpnt->cmnd[7] = 0x18; in sd_setup_read_write_cmnd()
1166 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32; in sd_setup_read_write_cmnd()
1167 SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); in sd_setup_read_write_cmnd()
1170 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; in sd_setup_read_write_cmnd()
1171 SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; in sd_setup_read_write_cmnd()
1172 SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; in sd_setup_read_write_cmnd()
1173 SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0; in sd_setup_read_write_cmnd()
1174 SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff; in sd_setup_read_write_cmnd()
1175 SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff; in sd_setup_read_write_cmnd()
1176 SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff; in sd_setup_read_write_cmnd()
1177 SCpnt->cmnd[19] = (unsigned char) block & 0xff; in sd_setup_read_write_cmnd()
1180 SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff; in sd_setup_read_write_cmnd()
1181 SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff; in sd_setup_read_write_cmnd()
1182 SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff; in sd_setup_read_write_cmnd()
1183 SCpnt->cmnd[23] = (unsigned char) block & 0xff; in sd_setup_read_write_cmnd()
1186 SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff; in sd_setup_read_write_cmnd()
1187 SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff; in sd_setup_read_write_cmnd()
1188 SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff; in sd_setup_read_write_cmnd()
1189 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; in sd_setup_read_write_cmnd()
1190 } else if (sdp->use_16_for_rw || (this_count > 0xffff)) { in sd_setup_read_write_cmnd()
1191 SCpnt->cmnd[0] += READ_16 - READ_6; in sd_setup_read_write_cmnd()
1192 SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); in sd_setup_read_write_cmnd()
1193 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; in sd_setup_read_write_cmnd()
1194 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; in sd_setup_read_write_cmnd()
1195 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; in sd_setup_read_write_cmnd()
1196 SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0; in sd_setup_read_write_cmnd()
1197 SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff; in sd_setup_read_write_cmnd()
1198 SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff; in sd_setup_read_write_cmnd()
1199 SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff; in sd_setup_read_write_cmnd()
1200 SCpnt->cmnd[9] = (unsigned char) block & 0xff; in sd_setup_read_write_cmnd()
1201 SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff; in sd_setup_read_write_cmnd()
1202 SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff; in sd_setup_read_write_cmnd()
1203 SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff; in sd_setup_read_write_cmnd()
1204 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff; in sd_setup_read_write_cmnd()
1205 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0; in sd_setup_read_write_cmnd()
1207 scsi_device_protection(SCpnt->device) || in sd_setup_read_write_cmnd()
1208 SCpnt->device->use_10_for_rw) { in sd_setup_read_write_cmnd()
1209 SCpnt->cmnd[0] += READ_10 - READ_6; in sd_setup_read_write_cmnd()
1210 SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); in sd_setup_read_write_cmnd()
1211 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; in sd_setup_read_write_cmnd()
1212 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; in sd_setup_read_write_cmnd()
1213 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; in sd_setup_read_write_cmnd()
1214 SCpnt->cmnd[5] = (unsigned char) block & 0xff; in sd_setup_read_write_cmnd()
1215 SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; in sd_setup_read_write_cmnd()
1216 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; in sd_setup_read_write_cmnd()
1217 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; in sd_setup_read_write_cmnd()
1219 if (unlikely(rq->cmd_flags & REQ_FUA)) { in sd_setup_read_write_cmnd()
1231 SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); in sd_setup_read_write_cmnd()
1232 SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); in sd_setup_read_write_cmnd()
1233 SCpnt->cmnd[3] = (unsigned char) block & 0xff; in sd_setup_read_write_cmnd()
1234 SCpnt->cmnd[4] = (unsigned char) this_count; in sd_setup_read_write_cmnd()
1235 SCpnt->cmnd[5] = 0; in sd_setup_read_write_cmnd()
1237 SCpnt->sdb.length = this_count * sdp->sector_size; in sd_setup_read_write_cmnd()
1244 SCpnt->transfersize = sdp->sector_size; in sd_setup_read_write_cmnd()
1245 SCpnt->underflow = this_count << 9; in sd_setup_read_write_cmnd()
1246 SCpnt->allowed = SD_MAX_RETRIES; in sd_setup_read_write_cmnd()
1259 struct request *rq = cmd->request; in sd_init_command()
1263 switch (scsi_disk(rq->rq_disk)->provisioning_mode) { in sd_init_command()
1296 struct request *rq = SCpnt->request; in sd_uninit_command()
1299 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in sd_uninit_command()
1300 mempool_free(rq->special_vec.bv_page, sd_page_pool); in sd_uninit_command()
1302 if (SCpnt->cmnd != scsi_req(rq)->cmd) { in sd_uninit_command()
1303 cmnd = SCpnt->cmnd; in sd_uninit_command()
1304 SCpnt->cmnd = NULL; in sd_uninit_command()
1305 SCpnt->cmd_len = 0; in sd_uninit_command()
1311 * sd_open - open a scsi disk device
1323 * Locking: called with bdev->bd_mutex held.
1327 struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk); in sd_open()
1332 return -ENXIO; in sd_open()
1336 sdev = sdkp->device; in sd_open()
1342 retval = -ENXIO; in sd_open()
1346 if (sdev->removable || sdkp->write_prot) in sd_open()
1352 retval = -ENOMEDIUM; in sd_open()
1353 if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY)) in sd_open()
1360 retval = -EROFS; in sd_open()
1361 if (sdkp->write_prot && (mode & FMODE_WRITE)) in sd_open()
1370 retval = -ENXIO; in sd_open()
1374 if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) { in sd_open()
1387 * sd_release - invoked when the (last) close(2) is called on this
1397 * Locking: called with bdev->bd_mutex held.
1402 struct scsi_device *sdev = sdkp->device; in sd_release()
1406 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { in sd_release()
1416 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); in sd_getgeo()
1417 struct scsi_device *sdp = sdkp->device; in sd_getgeo() local
1418 struct Scsi_Host *host = sdp->host; in sd_getgeo()
1419 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity); in sd_getgeo()
1428 if (host->hostt->bios_param) in sd_getgeo()
1429 host->hostt->bios_param(sdp, bdev, capacity, diskinfo); in sd_getgeo()
1433 geo->heads = diskinfo[0]; in sd_getgeo()
1434 geo->sectors = diskinfo[1]; in sd_getgeo()
1435 geo->cylinders = diskinfo[2]; in sd_getgeo()
1440 * sd_ioctl - process an ioctl
1456 struct gendisk *disk = bdev->bd_disk; in sd_ioctl()
1458 struct scsi_device *sdp = sdkp->device; in sd_ioctl() local
1463 "cmd=0x%x\n", disk->disk_name, cmd)); in sd_ioctl()
1475 error = scsi_ioctl_block_when_processing_errors(sdp, cmd, in sd_ioctl()
1481 return sed_ioctl(sdkp->opal_dev, cmd, p); in sd_ioctl()
1491 error = scsi_ioctl(sdp, cmd, p); in sd_ioctl()
1495 if (error != -ENOTTY) in sd_ioctl()
1497 error = scsi_ioctl(sdp, cmd, p); in sd_ioctl()
1506 if (sdkp->media_present) in set_media_not_present()
1507 sdkp->device->changed = 1; in set_media_not_present()
1509 if (sdkp->device->removable) { in set_media_not_present()
1510 sdkp->media_present = 0; in set_media_not_present()
1511 sdkp->capacity = 0; in set_media_not_present()
1522 switch (sshdr->sense_key) { in media_not_present()
1526 if (sshdr->asc == 0x3A) { in media_not_present()
1535 * sd_check_events - check media events
1546 struct scsi_device *sdp; in sd_check_events() local
1552 sdp = sdkp->device; in sd_check_events()
1556 * If the device is offline, don't send any commands - just pretend as in sd_check_events()
1561 if (!scsi_device_online(sdp)) { in sd_check_events()
1568 * no cartridge loaded - NOT READY, drive with changed cartridge - in sd_check_events()
1569 * UNIT ATTENTION, or with same cartridge - GOOD STATUS. in sd_check_events()
1575 if (scsi_block_when_processing_errors(sdp)) { in sd_check_events()
1578 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, in sd_check_events()
1595 if (!sdkp->media_present) in sd_check_events()
1596 sdp->changed = 1; in sd_check_events()
1597 sdkp->media_present = 1; in sd_check_events()
1600 * sdp->changed is set under the following conditions: in sd_check_events()
1605 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; in sd_check_events()
1606 sdp->changed = 0; in sd_check_events()
1614 struct scsi_device *sdp = sdkp->device; in sd_sync_cache() local
1615 const int timeout = sdp->request_queue->rq_timeout in sd_sync_cache()
1619 if (!scsi_device_online(sdp)) in sd_sync_cache()
1620 return -ENODEV; in sd_sync_cache()
1626 for (retries = 3; retries > 0; --retries) { in sd_sync_cache()
1634 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr, in sd_sync_cache()
1648 (sshdr->asc == 0x3a || /* medium not present */ in sd_sync_cache()
1649 sshdr->asc == 0x20 || /* invalid command */ in sd_sync_cache()
1650 (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */ in sd_sync_cache()
1664 return -EBUSY; in sd_sync_cache()
1666 return -EIO; in sd_sync_cache()
1676 revalidate_disk(sdkp->disk); in sd_rescan()
1688 struct gendisk *disk = bdev->bd_disk; in sd_compat_ioctl()
1690 struct scsi_device *sdev = sdkp->device; in sd_compat_ioctl()
1704 return sed_ioctl(sdkp->opal_dev, cmd, p); in sd_compat_ioctl()
1709 if (!sdev->host->hostt->compat_ioctl) in sd_compat_ioctl()
1710 return -ENOIOCTLCMD; in sd_compat_ioctl()
1711 return sdev->host->hostt->compat_ioctl(sdev, cmd, p); in sd_compat_ioctl()
1738 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; in sd_pr_command()
1769 return -EOPNOTSUPP; in sd_pr_register()
1779 return -EOPNOTSUPP; in sd_pr_reserve()
1824 * sd_eh_reset - reset error handling callback
1825 * @scmd: sd-issued command that has failed
1837 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk); in sd_eh_reset()
1840 sdkp->ignore_medium_access_errors = false; in sd_eh_reset()
1844 * sd_eh_action - error handling callback
1845 * @scmd: sd-issued command that has failed
1857 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk); in sd_eh_action()
1858 struct scsi_device *sdev = scmd->device; in sd_eh_action()
1862 host_byte(scmd->result) != DID_TIME_OUT || in sd_eh_action()
1873 if (!sdkp->ignore_medium_access_errors) { in sd_eh_action()
1874 sdkp->medium_access_timed_out++; in sd_eh_action()
1875 sdkp->ignore_medium_access_errors = true; in sd_eh_action()
1883 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) { in sd_eh_action()
1886 mutex_lock(&sdev->state_mutex); in sd_eh_action()
1888 mutex_unlock(&sdev->state_mutex); in sd_eh_action()
1898 struct request *req = scmd->request; in sd_completed_bytes()
1899 struct scsi_device *sdev = scmd->device; in sd_completed_bytes()
1907 if (scsi_bufflen(scmd) <= sdev->sector_size) in sd_completed_bytes()
1911 if (!scsi_get_sense_info_fld(scmd->sense_buffer, in sd_completed_bytes()
1929 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); in sd_completed_bytes()
1934 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); in sd_completed_bytes()
1940 * sd_done - bottom half handler: called when the lower level
1942 * @SCpnt: mid-level's per command structure.
1948 int result = SCpnt->result; in sd_done()
1950 unsigned int sector_size = SCpnt->device->sector_size; in sd_done()
1953 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); in sd_done()
1954 struct request *req = SCpnt->request; in sd_done()
1992 if (resid & (sector_size - 1)) { in sd_done()
2007 sdkp->medium_access_timed_out = 0; in sd_done()
2026 SCpnt->result = 0; in sd_done()
2027 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in sd_done()
2040 switch (SCpnt->cmnd[0]) { in sd_done()
2046 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ in sd_done()
2049 sdkp->device->no_write_same = 1; in sd_done()
2051 req->rq_flags |= RQF_QUIET; in sd_done()
2069 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) && in sd_done()
2071 t10_pi_complete(SCpnt->request, sdkp->protection_type, in sd_done()
2078 * spinup disk - called only in sd_revalidate_disk()
2101 the_result = scsi_execute_req(sdkp->device, cmd, in sd_spinup_disk()
2135 if (sdkp->device->no_start_on_add) in sd_spinup_disk()
2156 if (sdkp->device->start_stop_pwr_cond) in sd_spinup_disk()
2158 scsi_execute_req(sdkp->device, cmd, DMA_NONE, in sd_spinup_disk()
2170 * Wait for USB flash devices with slow firmware. in sd_spinup_disk()
2208 struct scsi_device *sdp = sdkp->device; in sd_read_protection_type() local
2212 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { in sd_read_protection_type()
2213 sdkp->protection_type = 0; in sd_read_protection_type()
2220 ret = -ENODEV; in sd_read_protection_type()
2221 else if (scsi_host_dif_capable(sdp->host, type)) in sd_read_protection_type()
2224 if (sdkp->first_scan || type != sdkp->protection_type) in sd_read_protection_type()
2226 case -ENODEV: in sd_read_protection_type()
2241 sdkp->protection_type = type; in sd_read_protection_type()
2246 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, in read_capacity_error() argument
2256 * Set dirty bit for removable devices if not ready - in read_capacity_error()
2259 if (sdp->removable && in read_capacity_error()
2260 sense_valid && sshdr->sense_key == NOT_READY) in read_capacity_error()
2268 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ in read_capacity_error()
2286 u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); in sd_addressable_capacity()
2294 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, in read_capacity_16() argument
2306 if (sdp->no_read_capacity_16) in read_capacity_16()
2307 return -EINVAL; in read_capacity_16()
2316 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, in read_capacity_16()
2321 return -ENODEV; in read_capacity_16()
2332 return -EINVAL; in read_capacity_16()
2338 if (--reset_retries > 0) in read_capacity_16()
2341 retries--; in read_capacity_16()
2347 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); in read_capacity_16()
2348 return -EINVAL; in read_capacity_16()
2355 sdkp->capacity = 0; in read_capacity_16()
2356 return -ENODEV; in read_capacity_16()
2363 sdkp->capacity = 0; in read_capacity_16()
2364 return -EOVERFLOW; in read_capacity_16()
2368 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; in read_capacity_16()
2371 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; in read_capacity_16()
2375 blk_queue_alignment_offset(sdp->request_queue, alignment); in read_capacity_16()
2376 if (alignment && sdkp->first_scan) in read_capacity_16()
2381 sdkp->lbpme = 1; in read_capacity_16()
2384 sdkp->lbprz = 1; in read_capacity_16()
2389 sdkp->capacity = lba + 1; in read_capacity_16()
2393 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, in read_capacity_10() argument
2409 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, in read_capacity_10()
2414 return -ENODEV; in read_capacity_10()
2423 if (--reset_retries > 0) in read_capacity_10()
2426 retries--; in read_capacity_10()
2432 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); in read_capacity_10()
2433 return -EINVAL; in read_capacity_10()
2439 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { in read_capacity_10()
2440 /* Some buggy (usb cardreader) devices return an lba of in read_capacity_10()
2443 sdkp->capacity = 0; in read_capacity_10()
2444 sdkp->physical_block_size = sector_size; in read_capacity_10()
2452 sdkp->capacity = 0; in read_capacity_10()
2453 return -EOVERFLOW; in read_capacity_10()
2456 sdkp->capacity = lba + 1; in read_capacity_10()
2457 sdkp->physical_block_size = sector_size; in read_capacity_10()
2461 static int sd_try_rc16_first(struct scsi_device *sdp) in sd_try_rc16_first() argument
2463 if (sdp->host->max_cmd_len < 16) in sd_try_rc16_first()
2465 if (sdp->try_rc_10_first) in sd_try_rc16_first()
2467 if (sdp->scsi_level > SCSI_SPC_2) in sd_try_rc16_first()
2469 if (scsi_device_protection(sdp)) in sd_try_rc16_first()
2481 struct scsi_device *sdp = sdkp->device; in sd_read_capacity() local
2483 if (sd_try_rc16_first(sdp)) { in sd_read_capacity()
2484 sector_size = read_capacity_16(sdkp, sdp, buffer); in sd_read_capacity()
2485 if (sector_size == -EOVERFLOW) in sd_read_capacity()
2487 if (sector_size == -ENODEV) in sd_read_capacity()
2490 sector_size = read_capacity_10(sdkp, sdp, buffer); in sd_read_capacity()
2494 sector_size = read_capacity_10(sdkp, sdp, buffer); in sd_read_capacity()
2495 if (sector_size == -EOVERFLOW) in sd_read_capacity()
2499 if ((sizeof(sdkp->capacity) > 4) && in sd_read_capacity()
2500 (sdkp->capacity > 0xffffffffULL)) { in sd_read_capacity()
2504 sector_size = read_capacity_16(sdkp, sdp, buffer); in sd_read_capacity()
2508 sdkp->capacity = 1 + (sector_t) 0xffffffff; in sd_read_capacity()
2513 sdp->try_rc_10_first = 0; in sd_read_capacity()
2527 if (sdp->fix_capacity || in sd_read_capacity()
2528 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { in sd_read_capacity()
2531 (unsigned long long) sdkp->capacity); in sd_read_capacity()
2532 --sdkp->capacity; in sd_read_capacity()
2549 * The user might want to re-format the drive with in sd_read_capacity()
2554 sdkp->capacity = 0; in sd_read_capacity()
2563 blk_queue_logical_block_size(sdp->request_queue, sector_size); in sd_read_capacity()
2564 blk_queue_physical_block_size(sdp->request_queue, in sd_read_capacity()
2565 sdkp->physical_block_size); in sd_read_capacity()
2566 sdkp->device->sector_size = sector_size; in sd_read_capacity()
2568 if (sdkp->capacity > 0xffffffff) in sd_read_capacity()
2569 sdp->use_16_for_rw = 1; in sd_read_capacity()
2580 int sector_size = sdkp->device->sector_size; in sd_print_capacity()
2583 string_get_size(sdkp->capacity, sector_size, in sd_print_capacity()
2585 string_get_size(sdkp->capacity, sector_size, in sd_print_capacity()
2589 if (sdkp->first_scan || old_capacity != sdkp->capacity) { in sd_print_capacity()
2591 "%llu %d-byte logical blocks: (%s/%s)\n", in sd_print_capacity()
2592 (unsigned long long)sdkp->capacity, in sd_print_capacity()
2595 if (sdkp->physical_block_size != sector_size) in sd_print_capacity()
2597 "%u-byte physical blocks\n", in sd_print_capacity()
2598 sdkp->physical_block_size); in sd_print_capacity()
2606 sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage, in sd_do_mode_sense() argument
2610 return scsi_mode_sense(sdp, dbd, modepage, buffer, len, in sd_do_mode_sense()
2616 * read write protect setting, if possible - called only in sd_revalidate_disk()
2623 struct scsi_device *sdp = sdkp->device; in sd_read_write_protect_flag() local
2625 int old_wp = sdkp->write_prot; in sd_read_write_protect_flag()
2627 set_disk_ro(sdkp->disk, 0); in sd_read_write_protect_flag()
2628 if (sdp->skip_ms_page_3f) { in sd_read_write_protect_flag()
2633 if (sdp->use_192_bytes_for_3f) { in sd_read_write_protect_flag()
2634 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL); in sd_read_write_protect_flag()
2641 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL); in sd_read_write_protect_flag()
2650 res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL); in sd_read_write_protect_flag()
2656 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255, in sd_read_write_protect_flag()
2664 sdkp->write_prot = ((data.device_specific & 0x80) != 0); in sd_read_write_protect_flag()
2665 set_disk_ro(sdkp->disk, sdkp->write_prot); in sd_read_write_protect_flag()
2666 if (sdkp->first_scan || old_wp != sdkp->write_prot) { in sd_read_write_protect_flag()
2668 sdkp->write_prot ? "on" : "off"); in sd_read_write_protect_flag()
2675 * sd_read_cache_type - called only from sd_revalidate_disk()
2682 struct scsi_device *sdp = sdkp->device; in sd_read_cache_type() local
2689 int old_wce = sdkp->WCE; in sd_read_cache_type()
2690 int old_rcd = sdkp->RCD; in sd_read_cache_type()
2691 int old_dpofua = sdkp->DPOFUA; in sd_read_cache_type()
2694 if (sdkp->cache_override) in sd_read_cache_type()
2698 if (sdp->skip_ms_page_8) { in sd_read_cache_type()
2699 if (sdp->type == TYPE_RBC) in sd_read_cache_type()
2702 if (sdp->skip_ms_page_3f) in sd_read_cache_type()
2705 if (sdp->use_192_bytes_for_3f) in sd_read_cache_type()
2709 } else if (sdp->type == TYPE_RBC) { in sd_read_cache_type()
2718 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len, in sd_read_cache_type()
2745 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) in sd_read_cache_type()
2750 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, in sd_read_cache_type()
2763 if (len - offset <= 2) { in sd_read_cache_type()
2774 if (spf && len - offset > 3) in sd_read_cache_type()
2777 else if (!spf && len - offset > 1) in sd_read_cache_type()
2793 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); in sd_read_cache_type()
2794 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); in sd_read_cache_type()
2796 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); in sd_read_cache_type()
2797 sdkp->RCD = 0; in sd_read_cache_type()
2800 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; in sd_read_cache_type()
2801 if (sdp->broken_fua) { in sd_read_cache_type()
2803 sdkp->DPOFUA = 0; in sd_read_cache_type()
2804 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && in sd_read_cache_type()
2805 !sdkp->device->use_16_for_rw) { in sd_read_cache_type()
2808 sdkp->DPOFUA = 0; in sd_read_cache_type()
2812 if (sdkp->WCE && sdkp->write_prot) in sd_read_cache_type()
2813 sdkp->WCE = 0; in sd_read_cache_type()
2815 if (sdkp->first_scan || old_wce != sdkp->WCE || in sd_read_cache_type()
2816 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) in sd_read_cache_type()
2819 sdkp->WCE ? "enabled" : "disabled", in sd_read_cache_type()
2820 sdkp->RCD ? "disabled" : "enabled", in sd_read_cache_type()
2821 sdkp->DPOFUA ? "supports DPO and FUA" in sd_read_cache_type()
2838 if (sdp->wce_default_on) { in sd_read_cache_type()
2841 sdkp->WCE = 1; in sd_read_cache_type()
2845 sdkp->WCE = 0; in sd_read_cache_type()
2847 sdkp->RCD = 0; in sd_read_cache_type()
2848 sdkp->DPOFUA = 0; in sd_read_cache_type()
2858 struct scsi_device *sdp = sdkp->device; in sd_read_app_tag_own() local
2862 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) in sd_read_app_tag_own()
2865 if (sdkp->protection_type == 0) in sd_read_app_tag_own()
2868 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, in sd_read_app_tag_own()
2892 sdkp->ATO = 1; in sd_read_app_tag_own()
2898 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2903 unsigned int sector_sz = sdkp->device->sector_size; in sd_read_block_limits()
2909 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) in sd_read_block_limits()
2912 blk_queue_io_min(sdkp->disk->queue, in sd_read_block_limits()
2915 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]); in sd_read_block_limits()
2916 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]); in sd_read_block_limits()
2921 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); in sd_read_block_limits()
2923 if (!sdkp->lbpme) in sd_read_block_limits()
2930 sdkp->max_unmap_blocks = lba_count; in sd_read_block_limits()
2932 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]); in sd_read_block_limits()
2935 sdkp->unmap_alignment = in sd_read_block_limits()
2938 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ in sd_read_block_limits()
2940 if (sdkp->max_unmap_blocks) in sd_read_block_limits()
2946 if (sdkp->lbpu && sdkp->max_unmap_blocks) in sd_read_block_limits()
2948 else if (sdkp->lbpws) in sd_read_block_limits()
2950 else if (sdkp->lbpws10) in sd_read_block_limits()
2962 * sd_read_block_characteristics - Query block dev. characteristics
2967 struct request_queue *q = sdkp->disk->queue; in sd_read_block_characteristics()
2976 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len)) in sd_read_block_characteristics()
2986 if (sdkp->device->type == TYPE_ZBC) { in sd_read_block_characteristics()
2987 /* Host-managed */ in sd_read_block_characteristics()
2988 q->limits.zoned = BLK_ZONED_HM; in sd_read_block_characteristics()
2990 sdkp->zoned = (buffer[8] >> 4) & 3; in sd_read_block_characteristics()
2991 if (sdkp->zoned == 1) in sd_read_block_characteristics()
2992 /* Host-aware */ in sd_read_block_characteristics()
2993 q->limits.zoned = BLK_ZONED_HA; in sd_read_block_characteristics()
2996 * Treat drive-managed devices as in sd_read_block_characteristics()
2999 q->limits.zoned = BLK_ZONED_NONE; in sd_read_block_characteristics()
3001 if (blk_queue_is_zoned(q) && sdkp->first_scan) in sd_read_block_characteristics()
3002 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", in sd_read_block_characteristics()
3003 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); in sd_read_block_characteristics()
3010 * sd_read_block_provisioning - Query provisioning VPD page
3018 if (sdkp->lbpme == 0) in sd_read_block_provisioning()
3023 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) in sd_read_block_provisioning()
3026 sdkp->lbpvpd = 1; in sd_read_block_provisioning()
3027 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */ in sd_read_block_provisioning()
3028 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ in sd_read_block_provisioning()
3029 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */ in sd_read_block_provisioning()
3037 struct scsi_device *sdev = sdkp->device; in sd_read_write_same()
3039 if (sdev->host->no_write_same) { in sd_read_write_same()
3040 sdev->no_write_same = 1; in sd_read_write_same()
3049 sdev->no_report_opcodes = 1; in sd_read_write_same()
3056 sdev->no_write_same = 1; in sd_read_write_same()
3060 sdkp->ws16 = 1; in sd_read_write_same()
3063 sdkp->ws10 = 1; in sd_read_write_same()
3068 struct scsi_device *sdev = sdkp->device; in sd_read_security()
3070 if (!sdev->security_supported) in sd_read_security()
3077 sdkp->security = 1; in sd_read_security()
3088 struct scsi_device *sdp = sdkp->device; in sd_validate_opt_xfer_size() local
3090 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); in sd_validate_opt_xfer_size()
3092 if (sdkp->opt_xfer_blocks == 0) in sd_validate_opt_xfer_size()
3095 if (sdkp->opt_xfer_blocks > dev_max) { in sd_validate_opt_xfer_size()
3099 sdkp->opt_xfer_blocks, dev_max); in sd_validate_opt_xfer_size()
3103 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { in sd_validate_opt_xfer_size()
3107 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); in sd_validate_opt_xfer_size()
3119 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { in sd_validate_opt_xfer_size()
3123 opt_xfer_bytes, sdkp->physical_block_size); in sd_validate_opt_xfer_size()
3133 * sd_revalidate_disk - called the first time a new disk is seen,
3140 struct scsi_device *sdp = sdkp->device; in sd_revalidate_disk() local
3141 struct request_queue *q = sdkp->disk->queue; in sd_revalidate_disk()
3142 sector_t old_capacity = sdkp->capacity; in sd_revalidate_disk()
3153 if (!scsi_device_online(sdp)) in sd_revalidate_disk()
3169 if (sdkp->media_present) { in sd_revalidate_disk()
3173 * set the default to rotational. All non-rotational devices in sd_revalidate_disk()
3181 if (scsi_device_supports_vpd(sdp)) { in sd_revalidate_disk()
3204 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; in sd_revalidate_disk()
3207 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); in sd_revalidate_disk()
3208 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); in sd_revalidate_disk()
3211 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); in sd_revalidate_disk()
3212 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); in sd_revalidate_disk()
3214 q->limits.io_opt = 0; in sd_revalidate_disk()
3215 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), in sd_revalidate_disk()
3226 if (sdkp->first_scan || in sd_revalidate_disk()
3227 q->limits.max_sectors > q->limits.max_dev_sectors || in sd_revalidate_disk()
3228 q->limits.max_sectors > q->limits.max_hw_sectors) in sd_revalidate_disk()
3229 q->limits.max_sectors = rw_max; in sd_revalidate_disk()
3231 sdkp->first_scan = 0; in sd_revalidate_disk()
3233 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); in sd_revalidate_disk()
3242 * sd_unlock_native_capacity - unlock native capacity
3247 * implements ->unlock_native_capacity() method, it's invoked to
3255 struct scsi_device *sdev = scsi_disk(disk)->device; in sd_unlock_native_capacity()
3257 if (sdev->host->hostt->unlock_native_capacity) in sd_unlock_native_capacity()
3258 sdev->host->hostt->unlock_native_capacity(sdev); in sd_unlock_native_capacity()
3262 * sd_format_disk_name - format disk name
3263 * @prefix: name prefix - ie. "sd" for SCSI disks
3275 * index shifted -1 after each digit is computed.
3281 * 0 on success, -errno on failure.
3285 const int base = 'z' - 'a' + 1; in sd_format_disk_name()
3291 p = end - 1; in sd_format_disk_name()
3296 return -EINVAL; in sd_format_disk_name()
3297 *--p = 'a' + (index % unit); in sd_format_disk_name()
3298 index = (index / unit) - 1; in sd_format_disk_name()
3301 memmove(begin, p, end - p); in sd_format_disk_name()
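For reference, the naming rule the comment above describes (26-way counting with the index shifted by -1 after each digit) can be exercised in user space; this is a sketch of the scheme, not the in-kernel helper:

/* Hedged user-space sketch of the sd naming scheme:
 * index 0 -> "sda", 25 -> "sdz", 26 -> "sdaa", 701 -> "sdzz", 702 -> "sdaaa". */
#include <stdio.h>

static int format_disk_name(const char *prefix, int index, char *buf, size_t buflen)
{
	const int base = 'z' - 'a' + 1;
	char tmp[16];
	char *p = tmp + sizeof(tmp) - 1;

	*p = '\0';
	do {
		if (p == tmp)
			return -1;
		*--p = 'a' + (index % base);
		index = (index / base) - 1;	/* the -1 shift noted above */
	} while (index >= 0);

	return snprintf(buf, buflen, "%s%s", prefix, p) < (int)buflen ? 0 : -1;
}

int main(void)
{
	char name[16];
	int samples[] = { 0, 25, 26, 701, 702 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (format_disk_name("sd", samples[i], name, sizeof(name)) == 0)
			printf("%d -> %s\n", samples[i], name);
	}
	return 0;
}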
3313 struct scsi_device *sdp; in sd_probe_async() local
3318 sdp = sdkp->device; in sd_probe_async()
3319 gd = sdkp->disk; in sd_probe_async()
3320 index = sdkp->index; in sd_probe_async()
3321 dev = &sdp->sdev_gendev; in sd_probe_async()
3323 gd->major = sd_major((index & 0xf0) >> 4); in sd_probe_async()
3324 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); in sd_probe_async()
3326 gd->fops = &sd_fops; in sd_probe_async()
3327 gd->private_data = &sdkp->driver; in sd_probe_async()
3328 gd->queue = sdkp->device->request_queue; in sd_probe_async()
3331 sdp->sector_size = 512; in sd_probe_async()
3332 sdkp->capacity = 0; in sd_probe_async()
3333 sdkp->media_present = 1; in sd_probe_async()
3334 sdkp->write_prot = 0; in sd_probe_async()
3335 sdkp->cache_override = 0; in sd_probe_async()
3336 sdkp->WCE = 0; in sd_probe_async()
3337 sdkp->RCD = 0; in sd_probe_async()
3338 sdkp->ATO = 0; in sd_probe_async()
3339 sdkp->first_scan = 1; in sd_probe_async()
3340 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; in sd_probe_async()
3344 gd->flags = GENHD_FL_EXT_DEVT; in sd_probe_async()
3345 if (sdp->removable) { in sd_probe_async()
3346 gd->flags |= GENHD_FL_REMOVABLE; in sd_probe_async()
3347 gd->events |= DISK_EVENT_MEDIA_CHANGE; in sd_probe_async()
3350 blk_pm_runtime_init(sdp->request_queue, dev); in sd_probe_async()
3352 if (sdkp->capacity) in sd_probe_async()
3357 if (sdkp->security) { in sd_probe_async()
3358 sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit); in sd_probe_async()
3359 if (sdkp->opal_dev) in sd_probe_async()
3364 sdp->removable ? "removable " : ""); in sd_probe_async()
3365 scsi_autopm_put_device(sdp); in sd_probe_async()
3366 put_device(&sdkp->dev); in sd_probe_async()
3370 * sd_probe - called during driver initialization and whenever a
3378 * Note: this function is invoked from the scsi mid-level.
3380 * <host,channel,id,lun> (found in sdp) and new device name
3384 * Assume sd_probe is not re-entrant (for time being)
3389 struct scsi_device *sdp = to_scsi_device(dev); in sd_probe() local
3395 scsi_autopm_get_device(sdp); in sd_probe()
3396 error = -ENODEV; in sd_probe()
3397 if (sdp->type != TYPE_DISK && in sd_probe()
3398 sdp->type != TYPE_ZBC && in sd_probe()
3399 sdp->type != TYPE_MOD && in sd_probe()
3400 sdp->type != TYPE_RBC) in sd_probe()
3404 if (sdp->type == TYPE_ZBC) in sd_probe()
3407 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, in sd_probe()
3410 error = -ENOMEM; in sd_probe()
3421 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); in sd_probe()
3425 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); in sd_probe()
3427 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); in sd_probe()
3431 sdkp->device = sdp; in sd_probe()
3432 sdkp->driver = &sd_template; in sd_probe()
3433 sdkp->disk = gd; in sd_probe()
3434 sdkp->index = index; in sd_probe()
3435 atomic_set(&sdkp->openers, 0); in sd_probe()
3436 atomic_set(&sdkp->device->ioerr_cnt, 0); in sd_probe()
3438 if (!sdp->request_queue->rq_timeout) { in sd_probe()
3439 if (sdp->type != TYPE_MOD) in sd_probe()
3440 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); in sd_probe()
3442 blk_queue_rq_timeout(sdp->request_queue, in sd_probe()
3446 device_initialize(&sdkp->dev); in sd_probe()
3447 sdkp->dev.parent = dev; in sd_probe()
3448 sdkp->dev.class = &sd_disk_class; in sd_probe()
3449 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); in sd_probe()
3451 error = device_add(&sdkp->dev); in sd_probe()
3458 get_device(&sdkp->dev); /* prevent release before async_schedule */ in sd_probe()
3470 scsi_autopm_put_device(sdp); in sd_probe()
3475 * sd_remove - called whenever a scsi disk (previously recognized by
3480 * Note: this function is invoked from the scsi mid-level.
3482 * that could be re-used by a subsequent sd_probe().
3483 * This function is not called when the built-in sd driver is "exit-ed".
3491 devt = disk_devt(sdkp->disk); in sd_remove()
3492 scsi_autopm_get_device(sdkp->device); in sd_remove()
3496 device_del(&sdkp->dev); in sd_remove()
3497 del_gendisk(sdkp->disk); in sd_remove()
3502 free_opal_dev(sdkp->opal_dev); in sd_remove()
3509 put_device(&sdkp->dev); in sd_remove()
3516 * scsi_disk_release - Called to free the scsi_disk structure
3527 struct gendisk *disk = sdkp->disk; in scsi_disk_release()
3528 struct request_queue *q = disk->queue; in scsi_disk_release()
3530 ida_free(&sd_index_ida, sdkp->index); in scsi_disk_release()
3535 * due to clearing the disk->private_data pointer. Wait from inside in scsi_disk_release()
3543 disk->private_data = NULL; in scsi_disk_release()
3545 put_device(&sdkp->device->sdev_gendev); in scsi_disk_release()
3554 struct scsi_device *sdp = sdkp->device; in sd_start_stop_device() local
3560 if (sdp->start_stop_pwr_cond) in sd_start_stop_device()
3563 if (!scsi_device_online(sdp)) in sd_start_stop_device()
3564 return -ENODEV; in sd_start_stop_device()
3566 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, in sd_start_stop_device()
3580 return -EIO; in sd_start_stop_device()
3600 if (sdkp->WCE && sdkp->media_present) { in sd_shutdown()
3605 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { in sd_shutdown()
3620 if (sdkp->WCE && sdkp->media_present) { in sd_suspend_common()
3626 if (ret == -ENODEV) in sd_suspend_common()
3642 if (sdkp->device->manage_start_stop) { in sd_suspend_common()
3671 if (!sdkp->device->manage_start_stop) in sd_resume()
3677 opal_unlock_from_suspend(sdkp->opal_dev); in sd_resume()
3682 * init_sd - entry point for this driver (both when built in or when
3685 * Note: this function registers this driver with the scsi mid-level.
3702 return -ENODEV; in init_sd()
3712 err = -ENOMEM; in init_sd()
3719 err = -ENOMEM; in init_sd()
3726 err = -ENOMEM; in init_sd()
3754 * exit_sd - exit point for this driver (when it is a module).
3756 * Note: this function unregisters this driver from the scsi mid-level.
3783 scsi_print_sense_hdr(sdkp->device, in sd_print_sense_hdr()
3784 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); in sd_print_sense_hdr()