Lines Matching +full:tf +full:- +full:a
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-scsi.c - helper library for ATA
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
9 * as Documentation/driver-api/libata.rst
12 * - http://www.t10.org/
13 * - http://www.t13.org/
38 #include "libata-transport.h"
66 RW_RECOVERY_MPAGE_LEN - 2,
76 CACHE_MPAGE_LEN - 2,
85 CONTROL_MPAGE_LEN - 2,
87 0, /* [QAM+QERR may be 1, see 05-359r1] */
89 0, 30 /* extended self test time, see 05-359r1 */
103 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_show()
105 spin_lock_irq(ap->lock); in ata_scsi_park_show()
108 rc = -ENODEV; in ata_scsi_park_show()
111 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_show()
112 rc = -EOPNOTSUPP; in ata_scsi_park_show()
116 link = dev->link; in ata_scsi_park_show()
118 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && in ata_scsi_park_show()
119 link->eh_context.unloaded_mask & (1 << dev->devno) && in ata_scsi_park_show()
120 time_after(dev->unpark_deadline, now)) in ata_scsi_park_show()
121 msecs = jiffies_to_msecs(dev->unpark_deadline - now); in ata_scsi_park_show()
126 spin_unlock_irq(ap->lock); in ata_scsi_park_show()
145 if (input < -2) in ata_scsi_park_store()
146 return -EINVAL; in ata_scsi_park_store()
148 rc = -EOVERFLOW; in ata_scsi_park_store()
152 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_store()
154 spin_lock_irqsave(ap->lock, flags); in ata_scsi_park_store()
157 rc = -ENODEV; in ata_scsi_park_store()
160 if (dev->class != ATA_DEV_ATA && in ata_scsi_park_store()
161 dev->class != ATA_DEV_ZAC) { in ata_scsi_park_store()
162 rc = -EOPNOTSUPP; in ata_scsi_park_store()
167 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_store()
168 rc = -EOPNOTSUPP; in ata_scsi_park_store()
172 dev->unpark_deadline = ata_deadline(jiffies, input); in ata_scsi_park_store()
173 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; in ata_scsi_park_store()
175 complete(&ap->park_req_pending); in ata_scsi_park_store()
178 case -1: in ata_scsi_park_store()
179 dev->flags &= ~ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
181 case -2: in ata_scsi_park_store()
182 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
187 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_park_store()
214 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_scsi_set_sense()
221 const struct ata_taskfile *tf) in ata_scsi_set_sense_information() argument
225 information = ata_tf_read_block(tf, dev); in ata_scsi_set_sense_information()
229 scsi_set_sense_information(cmd->sense_buffer, in ata_scsi_set_sense_information()
238 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_field()
247 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_parameter()
267 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
274 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
297 * ata_scsi_unlock_native_capacity - unlock native capacity
300 * This function is called if a partition on @sdev extends beyond
308 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_unlock_native_capacity()
312 spin_lock_irqsave(ap->lock, flags); in ata_scsi_unlock_native_capacity()
315 if (dev && dev->n_sectors < dev->n_native_sectors) { in ata_scsi_unlock_native_capacity()
316 dev->flags |= ATA_DFLAG_UNLOCK_HPA; in ata_scsi_unlock_native_capacity()
317 dev->link->eh_info.action |= ATA_EH_RESET; in ata_scsi_unlock_native_capacity()
321 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_unlock_native_capacity()
327 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
346 return -ENOMSG; in ata_get_identity()
348 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) in ata_get_identity()
349 return -EFAULT; in ata_get_identity()
351 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); in ata_get_identity()
353 return -EFAULT; in ata_get_identity()
355 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); in ata_get_identity()
357 return -EFAULT; in ata_get_identity()
359 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); in ata_get_identity()
361 return -EFAULT; in ata_get_identity()
367 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
393 return -EINVAL; in ata_cmd_ioctl()
396 return -EFAULT; in ata_cmd_ioctl()
405 rc = -ENOMEM; in ata_cmd_ioctl()
409 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ in ata_cmd_ioctl()
413 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_cmd_ioctl()
420 if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ in ata_cmd_ioctl()
441 /* If we set cc then ATA pass-through will cause a in ata_cmd_ioctl()
449 /* Send userspace a few ATA registers (same as drivers/ide) */ in ata_cmd_ioctl()
456 rc = -EFAULT; in ata_cmd_ioctl()
462 rc = -EIO; in ata_cmd_ioctl()
468 rc = -EFAULT; in ata_cmd_ioctl()
475 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
500 return -EINVAL; in ata_task_ioctl()
503 return -EFAULT; in ata_task_ioctl()
508 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_task_ioctl()
529 /* If we set cc then ATA pass-through will cause a in ata_task_ioctl()
548 rc = -EFAULT; in ata_task_ioctl()
553 rc = -EIO; in ata_task_ioctl()
563 if (ap->flags & ATA_FLAG_PIO_DMA) in ata_ioc32()
565 if (ap->pflags & ATA_PFLAG_PIO32) in ata_ioc32()
572 * here must have a compatible argument, or check in_compat_syscall()
578 int rc = -EINVAL; in ata_sas_scsi_ioctl()
583 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
585 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
595 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
596 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { in ata_sas_scsi_ioctl()
598 ap->pflags |= ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
600 ap->pflags &= ~ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
603 rc = -EINVAL; in ata_sas_scsi_ioctl()
605 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
613 return -EACCES; in ata_sas_scsi_ioctl()
618 return -EACCES; in ata_sas_scsi_ioctl()
622 rc = -ENOTTY; in ata_sas_scsi_ioctl()
633 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), in ata_scsi_ioctl()
639 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
643 * Obtain a reference to an unused ata_queued_cmd structure,
644 * which is the basic libata structure representing a single
647 * If a command was available, fill in the SCSI-specific
660 struct ata_port *ap = dev->link->ap; in ata_scsi_qc_new()
667 if (ap->flags & ATA_FLAG_SAS_HOST) { in ata_scsi_qc_new()
670 * unique per-device budget token as a tag. in ata_scsi_qc_new()
672 if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE)) in ata_scsi_qc_new()
674 tag = cmd->budget_token; in ata_scsi_qc_new()
676 tag = scsi_cmd_to_rq(cmd)->tag; in ata_scsi_qc_new()
680 qc->tag = qc->hw_tag = tag; in ata_scsi_qc_new()
681 qc->ap = ap; in ata_scsi_qc_new()
682 qc->dev = dev; in ata_scsi_qc_new()
686 qc->scsicmd = cmd; in ata_scsi_qc_new()
687 qc->scsidone = scsi_done; in ata_scsi_qc_new()
689 qc->sg = scsi_sglist(cmd); in ata_scsi_qc_new()
690 qc->n_elem = scsi_sg_count(cmd); in ata_scsi_qc_new()
692 if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET) in ata_scsi_qc_new()
693 qc->flags |= ATA_QCFLAG_QUIET; in ata_scsi_qc_new()
706 struct scsi_cmnd *scmd = qc->scsicmd; in ata_qc_set_pc_nbytes()
708 qc->extrabytes = scmd->extra_len; in ata_qc_set_pc_nbytes()
709 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; in ata_qc_set_pc_nbytes()
713 * ata_to_sense_error - convert ATA error to SCSI error
721 * Converts an ATA error into a SCSI error. Fill out pointers to
756 /* TRK0 - Track 0 not found */ in ata_to_sense_error()
763 /* SRV/IDNF - ID not found */ in ata_to_sense_error()
766 /* MC - Media Changed */ in ata_to_sense_error()
769 /* ECC - Uncorrectable ECC error */ in ata_to_sense_error()
772 /* BBD - block marked bad */ in ata_to_sense_error()
828 * We need a sensible error return here, which is tricky, and one in ata_to_sense_error()
829 * that won't cause people to do things like return a disk wrongly. in ata_to_sense_error()
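
ata_to_sense_error() is essentially a table walk from ATA error-register bits to a SCSI sense key / asc / ascq triple, as the TRK0 / IDNF / MC / ECC / BBD comments above suggest. A minimal sketch of that shape, assuming the usual <linux/ata.h> error bits and <scsi/scsi_proto.h> sense keys; the asc/ascq values here are assumed SAT-style examples, not values taken from this file's actual table:

    struct sense_map {
    	u8 err_bit;	/* ATA error register bit */
    	u8 sk, asc, ascq;
    };

    static const struct sense_map sketch_sense_tbl[] = {
    	{ ATA_UNC,  MEDIUM_ERROR,    0x11, 0x04 },	/* uncorrectable ECC (assumed asc/ascq) */
    	{ ATA_MC,   UNIT_ATTENTION,  0x28, 0x00 },	/* media changed (assumed asc/ascq) */
    	{ ATA_IDNF, ILLEGAL_REQUEST, 0x21, 0x00 },	/* ID not found (assumed asc/ascq) */
    };

    static void sketch_ata_to_sense(u8 drv_err, u8 *sk, u8 *asc, u8 *ascq)
    {
    	int i;

    	for (i = 0; i < ARRAY_SIZE(sketch_sense_tbl); i++) {
    		if (drv_err & sketch_sense_tbl[i].err_bit) {
    			*sk   = sketch_sense_tbl[i].sk;
    			*asc  = sketch_sense_tbl[i].asc;
    			*ascq = sketch_sense_tbl[i].ascq;
    			return;
    		}
    	}
    	*sk = ABORTED_COMMAND;	/* fallback when no recognizable error bit is set */
    	*asc = *ascq = 0;
    }
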
837 * ata_gen_passthru_sense - Generate check condition sense block.
842 * of whether the command errored or not, return a sense
847 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
855 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_passthru_sense()
856 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_passthru_sense() local
857 unsigned char *sb = cmd->sense_buffer; in ata_gen_passthru_sense()
867 if (qc->err_mask || in ata_gen_passthru_sense()
868 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_passthru_sense()
869 ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, in ata_gen_passthru_sense()
871 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); in ata_gen_passthru_sense()
874 * ATA PASS-THROUGH INFORMATION AVAILABLE in ata_gen_passthru_sense()
880 if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { in ata_gen_passthru_sense()
898 desc[3] = tf->error; in ata_gen_passthru_sense()
899 desc[5] = tf->nsect; in ata_gen_passthru_sense()
900 desc[7] = tf->lbal; in ata_gen_passthru_sense()
901 desc[9] = tf->lbam; in ata_gen_passthru_sense()
902 desc[11] = tf->lbah; in ata_gen_passthru_sense()
903 desc[12] = tf->device; in ata_gen_passthru_sense()
904 desc[13] = tf->status; in ata_gen_passthru_sense()
910 if (tf->flags & ATA_TFLAG_LBA48) { in ata_gen_passthru_sense()
912 desc[4] = tf->hob_nsect; in ata_gen_passthru_sense()
913 desc[6] = tf->hob_lbal; in ata_gen_passthru_sense()
914 desc[8] = tf->hob_lbam; in ata_gen_passthru_sense()
915 desc[10] = tf->hob_lbah; in ata_gen_passthru_sense()
919 desc[0] = tf->error; in ata_gen_passthru_sense()
920 desc[1] = tf->status; in ata_gen_passthru_sense()
921 desc[2] = tf->device; in ata_gen_passthru_sense()
922 desc[3] = tf->nsect; in ata_gen_passthru_sense()
924 if (tf->flags & ATA_TFLAG_LBA48) { in ata_gen_passthru_sense()
926 if (tf->hob_nsect) in ata_gen_passthru_sense()
928 if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) in ata_gen_passthru_sense()
931 desc[9] = tf->lbal; in ata_gen_passthru_sense()
932 desc[10] = tf->lbam; in ata_gen_passthru_sense()
933 desc[11] = tf->lbah; in ata_gen_passthru_sense()
938 * ata_gen_ata_sense - generate a SCSI fixed sense block
941 * Generate sense block for a failed ATA command @qc. Descriptor
949 struct ata_device *dev = qc->dev; in ata_gen_ata_sense()
950 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_ata_sense()
951 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_ata_sense() local
952 unsigned char *sb = cmd->sense_buffer; in ata_gen_ata_sense()
967 if (qc->err_mask || in ata_gen_ata_sense()
968 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_ata_sense()
969 ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, in ata_gen_ata_sense()
975 tf->status, qc->err_mask); in ata_gen_ata_sense()
980 block = ata_tf_read_block(&qc->result_tf, dev); in ata_gen_ata_sense()
989 sdev->use_10_for_rw = 1; in ata_scsi_sdev_config()
990 sdev->use_10_for_ms = 1; in ata_scsi_sdev_config()
991 sdev->no_write_same = 1; in ata_scsi_sdev_config()
993 /* Schedule policy is determined by ->qc_defer() callback and in ata_scsi_sdev_config()
998 sdev->max_device_blocked = 1; in ata_scsi_sdev_config()
1002 * ata_scsi_dma_need_drain - Check whether data transfer may overflow
1020 return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC; in ata_scsi_dma_need_drain()
1026 struct request_queue *q = sdev->request_queue; in ata_scsi_dev_config()
1029 if (!ata_id_has_unload(dev->id)) in ata_scsi_dev_config()
1030 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_dev_config()
1033 dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); in ata_scsi_dev_config()
1034 blk_queue_max_hw_sectors(q, dev->max_sectors); in ata_scsi_dev_config()
1036 if (dev->class == ATA_DEV_ATAPI) { in ata_scsi_dev_config()
1037 sdev->sector_size = ATA_SECT_SIZE; in ata_scsi_dev_config()
1040 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); in ata_scsi_dev_config()
1043 blk_queue_max_segments(q, queue_max_segments(q) - 1); in ata_scsi_dev_config()
1045 sdev->dma_drain_len = ATAPI_MAX_DRAIN; in ata_scsi_dev_config()
1046 sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); in ata_scsi_dev_config()
1047 if (!sdev->dma_drain_buf) { in ata_scsi_dev_config()
1049 return -ENOMEM; in ata_scsi_dev_config()
1052 sdev->sector_size = ata_id_logical_sector_size(dev->id); in ata_scsi_dev_config()
1063 sdev->manage_runtime_start_stop = 1; in ata_scsi_dev_config()
1064 sdev->manage_shutdown = 1; in ata_scsi_dev_config()
1065 sdev->force_runtime_start_on_system_start = 1; in ata_scsi_dev_config()
1075 if (sdev->sector_size > PAGE_SIZE) in ata_scsi_dev_config()
1078 sdev->sector_size); in ata_scsi_dev_config()
1080 blk_queue_update_dma_alignment(q, sdev->sector_size - 1); in ata_scsi_dev_config()
1082 if (dev->flags & ATA_DFLAG_AN) in ata_scsi_dev_config()
1083 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); in ata_scsi_dev_config()
1086 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); in ata_scsi_dev_config()
1090 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsi_dev_config()
1091 sdev->security_supported = 1; in ata_scsi_dev_config()
1093 dev->sdev = sdev; in ata_scsi_dev_config()
1098 * ata_scsi_slave_alloc - Early setup of SCSI device
1102 * associated with an ATA device is scanned on a port.
1110 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_alloc()
1116 * Create a link from the ata_port device to the scsi device to ensure in ata_scsi_slave_alloc()
1120 link = device_link_add(&sdev->sdev_gendev, &ap->tdev, in ata_scsi_slave_alloc()
1125 dev_name(&sdev->sdev_gendev)); in ata_scsi_slave_alloc()
1126 return -ENODEV; in ata_scsi_slave_alloc()
1134 * ata_scsi_slave_config - Set SCSI device attributes
1139 * SCSI mid-layer behaviors.
1147 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_config()
1158 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
1163 * dev->sdev, this function doesn't have to do anything.
1164 * Otherwise, SCSI layer initiated warm-unplug is in progress.
1165 * Clear dev->sdev, schedule the device for ATA detach and invoke
1173 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_destroy()
1177 device_link_remove(&sdev->sdev_gendev, &ap->tdev); in ata_scsi_slave_destroy()
1179 spin_lock_irqsave(ap->lock, flags); in ata_scsi_slave_destroy()
1181 if (dev && dev->sdev) { in ata_scsi_slave_destroy()
1183 dev->sdev = NULL; in ata_scsi_slave_destroy()
1184 dev->flags |= ATA_DFLAG_DETACH; in ata_scsi_slave_destroy()
1187 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_slave_destroy()
1189 kfree(sdev->dma_drain_buf); in ata_scsi_slave_destroy()
1194 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
1206 * Zero on success, non-zero on error.
1210 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_start_stop_xlat()
1211 struct ata_taskfile *tf = &qc->tf; in ata_scsi_start_stop_xlat() local
1212 const u8 *cdb = scmd->cmnd; in ata_scsi_start_stop_xlat()
1216 if (scmd->cmd_len < 5) { in ata_scsi_start_stop_xlat()
1221 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_scsi_start_stop_xlat()
1222 tf->protocol = ATA_PROT_NODATA; in ata_scsi_start_stop_xlat()
1224 ; /* ignore IMMED bit, violates sat-r05 */ in ata_scsi_start_stop_xlat()
1238 tf->nsect = 1; /* 1 sector, lba=0 */ in ata_scsi_start_stop_xlat()
1240 if (qc->dev->flags & ATA_DFLAG_LBA) { in ata_scsi_start_stop_xlat()
1241 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_start_stop_xlat()
1243 tf->lbah = 0x0; in ata_scsi_start_stop_xlat()
1244 tf->lbam = 0x0; in ata_scsi_start_stop_xlat()
1245 tf->lbal = 0x0; in ata_scsi_start_stop_xlat()
1246 tf->device |= ATA_LBA; in ata_scsi_start_stop_xlat()
1249 tf->lbal = 0x1; /* sect */ in ata_scsi_start_stop_xlat()
1250 tf->lbam = 0x0; /* cyl low */ in ata_scsi_start_stop_xlat()
1251 tf->lbah = 0x0; /* cyl high */ in ata_scsi_start_stop_xlat()
1254 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ in ata_scsi_start_stop_xlat()
1259 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && in ata_scsi_start_stop_xlat()
1263 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && in ata_scsi_start_stop_xlat()
1268 tf->command = ATA_CMD_STANDBYNOW1; in ata_scsi_start_stop_xlat()
1281 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_start_stop_xlat()
1284 scmd->result = SAM_STAT_GOOD; in ata_scsi_start_stop_xlat()
1290 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
1300 * Zero on success, non-zero on error.
1304 struct ata_taskfile *tf = &qc->tf; in ata_scsi_flush_xlat() local
1306 tf->flags |= ATA_TFLAG_DEVICE; in ata_scsi_flush_xlat()
1307 tf->protocol = ATA_PROT_NODATA; in ata_scsi_flush_xlat()
1309 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) in ata_scsi_flush_xlat()
1310 tf->command = ATA_CMD_FLUSH_EXT; in ata_scsi_flush_xlat()
1312 tf->command = ATA_CMD_FLUSH; in ata_scsi_flush_xlat()
1315 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_flush_xlat()
1321 * scsi_6_lba_len - Get LBA and transfer length
1324 * Calculate LBA and transfer length for 6-byte commands.
1346 * scsi_10_lba_len - Get LBA and transfer length
1349 * Calculate LBA and transfer length for 10-byte commands.
1362 * scsi_16_lba_len - Get LBA and transfer length
1365 * Calculate LBA and transfer length for 16-byte commands.
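
scsi_6_lba_len(), scsi_10_lba_len() and scsi_16_lba_len() differ only in how the CDB encodes the address and length. A minimal sketch of the three SBC layouts, assuming the get_unaligned_be*() accessors; illustrative, not the file's exact implementation:

    static void sketch_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
    {
    	*plba = ((u64)(cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];	/* 21-bit LBA */
    	*plen = cdb[4];
    }

    static void sketch_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
    {
    	*plba = get_unaligned_be32(&cdb[2]);	/* bytes 2..5 */
    	*plen = get_unaligned_be16(&cdb[7]);	/* bytes 7..8 */
    }

    static void sketch_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
    {
    	*plba = get_unaligned_be64(&cdb[2]);	/* bytes 2..9 */
    	*plen = get_unaligned_be32(&cdb[10]);	/* bytes 10..13 */
    }

For the 6-byte form a transfer length of 0 conventionally means 256 blocks; the listing shows that special case being handled in ata_scsi_rw_xlat() rather than in the helper.
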
1378 * scsi_dld - Get duration limit descriptor index
1381 * Returns the dld bits indicating the index of a command duration limit
1390 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1399 * Zero on success, non-zero on error.
1403 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_verify_xlat()
1404 struct ata_taskfile *tf = &qc->tf; in ata_scsi_verify_xlat() local
1405 struct ata_device *dev = qc->dev; in ata_scsi_verify_xlat()
1406 u64 dev_sectors = qc->dev->n_sectors; in ata_scsi_verify_xlat()
1407 const u8 *cdb = scmd->cmnd; in ata_scsi_verify_xlat()
1412 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_verify_xlat()
1413 tf->protocol = ATA_PROT_NODATA; in ata_scsi_verify_xlat()
1417 if (scmd->cmd_len < 10) { in ata_scsi_verify_xlat()
1424 if (scmd->cmd_len < 16) { in ata_scsi_verify_xlat()
1442 if (dev->flags & ATA_DFLAG_LBA) { in ata_scsi_verify_xlat()
1443 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_verify_xlat()
1447 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1448 tf->device |= (block >> 24) & 0xf; in ata_scsi_verify_xlat()
1450 if (!(dev->flags & ATA_DFLAG_LBA48)) in ata_scsi_verify_xlat()
1454 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_verify_xlat()
1455 tf->command = ATA_CMD_VERIFY_EXT; in ata_scsi_verify_xlat()
1457 tf->hob_nsect = (n_block >> 8) & 0xff; in ata_scsi_verify_xlat()
1459 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_verify_xlat()
1460 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_verify_xlat()
1461 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_verify_xlat()
1466 tf->nsect = n_block & 0xff; in ata_scsi_verify_xlat()
1468 tf->lbah = (block >> 16) & 0xff; in ata_scsi_verify_xlat()
1469 tf->lbam = (block >> 8) & 0xff; in ata_scsi_verify_xlat()
1470 tf->lbal = block & 0xff; in ata_scsi_verify_xlat()
1472 tf->device |= ATA_LBA; in ata_scsi_verify_xlat()
1481 track = (u32)block / dev->sectors; in ata_scsi_verify_xlat()
1482 cyl = track / dev->heads; in ata_scsi_verify_xlat()
1483 head = track % dev->heads; in ata_scsi_verify_xlat()
1484 sect = (u32)block % dev->sectors + 1; in ata_scsi_verify_xlat()
1487 Cylinder: 0-65535 in ata_scsi_verify_xlat()
1488 Head: 0-15 in ata_scsi_verify_xlat()
1489 Sector: 1-255*/ in ata_scsi_verify_xlat()
1493 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1494 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ in ata_scsi_verify_xlat()
1495 tf->lbal = sect; in ata_scsi_verify_xlat()
1496 tf->lbam = cyl; in ata_scsi_verify_xlat()
1497 tf->lbah = cyl >> 8; in ata_scsi_verify_xlat()
1498 tf->device |= head; in ata_scsi_verify_xlat()
1504 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_verify_xlat()
1508 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_verify_xlat()
1513 scmd->result = SAM_STAT_GOOD; in ata_scsi_verify_xlat()
1525 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; in ata_check_nblocks()
1533 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1548 * Zero on success, non-zero on error.
1552 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_rw_xlat()
1553 const u8 *cdb = scmd->cmnd; in ata_scsi_rw_xlat()
1575 if (unlikely(scmd->cmd_len < 10)) { in ata_scsi_rw_xlat()
1587 if (unlikely(scmd->cmd_len < 6)) { in ata_scsi_rw_xlat()
1593 /* for 6-byte r/w commands, transfer length 0 in ata_scsi_rw_xlat()
1603 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_rw_xlat()
1621 /* For 10-byte and 16-byte SCSI R/W commands, transfer in ata_scsi_rw_xlat()
1630 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_rw_xlat()
1631 qc->nbytes = n_block * scmd->device->sector_size; in ata_scsi_rw_xlat()
1637 if (rc == -ERANGE) in ata_scsi_rw_xlat()
1639 /* treat all other errors as -EINVAL, fall through */ in ata_scsi_rw_xlat()
1641 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_rw_xlat()
1645 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_rw_xlat()
1650 scmd->result = SAM_STAT_GOOD; in ata_scsi_rw_xlat()
1656 struct scsi_cmnd *cmd = qc->scsicmd; in ata_qc_done()
1657 void (*done)(struct scsi_cmnd *) = qc->scsidone; in ata_qc_done()
1665 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_qc_complete()
1666 u8 *cdb = cmd->cmnd; in ata_scsi_qc_complete()
1667 int need_sense = (qc->err_mask != 0) && in ata_scsi_qc_complete()
1668 !(qc->flags & ATA_QCFLAG_SENSE_VALID); in ata_scsi_qc_complete()
1670 /* For ATA pass thru (SAT) commands, generate a sense block if in ata_scsi_qc_complete()
1672 * generate because the user forced us to [CK_COND =1], a check in ata_scsi_qc_complete()
1677 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE in ata_scsi_qc_complete()
1686 cmd->result &= 0x0000ffff; in ata_scsi_qc_complete()
1692 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1697 * Our ->queuecommand() function has decided that the SCSI
1706 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1720 struct ata_port *ap = dev->link->ap; in ata_scsi_translate()
1728 /* data is present; dma-map it */ in ata_scsi_translate()
1729 if (cmd->sc_data_direction == DMA_FROM_DEVICE || in ata_scsi_translate()
1730 cmd->sc_data_direction == DMA_TO_DEVICE) { in ata_scsi_translate()
1738 qc->dma_dir = cmd->sc_data_direction; in ata_scsi_translate()
1741 qc->complete_fn = ata_scsi_qc_complete; in ata_scsi_translate()
1746 if (ap->ops->qc_defer) { in ata_scsi_translate()
1747 if ((rc = ap->ops->qc_defer(qc))) in ata_scsi_translate()
1763 cmd->result = (DID_ERROR << 16); in ata_scsi_translate()
1783 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1787 * Takes care of the hard work of simulating a SCSI command...
1791 * completed successfully (0), or not (in which case cmd->result
1801 struct scsi_cmnd *cmd = args->cmd; in ata_scsi_rbuf_fill()
1815 cmd->result = SAM_STAT_GOOD; in ata_scsi_rbuf_fill()
1819 * ata_scsiop_inq_std - Simulate INQUIRY command
1824 * with non-VPD INQUIRY command output.
1833 0x60, /* SAM-3 (no version claimed) */ in ata_scsiop_inq_std()
1836 0x20, /* SBC-2 (no version claimed) */ in ata_scsiop_inq_std()
1839 0x00 /* SPC-3 (no version claimed) */ in ata_scsiop_inq_std()
1843 0xA0, /* SAM-5 (no version claimed) */ in ata_scsiop_inq_std()
1846 0x00, /* SBC-4 (no version claimed) */ in ata_scsiop_inq_std()
1849 0xC0, /* SPC-5 (no version claimed) */ in ata_scsiop_inq_std()
1858 0x5, /* claim SPC-3 version compatibility */ in ata_scsiop_inq_std()
1860 95 - 4, in ata_scsiop_inq_std()
1867 * AHCI port says it's external (Hotplug-capable, eSATA). in ata_scsiop_inq_std()
1869 if (ata_id_removable(args->id) || in ata_scsiop_inq_std()
1870 (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) in ata_scsiop_inq_std()
1873 if (args->dev->class == ATA_DEV_ZAC) { in ata_scsiop_inq_std()
1875 hdr[2] = 0x7; /* claim SPC-5 version compatibility */ in ata_scsiop_inq_std()
1878 if (args->dev->flags & ATA_DFLAG_CDL) in ata_scsiop_inq_std()
1879 hdr[2] = 0xd; /* claim SPC-6 version compatibility */ in ata_scsiop_inq_std()
1883 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); in ata_scsiop_inq_std()
1886 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); in ata_scsiop_inq_std()
1888 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); in ata_scsiop_inq_std()
1891 memcpy(&rbuf[32], "n/a ", 4); in ata_scsiop_inq_std()
1893 if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC) in ata_scsiop_inq_std()
1902 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1928 !(args->dev->flags & ATA_DFLAG_ZAC)) in ata_scsiop_inq_00()
1938 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1957 ata_id_string(args->id, (unsigned char *) &rbuf[4], in ata_scsiop_inq_80()
1963 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1968 * - vendor specific ASCII containing the ATA serial number
1969 * - SAT defined "t10 vendor id based" containing ASCII vendor
1987 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
1999 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, in ata_scsiop_inq_83()
2002 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, in ata_scsiop_inq_83()
2006 if (ata_id_has_wwn(args->id)) { in ata_scsiop_inq_83()
2013 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2017 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ in ata_scsiop_inq_83()
2022 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
2026 * Yields SAT-specified ATA VPD page.
2052 memcpy(&rbuf[60], &args->id[0], 512); in ata_scsiop_inq_89()
2058 struct ata_device *dev = args->dev; in ata_scsiop_inq_b0()
2067 * This is always one physical block, but for disks with a smaller in ata_scsiop_inq_b0()
2071 min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); in ata_scsiop_inq_b0()
2077 * The ATA spec doesn't even know about a granularity or alignment in ata_scsiop_inq_b0()
2079 * VPD page entries, but we have to specify a granularity to signal in ata_scsiop_inq_b0()
2080 * that we support some form of unmap - in this case via WRITE SAME in ata_scsiop_inq_b0()
2083 if (ata_id_has_trim(args->id)) { in ata_scsiop_inq_b0()
2086 if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) in ata_scsiop_inq_b0()
2087 max_blocks = 128 << (20 - SECTOR_SHIFT); in ata_scsiop_inq_b0()
2098 int form_factor = ata_id_form_factor(args->id); in ata_scsiop_inq_b1()
2099 int media_rotation_rate = ata_id_rotation_rate(args->id); in ata_scsiop_inq_b1()
2100 u8 zoned = ata_id_zoned_cap(args->id); in ata_scsiop_inq_b1()
2115 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ in ata_scsiop_inq_b2()
2126 * zbc-r05 SCSI Zoned Block device characteristics VPD page in ata_scsiop_inq_b6()
2132 * URSWRZ bit is only meaningful for host-managed ZAC drives in ata_scsiop_inq_b6()
2134 if (args->dev->zac_zoned_cap & 1) in ata_scsiop_inq_b6()
2136 put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]); in ata_scsiop_inq_b6()
2137 put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]); in ata_scsiop_inq_b6()
2138 put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]); in ata_scsiop_inq_b6()
2145 struct ata_cpr_log *cpr_log = args->dev->cpr_log; in ata_scsiop_inq_b9()
2149 /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ in ata_scsiop_inq_b9()
2151 put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); in ata_scsiop_inq_b9()
2153 for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { in ata_scsiop_inq_b9()
2154 desc[0] = cpr_log->cpr[i].num; in ata_scsiop_inq_b9()
2155 desc[1] = cpr_log->cpr[i].num_storage_elements; in ata_scsiop_inq_b9()
2156 put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]); in ata_scsiop_inq_b9()
2157 put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]); in ata_scsiop_inq_b9()
2164 * modecpy - Prepare response for MODE SENSE
2170 * Generate a generic MODE SENSE page for either current or changeable
2180 memset(dest + 2, 0, n - 2); in modecpy()
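
The memset above is the "changeable values" path: everything past the two-byte page header is reported as zero. A minimal sketch of what modecpy() does, inferred from that fragment:

    static void sketch_modecpy(u8 *dest, const u8 *src, int n, bool changeable)
    {
    	if (changeable) {
    		memcpy(dest, src, 2);		/* page code + page length only */
    		memset(dest + 2, 0, n - 2);	/* body reported as not changeable */
    	} else {
    		memcpy(dest, src, n);		/* current/default values */
    	}
    }
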
2187 * ata_msense_caching - Simulate MODE SENSE caching info page
2192 * Generate a caching info page, which conditionally indicates
2212 * Simulate MODE SENSE control mode page, sub-page 0.
2223 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_msense_control_spg0()
2233 * Translate an ATA duration limit in microseconds to a SCSI duration limit
2234 * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
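
With T2CDLUNITS = 0xa each descriptor unit is 10 ms, so an ATA limit given in microseconds divides by 10000 and has to be clamped to the 2-byte descriptor field; e.g. a 500000 us limit becomes 50 units. A hedged sketch of that conversion (helper name assumed):

    static u16 sketch_cdl_usec_to_t2(u32 limit_usec)
    {
    	u32 units = limit_usec / 10000;	/* microseconds -> 10 ms units */

    	return units > 0xffff ? 0xffff : units;
    }
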
2245 * Simulate MODE SENSE control mode page, sub-pages 07h and 08h
2251 u8 *b, *cdl = dev->cdl, *desc; in ata_msense_control_spgt2()
2257 * are a header. The PAGE LENGTH field is the size of the page in ata_msense_control_spgt2()
2262 put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_spgt2()
2299 * Simulate MODE SENSE control mode page, sub-page f2h
2310 * The first four bytes of ATA Feature Control mode page are a header. in ata_msense_control_ata_feature()
2313 put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_ata_feature()
2315 if (dev->flags & ATA_DFLAG_CDL) in ata_msense_control_ata_feature()
2324 * ata_msense_control - Simulate MODE SENSE control mode page
2327 * @spg: sub-page code
2330 * Generate a generic MODE SENSE control mode page.
2360 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
2364 * Generate a generic MODE SENSE r/w error recovery page.
2377 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
2390 struct ata_device *dev = args->dev; in ata_scsiop_mode_sense()
2391 u8 *scsicmd = args->cmd->cmnd, *p = rbuf; in ata_scsiop_mode_sense()
2431 * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of in ata_scsiop_mode_sense()
2441 if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE) in ata_scsiop_mode_sense()
2456 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2460 p += ata_msense_control(args->dev, p, spg, page_control == 1); in ata_scsiop_mode_sense()
2465 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2466 p += ata_msense_control(args->dev, p, spg, page_control == 1); in ata_scsiop_mode_sense()
2474 if (dev->flags & ATA_DFLAG_FUA) in ata_scsiop_mode_sense()
2478 rbuf[0] = p - rbuf - 1; in ata_scsiop_mode_sense()
2485 put_unaligned_be16(p - rbuf - 2, &rbuf[0]); in ata_scsiop_mode_sense()
2495 ata_scsi_set_invalid_field(dev, args->cmd, fp, bp); in ata_scsiop_mode_sense()
2499 ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); in ata_scsiop_mode_sense()
2505 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2516 struct ata_device *dev = args->dev; in ata_scsiop_read_cap()
2517 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ in ata_scsiop_read_cap()
2522 sector_size = ata_id_logical_sector_size(dev->id); in ata_scsiop_read_cap()
2523 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); in ata_scsiop_read_cap()
2524 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); in ata_scsiop_read_cap()
2526 if (args->cmd->cmnd[0] == READ_CAPACITY) { in ata_scsiop_read_cap()
2530 /* sector count, 32-bit */ in ata_scsiop_read_cap()
2542 /* sector count, 64-bit */ in ata_scsiop_read_cap()
2563 if (ata_id_has_trim(args->id) && in ata_scsiop_read_cap()
2564 !(dev->horkage & ATA_HORKAGE_NOTRIM)) { in ata_scsiop_read_cap()
2567 if (ata_id_has_zero_after_trim(args->id) && in ata_scsiop_read_cap()
2568 dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { in ata_scsiop_read_cap()
2573 if (ata_id_zoned_cap(args->id) || in ata_scsiop_read_cap()
2574 args->dev->class == ATA_DEV_ZAC) in ata_scsiop_read_cap()
2581 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2601 * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
2619 struct scsi_cmnd *cmd = qc->scsicmd; in atapi_qc_complete()
2620 unsigned int err_mask = qc->err_mask; in atapi_qc_complete()
2623 if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2625 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2627 * translation of taskfile registers into a in atapi_qc_complete()
2634 /* SCSI EH automatically locks door if sdev->locked is in atapi_qc_complete()
2637 * creates a loop - SCSI EH issues door lock which in atapi_qc_complete()
2641 * If door lock fails, always clear sdev->locked to in atapi_qc_complete()
2645 * sure qc->dev->sdev isn't NULL before dereferencing. in atapi_qc_complete()
2647 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) in atapi_qc_complete()
2648 qc->dev->sdev->locked = 0; in atapi_qc_complete()
2650 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; in atapi_qc_complete()
2656 if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0) in atapi_qc_complete()
2658 cmd->result = SAM_STAT_GOOD; in atapi_qc_complete()
2663 * atapi_xlat - Initialize PACKET taskfile
2670 * Zero on success, non-zero on failure.
2674 struct scsi_cmnd *scmd = qc->scsicmd; in atapi_xlat()
2675 struct ata_device *dev = qc->dev; in atapi_xlat()
2676 int nodata = (scmd->sc_data_direction == DMA_NONE); in atapi_xlat()
2677 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); in atapi_xlat()
2680 memset(qc->cdb, 0, dev->cdb_len); in atapi_xlat()
2681 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); in atapi_xlat()
2683 qc->complete_fn = atapi_qc_complete; in atapi_xlat()
2685 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in atapi_xlat()
2686 if (scmd->sc_data_direction == DMA_TO_DEVICE) { in atapi_xlat()
2687 qc->tf.flags |= ATA_TFLAG_WRITE; in atapi_xlat()
2690 qc->tf.command = ATA_CMD_PACKET; in atapi_xlat()
2731 qc->tf.lbam = (nbytes & 0xFF); in atapi_xlat()
2732 qc->tf.lbah = (nbytes >> 8); in atapi_xlat()
2735 qc->tf.protocol = ATAPI_PROT_NODATA; in atapi_xlat()
2737 qc->tf.protocol = ATAPI_PROT_PIO; in atapi_xlat()
2740 qc->tf.protocol = ATAPI_PROT_DMA; in atapi_xlat()
2741 qc->tf.feature |= ATAPI_PKT_DMA; in atapi_xlat()
2743 if ((dev->flags & ATA_DFLAG_DMADIR) && in atapi_xlat()
2744 (scmd->sc_data_direction != DMA_TO_DEVICE)) in atapi_xlat()
2746 qc->tf.feature |= ATAPI_DMADIR; in atapi_xlat()
2750 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE in atapi_xlat()
2758 * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), in ata_find_dev()
2766 int link_max_devices = ata_link_max_devices(&ap->link); in ata_find_dev()
2769 return &ap->link.device[0]; in ata_find_dev()
2772 return &ap->link.device[devno]; in ata_find_dev()
2778 * For PMP-attached devices, the device number corresponds to C in ata_find_dev()
2782 if (devno < ap->nr_pmp_links) in ata_find_dev()
2783 return &ap->pmp_link[devno].device[0]; in ata_find_dev()
2795 if (unlikely(scsidev->channel || scsidev->lun)) in __ata_scsi_find_dev()
2797 devno = scsidev->id; in __ata_scsi_find_dev()
2799 if (unlikely(scsidev->id || scsidev->lun)) in __ata_scsi_find_dev()
2801 devno = scsidev->channel; in __ata_scsi_find_dev()
2808 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2835 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2836 * @byte1: Byte 1 from pass-thru CDB.
2845 case 3: /* Non-data */ in ata_scsi_map_proto()
2849 case 10: /* UDMA Data-in */ in ata_scsi_map_proto()
2850 case 11: /* UDMA Data-Out */ in ata_scsi_map_proto()
2853 case 4: /* PIO Data-in */ in ata_scsi_map_proto()
2854 case 5: /* PIO Data-out */ in ata_scsi_map_proto()
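
The PROTOCOL field sits in bits 1..4 of CDB byte 1. A sketch of the mapping implied by the case labels above, assuming the standard ATA_PROT_* values from <linux/libata.h>; the full set of cases handled in the file is not shown in this listing:

    static u8 sketch_map_proto(u8 byte1)
    {
    	switch ((byte1 & 0x1e) >> 1) {		/* PROTOCOL field, bits 1..4 */
    	case 3:					/* Non-data */
    		return ATA_PROT_NODATA;
    	case 4:					/* PIO Data-in */
    	case 5:					/* PIO Data-out */
    		return ATA_PROT_PIO;
    	case 6:					/* DMA */
    	case 10:				/* UDMA Data-in */
    	case 11:				/* UDMA Data-out */
    		return ATA_PROT_DMA;
    	default:
    		return ATA_PROT_UNKNOWN;
    	}
    }
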
2874 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2877 * Handles either 12, 16, or 32-byte versions of the CDB.
2880 * Zero on success, non-zero on failure.
2884 struct ata_taskfile *tf = &(qc->tf); in ata_scsi_pass_thru() local
2885 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_pass_thru()
2886 struct ata_device *dev = qc->dev; in ata_scsi_pass_thru()
2887 const u8 *cdb = scmd->cmnd; in ata_scsi_pass_thru()
2891 /* 7Fh variable length cmd means an ATA pass-thru(32) */ in ata_scsi_pass_thru()
2895 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); in ata_scsi_pass_thru()
2896 if (tf->protocol == ATA_PROT_UNKNOWN) { in ata_scsi_pass_thru()
2906 if (scmd->sc_data_direction != DMA_NONE) { in ata_scsi_pass_thru()
2911 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
2912 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_pass_thru()
2916 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_pass_thru()
2925 * 16-byte CDB - may contain extended commands. in ata_scsi_pass_thru()
2930 tf->hob_feature = cdb[3]; in ata_scsi_pass_thru()
2931 tf->hob_nsect = cdb[5]; in ata_scsi_pass_thru()
2932 tf->hob_lbal = cdb[7]; in ata_scsi_pass_thru()
2933 tf->hob_lbam = cdb[9]; in ata_scsi_pass_thru()
2934 tf->hob_lbah = cdb[11]; in ata_scsi_pass_thru()
2935 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2937 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2942 tf->feature = cdb[4]; in ata_scsi_pass_thru()
2943 tf->nsect = cdb[6]; in ata_scsi_pass_thru()
2944 tf->lbal = cdb[8]; in ata_scsi_pass_thru()
2945 tf->lbam = cdb[10]; in ata_scsi_pass_thru()
2946 tf->lbah = cdb[12]; in ata_scsi_pass_thru()
2947 tf->device = cdb[13]; in ata_scsi_pass_thru()
2948 tf->command = cdb[14]; in ata_scsi_pass_thru()
2952 * 12-byte CDB - incapable of extended commands. in ata_scsi_pass_thru()
2954 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2956 tf->feature = cdb[3]; in ata_scsi_pass_thru()
2957 tf->nsect = cdb[4]; in ata_scsi_pass_thru()
2958 tf->lbal = cdb[5]; in ata_scsi_pass_thru()
2959 tf->lbam = cdb[6]; in ata_scsi_pass_thru()
2960 tf->lbah = cdb[7]; in ata_scsi_pass_thru()
2961 tf->device = cdb[8]; in ata_scsi_pass_thru()
2962 tf->command = cdb[9]; in ata_scsi_pass_thru()
2966 * 32-byte CDB - may contain extended command fields. in ata_scsi_pass_thru()
2971 tf->hob_feature = cdb[20]; in ata_scsi_pass_thru()
2972 tf->hob_nsect = cdb[22]; in ata_scsi_pass_thru()
2973 tf->hob_lbal = cdb[16]; in ata_scsi_pass_thru()
2974 tf->hob_lbam = cdb[15]; in ata_scsi_pass_thru()
2975 tf->hob_lbah = cdb[14]; in ata_scsi_pass_thru()
2976 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2978 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2980 tf->feature = cdb[21]; in ata_scsi_pass_thru()
2981 tf->nsect = cdb[23]; in ata_scsi_pass_thru()
2982 tf->lbal = cdb[19]; in ata_scsi_pass_thru()
2983 tf->lbam = cdb[18]; in ata_scsi_pass_thru()
2984 tf->lbah = cdb[17]; in ata_scsi_pass_thru()
2985 tf->device = cdb[24]; in ata_scsi_pass_thru()
2986 tf->command = cdb[25]; in ata_scsi_pass_thru()
2987 tf->auxiliary = get_unaligned_be32(&cdb[28]); in ata_scsi_pass_thru()
2992 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
2993 tf->nsect = qc->hw_tag << 3; in ata_scsi_pass_thru()
2996 tf->device = dev->devno ? in ata_scsi_pass_thru()
2997 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; in ata_scsi_pass_thru()
2999 switch (tf->command) { in ata_scsi_pass_thru()
3000 /* READ/WRITE LONG use a non-standard sect_size */ in ata_scsi_pass_thru()
3005 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { in ata_scsi_pass_thru()
3009 qc->sect_size = scsi_bufflen(scmd); in ata_scsi_pass_thru()
3043 qc->sect_size = scmd->device->sector_size; in ata_scsi_pass_thru()
3048 qc->sect_size = ATA_SECT_SIZE; in ata_scsi_pass_thru()
3053 * write indication (used for PIO/DMA setup), result TF is in ata_scsi_pass_thru()
3056 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_pass_thru()
3057 if (scmd->sc_data_direction == DMA_TO_DEVICE) in ata_scsi_pass_thru()
3058 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_pass_thru()
3060 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; in ata_scsi_pass_thru()
3071 if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) { in ata_scsi_pass_thru()
3077 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { in ata_scsi_pass_thru()
3083 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { in ata_scsi_pass_thru()
3088 if (is_multi_taskfile(tf)) { in ata_scsi_pass_thru()
3094 if (multi_count != dev->multi_count) in ata_scsi_pass_thru()
3100 * Filter SET_FEATURES - XFER MODE command -- otherwise, in ata_scsi_pass_thru()
3101 * SET_FEATURES - XFER MODE must be preceded/succeeded in ata_scsi_pass_thru()
3102 * by an update to hardware-specific registers for each in ata_scsi_pass_thru()
3103 * controller (i.e. the reason for ->set_piomode(), in ata_scsi_pass_thru()
3104 * ->set_dmamode(), and ->post_set_mode() hooks). in ata_scsi_pass_thru()
3106 if (tf->command == ATA_CMD_SET_FEATURES && in ata_scsi_pass_thru()
3107 tf->feature == SETFEATURES_XFER) { in ata_scsi_pass_thru()
3116 * have a real reason for wanting to use them. This ensures in ata_scsi_pass_thru()
3121 * Note that for ATA8 we can issue a DCS change and DCS freeze lock in ata_scsi_pass_thru()
3127 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { in ata_scsi_pass_thru()
3140 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
3146 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
3163 struct scsi_device *sdp = cmd->device; in ata_format_dsm_trim_descr()
3164 size_t len = sdp->sector_size; in ata_format_dsm_trim_descr()
3184 count -= 0xffff; in ata_format_dsm_trim_descr()
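
Each DSM TRIM range entry is a little-endian 64-bit word packing a 48-bit starting LBA with a 16-bit sector count, which is why the loop above peels off at most 0xffff sectors per descriptor. A sketch of emitting one entry (illustrative, not the file's buffer handling):

    static void sketch_put_trim_range(__le64 *entry, u64 lba, u64 count)
    {
    	u64 chunk = min_t(u64, count, 0xffff);	/* at most 0xffff sectors per entry */

    	*entry = cpu_to_le64((chunk << 48) | (lba & 0xffffffffffffULL));
    }
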
3194 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
3197 * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
3201 * - When set translate to DSM TRIM
3202 * - When clear translate to SCT Write Same
3206 struct ata_taskfile *tf = &qc->tf; in ata_scsi_write_same_xlat() local
3207 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_write_same_xlat()
3208 struct scsi_device *sdp = scmd->device; in ata_scsi_write_same_xlat()
3209 size_t len = sdp->sector_size; in ata_scsi_write_same_xlat()
3210 struct ata_device *dev = qc->dev; in ata_scsi_write_same_xlat()
3211 const u8 *cdb = scmd->cmnd; in ata_scsi_write_same_xlat()
3232 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_write_same_xlat()
3239 (dev->horkage & ATA_HORKAGE_NOTRIM) || in ata_scsi_write_same_xlat()
3240 !ata_id_has_trim(dev->id)) { in ata_scsi_write_same_xlat()
3252 * WRITE SAME always has a sector sized buffer as payload, this in ata_scsi_write_same_xlat()
3253 * should never be a multiple entry S/G list. in ata_scsi_write_same_xlat()
3260 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) in ata_scsi_write_same_xlat()
3270 tf->protocol = ATA_PROT_NCQ; in ata_scsi_write_same_xlat()
3271 tf->command = ATA_CMD_FPDMA_SEND; in ata_scsi_write_same_xlat()
3272 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; in ata_scsi_write_same_xlat()
3273 tf->nsect = qc->hw_tag << 3; in ata_scsi_write_same_xlat()
3274 tf->hob_feature = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3275 tf->feature = size / 512; in ata_scsi_write_same_xlat()
3277 tf->auxiliary = 1; in ata_scsi_write_same_xlat()
3279 tf->protocol = ATA_PROT_DMA; in ata_scsi_write_same_xlat()
3280 tf->hob_feature = 0; in ata_scsi_write_same_xlat()
3281 tf->feature = ATA_DSM_TRIM; in ata_scsi_write_same_xlat()
3282 tf->hob_nsect = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3283 tf->nsect = size / 512; in ata_scsi_write_same_xlat()
3284 tf->command = ATA_CMD_DSM; in ata_scsi_write_same_xlat()
3287 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | in ata_scsi_write_same_xlat()
3308 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
3312 * Yields a subset to satisfy scsi_report_opcode()
3319 struct ata_device *dev = args->dev; in ata_scsiop_maint_in()
3320 u8 *cdb = args->cmd->cmnd; in ata_scsiop_maint_in()
3361 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3372 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3383 if (ata_id_zoned_cap(dev->id) || in ata_scsiop_maint_in()
3384 dev->class == ATA_DEV_ZAC) in ata_scsiop_maint_in()
3389 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsiop_maint_in()
3403 * ata_scsi_report_zones_complete - convert ATA output
3406 * Convert T-13 little-endian field representation into
3407 * T-10 big-endian field representation.
3408 * What a mess.
3412 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_report_zones_complete()
3475 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_in_xlat() local
3476 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_in_xlat()
3477 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_in_xlat()
3478 u16 sect, fp = (u16)-1; in ata_scsi_zbc_in_xlat()
3483 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_in_xlat()
3484 ata_dev_warn(qc->dev, "invalid cdb length %d\n", in ata_scsi_zbc_in_xlat()
3485 scmd->cmd_len); in ata_scsi_zbc_in_xlat()
3491 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", in ata_scsi_zbc_in_xlat()
3497 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); in ata_scsi_zbc_in_xlat()
3503 * and uses a 16 bit value for the transfer count. in ata_scsi_zbc_in_xlat()
3506 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); in ata_scsi_zbc_in_xlat()
3512 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_in_xlat()
3513 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { in ata_scsi_zbc_in_xlat()
3514 tf->protocol = ATA_PROT_NCQ; in ata_scsi_zbc_in_xlat()
3515 tf->command = ATA_CMD_FPDMA_RECV; in ata_scsi_zbc_in_xlat()
3516 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; in ata_scsi_zbc_in_xlat()
3517 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_in_xlat()
3518 tf->feature = sect & 0xff; in ata_scsi_zbc_in_xlat()
3519 tf->hob_feature = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3520 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); in ata_scsi_zbc_in_xlat()
3522 tf->command = ATA_CMD_ZAC_MGMT_IN; in ata_scsi_zbc_in_xlat()
3523 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; in ata_scsi_zbc_in_xlat()
3524 tf->protocol = ATA_PROT_DMA; in ata_scsi_zbc_in_xlat()
3525 tf->hob_feature = options; in ata_scsi_zbc_in_xlat()
3526 tf->hob_nsect = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3527 tf->nsect = sect & 0xff; in ata_scsi_zbc_in_xlat()
3529 tf->device = ATA_LBA; in ata_scsi_zbc_in_xlat()
3530 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_in_xlat()
3531 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3532 tf->lbal = block & 0xff; in ata_scsi_zbc_in_xlat()
3533 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_in_xlat()
3534 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_in_xlat()
3535 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_in_xlat()
3537 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_in_xlat()
3538 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_scsi_zbc_in_xlat()
3542 qc->complete_fn = ata_scsi_report_zones_complete; in ata_scsi_zbc_in_xlat()
3547 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_zbc_in_xlat()
3552 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_in_xlat()
3558 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_out_xlat() local
3559 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_out_xlat()
3560 struct ata_device *dev = qc->dev; in ata_scsi_zbc_out_xlat()
3561 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_out_xlat()
3565 u16 fp = (u16)-1; in ata_scsi_zbc_out_xlat()
3567 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_out_xlat()
3593 } else if (block >= dev->n_sectors) { in ata_scsi_zbc_out_xlat()
3595 * Block must be a valid zone ID (a zone start LBA). in ata_scsi_zbc_out_xlat()
3601 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_out_xlat()
3602 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { in ata_scsi_zbc_out_xlat()
3603 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_zbc_out_xlat()
3604 tf->command = ATA_CMD_NCQ_NON_DATA; in ata_scsi_zbc_out_xlat()
3605 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3606 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_out_xlat()
3607 tf->auxiliary = sa | ((u16)all << 8); in ata_scsi_zbc_out_xlat()
3609 tf->protocol = ATA_PROT_NODATA; in ata_scsi_zbc_out_xlat()
3610 tf->command = ATA_CMD_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3611 tf->feature = sa; in ata_scsi_zbc_out_xlat()
3612 tf->hob_feature = all; in ata_scsi_zbc_out_xlat()
3614 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_out_xlat()
3615 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_out_xlat()
3616 tf->lbal = block & 0xff; in ata_scsi_zbc_out_xlat()
3617 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_out_xlat()
3618 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_out_xlat()
3619 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_out_xlat()
3620 tf->device = ATA_LBA; in ata_scsi_zbc_out_xlat()
3621 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_out_xlat()
3626 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_zbc_out_xlat()
3630 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_out_xlat()
3635 * ata_mselect_caching - Simulate MODE SELECT for caching info page
3641 * Prepare a taskfile to modify caching information for the device.
3649 struct ata_taskfile *tf = &qc->tf; in ata_mselect_caching() local
3650 struct ata_device *dev = qc->dev; in ata_mselect_caching()
3656 * The first two bytes of def_cache_mpage are a header, so offsets in ata_mselect_caching()
3660 if (len != CACHE_MPAGE_LEN - 2) { in ata_mselect_caching()
3661 *fp = min(len, CACHE_MPAGE_LEN - 2); in ata_mselect_caching()
3662 return -EINVAL; in ata_mselect_caching()
3668 * Check that read-only bits are not modified. in ata_mselect_caching()
3670 ata_msense_caching(dev->id, mpage, false); in ata_mselect_caching()
3671 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { in ata_mselect_caching()
3676 return -EINVAL; in ata_mselect_caching()
3680 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_caching()
3681 tf->protocol = ATA_PROT_NODATA; in ata_mselect_caching()
3682 tf->nsect = 0; in ata_mselect_caching()
3683 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_caching()
3684 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; in ata_mselect_caching()
3689 * Simulate MODE SELECT control mode page, sub-page 0.
3694 struct ata_device *dev = qc->dev; in ata_mselect_control_spg0()
3700 * The first two bytes of def_control_mpage are a header, so offsets in ata_mselect_control_spg0()
3704 if (len != CONTROL_MPAGE_LEN - 2) { in ata_mselect_control_spg0()
3705 *fp = min(len, CONTROL_MPAGE_LEN - 2); in ata_mselect_control_spg0()
3706 return -EINVAL; in ata_mselect_control_spg0()
3712 * Check that read-only bits are not modified. in ata_mselect_control_spg0()
3715 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { in ata_mselect_control_spg0()
3720 return -EINVAL; in ata_mselect_control_spg0()
3724 dev->flags |= ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3726 dev->flags &= ~ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3731 * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
3732 * page) into a SET FEATURES command.
3738 struct ata_device *dev = qc->dev; in ata_mselect_control_ata_feature()
3739 struct ata_taskfile *tf = &qc->tf; in ata_mselect_control_ata_feature() local
3743 * The first four bytes of ATA Feature Control mode page are a header, in ata_mselect_control_ata_feature()
3746 if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) { in ata_mselect_control_ata_feature()
3747 *fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4); in ata_mselect_control_ata_feature()
3748 return -EINVAL; in ata_mselect_control_ata_feature()
3756 dev->flags &= ~ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3760 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { in ata_mselect_control_ata_feature()
3763 return -EINVAL; in ata_mselect_control_ata_feature()
3766 dev->flags |= ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3770 return -EINVAL; in ata_mselect_control_ata_feature()
3773 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_control_ata_feature()
3774 tf->protocol = ATA_PROT_NODATA; in ata_mselect_control_ata_feature()
3775 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_control_ata_feature()
3776 tf->feature = SETFEATURES_CDL; in ata_mselect_control_ata_feature()
3777 tf->nsect = cdl_action; in ata_mselect_control_ata_feature()
3783 * ata_mselect_control - Simulate MODE SELECT for control page
3785 * @spg: target sub-page of the control page
3790 * Prepare a taskfile to modify the control mode page settings of the device. in ata_mselect_control()
3804 return -EINVAL; in ata_mselect_control()
3809 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
3812 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
3821 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_mode_select_xlat()
3822 const u8 *cdb = scmd->cmnd; in ata_scsi_mode_select_xlat()
3826 u16 fp = (u16)-1; in ata_scsi_mode_select_xlat()
3833 if (scmd->cmd_len < 5) { in ata_scsi_mode_select_xlat()
3841 if (scmd->cmd_len < 9) { in ata_scsi_mode_select_xlat()
3858 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) in ata_scsi_mode_select_xlat()
3874 len -= hdr_len; in ata_scsi_mode_select_xlat()
3884 len -= bd_len; in ata_scsi_mode_select_xlat()
3898 len -= 4; in ata_scsi_mode_select_xlat()
3906 len -= 2; in ata_scsi_mode_select_xlat()
3910 * Supported subpages: all subpages and ATA feature sub-page f2h of in ata_scsi_mode_select_xlat()
3924 if (qc->dev->flags & ATA_DFLAG_CDL && in ata_scsi_mode_select_xlat()
3961 * page at a time. in ata_scsi_mode_select_xlat()
3969 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_mode_select_xlat()
3973 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); in ata_scsi_mode_select_xlat()
3978 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_mode_select_xlat()
3982 scmd->result = SAM_STAT_GOOD; in ata_scsi_mode_select_xlat()
3998 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_security_inout_xlat()
3999 const u8 *cdb = scmd->cmnd; in ata_scsi_security_inout_xlat()
4000 struct ata_taskfile *tf = &qc->tf; in ata_scsi_security_inout_xlat() local
4005 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); in ata_scsi_security_inout_xlat()
4011 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); in ata_scsi_security_inout_xlat()
4017 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4022 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4026 /* convert to the sector-based ATA addressing */ in ata_scsi_security_inout_xlat()
4030 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; in ata_scsi_security_inout_xlat()
4031 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; in ata_scsi_security_inout_xlat()
4033 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_security_inout_xlat()
4034 tf->command = ata_scsi_trusted_op(len, send, dma); in ata_scsi_security_inout_xlat()
4035 tf->feature = secp; in ata_scsi_security_inout_xlat()
4036 tf->lbam = spsp & 0xff; in ata_scsi_security_inout_xlat()
4037 tf->lbah = spsp >> 8; in ata_scsi_security_inout_xlat()
4040 tf->nsect = len & 0xff; in ata_scsi_security_inout_xlat()
4041 tf->lbal = len >> 8; in ata_scsi_security_inout_xlat()
4044 tf->lbah = (1 << 7); in ata_scsi_security_inout_xlat()
4052 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
4055 * Translate a SCSI variable length CDB to specified commands.
4056 * It checks a service action value in CDB to call corresponding handler.
4059 * Zero on success, non-zero on failure
4064 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_var_len_cdb_xlat()
4065 const u8 *cdb = scmd->cmnd; in ata_scsi_var_len_cdb_xlat()
4069 * if service action represents an ATA pass-thru(32) command, in ata_scsi_var_len_cdb_xlat()
4080 * ata_get_xlat_func - check if SCSI to ATA translation is possible
4135 if (!(dev->flags & ATA_DFLAG_TRUSTED)) in ata_get_xlat_func()
4148 struct ata_port *ap = dev->link->ap; in __ata_scsi_queuecmd()
4149 u8 scsi_op = scmd->cmnd[0]; in __ata_scsi_queuecmd()
4154 * However, this check is done without holding the ap->lock (a libata in __ata_scsi_queuecmd()
4156 * therefore we must check if EH is pending, while holding ap->lock. in __ata_scsi_queuecmd()
4158 if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) in __ata_scsi_queuecmd()
4161 if (unlikely(!scmd->cmd_len)) in __ata_scsi_queuecmd()
4164 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { in __ata_scsi_queuecmd()
4165 if (unlikely(scmd->cmd_len > dev->cdb_len)) in __ata_scsi_queuecmd()
4173 if (unlikely(len > scmd->cmd_len || in __ata_scsi_queuecmd()
4174 len > dev->cdb_len || in __ata_scsi_queuecmd()
4175 scmd->cmd_len > ATAPI_CDB_LEN)) in __ata_scsi_queuecmd()
4181 if (unlikely(scmd->cmd_len > 16)) in __ata_scsi_queuecmd()
4195 scmd->result = DID_ERROR << 16; in __ata_scsi_queuecmd()
4201 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
4207 * hardware. In other cases, this function simulates a
4223 struct scsi_device *scsidev = cmd->device; in ata_scsi_queuecmd()
4229 spin_lock_irqsave(ap->lock, irq_flags); in ata_scsi_queuecmd()
4235 cmd->result = (DID_BAD_TARGET << 16); in ata_scsi_queuecmd()
4239 spin_unlock_irqrestore(ap->lock, irq_flags); in ata_scsi_queuecmd()
4246 * ata_scsi_simulate - simulate SCSI command on ATA device
4250 * Interprets and directly executes a select list of SCSI commands
4260 const u8 *scsicmd = cmd->cmnd; in ata_scsi_simulate()
4264 args.id = dev->id; in ata_scsi_simulate()
4296 if (dev->flags & ATA_DFLAG_ZAC) in ata_scsi_simulate()
4302 if (dev->cpr_log) in ata_scsi_simulate()
4338 * turning this into a no-op. in ata_scsi_simulate()
4344 /* no-op's, complete with success */ in ata_scsi_simulate()
4378 for (i = 0; i < host->n_ports; i++) { in ata_scsi_add_hosts()
4379 struct ata_port *ap = host->ports[i]; in ata_scsi_add_hosts()
4382 rc = -ENOMEM; in ata_scsi_add_hosts()
4387 shost->eh_noresume = 1; in ata_scsi_add_hosts()
4388 *(struct ata_port **)&shost->hostdata[0] = ap; in ata_scsi_add_hosts()
4389 ap->scsi_host = shost; in ata_scsi_add_hosts()
4391 shost->transportt = ata_scsi_transport_template; in ata_scsi_add_hosts()
4392 shost->unique_id = ap->print_id; in ata_scsi_add_hosts()
4393 shost->max_id = 16; in ata_scsi_add_hosts()
4394 shost->max_lun = 1; in ata_scsi_add_hosts()
4395 shost->max_channel = 1; in ata_scsi_add_hosts()
4396 shost->max_cmd_len = 32; in ata_scsi_add_hosts()
4398 /* Schedule policy is determined by ->qc_defer() in ata_scsi_add_hosts()
4403 shost->max_host_blocked = 1; in ata_scsi_add_hosts()
4405 rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); in ata_scsi_add_hosts()
4413 while (--i >= 0) { in ata_scsi_add_hosts()
4414 struct Scsi_Host *shost = host->ports[i]->scsi_host; in ata_scsi_add_hosts()
4425 struct scsi_device *sdev = dev->sdev; in ata_scsi_assign_ofnode()
4426 struct device *d = ap->host->dev; in ata_scsi_assign_ofnode()
4427 struct device_node *np = d->of_node; in ata_scsi_assign_ofnode()
4437 if (val == dev->devno) { in ata_scsi_assign_ofnode()
4439 sdev->sdev_gendev.of_node = child; in ata_scsi_assign_ofnode()
4463 if (dev->sdev) in ata_scsi_scan_host()
4467 id = dev->devno; in ata_scsi_scan_host()
4469 channel = link->pmp; in ata_scsi_scan_host()
4471 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, in ata_scsi_scan_host()
4474 dev->sdev = sdev; in ata_scsi_scan_host()
4478 dev->sdev = NULL; in ata_scsi_scan_host()
4489 if (!dev->sdev) in ata_scsi_scan_host()
4509 * a few more chances. in ata_scsi_scan_host()
4511 if (--tries) { in ata_scsi_scan_host()
4520 queue_delayed_work(system_long_wq, &ap->hotplug_task, in ata_scsi_scan_host()
4525 * ata_scsi_offline_dev - offline attached SCSI device
4530 * function is called with host lock which protects dev->sdev
4541 if (dev->sdev) { in ata_scsi_offline_dev()
4542 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); in ata_scsi_offline_dev()
4549 * ata_scsi_remove_dev - remove attached SCSI device
4560 struct ata_port *ap = dev->link->ap; in ata_scsi_remove_dev()
4570 mutex_lock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4571 spin_lock_irqsave(ap->lock, flags); in ata_scsi_remove_dev()
4573 /* clearing dev->sdev is protected by host lock */ in ata_scsi_remove_dev()
4574 sdev = dev->sdev; in ata_scsi_remove_dev()
4575 dev->sdev = NULL; in ata_scsi_remove_dev()
4595 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_remove_dev()
4596 mutex_unlock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4600 dev_name(&sdev->sdev_gendev)); in ata_scsi_remove_dev()
4609 struct ata_port *ap = link->ap; in ata_scsi_handle_link_detach()
4615 if (!(dev->flags & ATA_DFLAG_DETACHED)) in ata_scsi_handle_link_detach()
4618 spin_lock_irqsave(ap->lock, flags); in ata_scsi_handle_link_detach()
4619 dev->flags &= ~ATA_DFLAG_DETACHED; in ata_scsi_handle_link_detach()
4620 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4630 * ata_scsi_media_change_notify - send media change event
4633 * Tell the block layer to send a media change notification
4641 if (dev->sdev) in ata_scsi_media_change_notify()
4642 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, in ata_scsi_media_change_notify()
4647 * ata_scsi_hotplug - SCSI part of hotplug
4650 * Perform SCSI part of hotplug. It's executed from a separate
4653 * synchronized with hot plugging with a mutex.
4664 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_scsi_hotplug()
4667 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4673 ata_scsi_handle_link_detach(&ap->link); in ata_scsi_hotplug()
4674 if (ap->pmp_link) in ata_scsi_hotplug()
4676 ata_scsi_handle_link_detach(&ap->pmp_link[i]); in ata_scsi_hotplug()
4681 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4685 * ata_scsi_user_scan - indication for user-initiated bus scan
4708 return -EINVAL; in ata_scsi_user_scan()
4712 return -EINVAL; in ata_scsi_user_scan()
4716 return -EINVAL; in ata_scsi_user_scan()
4720 spin_lock_irqsave(ap->lock, flags); in ata_scsi_user_scan()
4726 struct ata_eh_info *ehi = &link->eh_info; in ata_scsi_user_scan()
4727 ehi->probe_mask |= ATA_ALL_DEVICES; in ata_scsi_user_scan()
4728 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4734 struct ata_eh_info *ehi = &dev->link->eh_info; in ata_scsi_user_scan()
4735 ehi->probe_mask |= 1 << dev->devno; in ata_scsi_user_scan()
4736 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4738 rc = -EINVAL; in ata_scsi_user_scan()
4743 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4746 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4752 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
4770 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4771 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4775 struct scsi_device *sdev = dev->sdev; in ata_scsi_dev_rescan()
4781 if (ap->pflags & ATA_PFLAG_SUSPENDED) in ata_scsi_dev_rescan()
4789 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4792 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4800 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4801 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4803 /* Reschedule with a delay if scsi_rescan_device() returned an error */ in ata_scsi_dev_rescan()
4805 schedule_delayed_work(&ap->scsi_rescan_task, in ata_scsi_dev_rescan()