
Lines Matching +full:tf +full:- +full:a

2  *  libata-scsi.c - helper library for ATA
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * You should have received a copy of the GNU General Public License
28 * as Documentation/driver-api/libata.rst
31 * - http://www.t10.org/
32 * - http://www.t13.org/
56 #include "libata-transport.h"
82 RW_RECOVERY_MPAGE_LEN - 2,
92 CACHE_MPAGE_LEN - 2,
101 CONTROL_MPAGE_LEN - 2,
103 0, /* [QAM+QERR may be 1, see 05-359r1] */
105 0, 30 /* extended self test time, see 05-359r1 */
137 return -EINVAL; in ata_scsi_lpm_store()
139 spin_lock_irqsave(ap->lock, flags); in ata_scsi_lpm_store()
142 ata_for_each_dev(dev, &ap->link, ENABLED) { in ata_scsi_lpm_store()
143 if (dev->horkage & ATA_HORKAGE_NOLPM) { in ata_scsi_lpm_store()
144 count = -EOPNOTSUPP; in ata_scsi_lpm_store()
150 ap->target_lpm_policy = policy; in ata_scsi_lpm_store()
153 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_lpm_store()
163 if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) in ata_scsi_lpm_show()
164 return -EINVAL; in ata_scsi_lpm_show()
167 ata_lpm_policy_names[ap->target_lpm_policy]); in ata_scsi_lpm_show()
184 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_show()
186 spin_lock_irq(ap->lock); in ata_scsi_park_show()
189 rc = -ENODEV; in ata_scsi_park_show()
192 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_show()
193 rc = -EOPNOTSUPP; in ata_scsi_park_show()
197 link = dev->link; in ata_scsi_park_show()
199 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && in ata_scsi_park_show()
200 link->eh_context.unloaded_mask & (1 << dev->devno) && in ata_scsi_park_show()
201 time_after(dev->unpark_deadline, now)) in ata_scsi_park_show()
202 msecs = jiffies_to_msecs(dev->unpark_deadline - now); in ata_scsi_park_show()
207 spin_unlock_irq(ap->lock); in ata_scsi_park_show()
226 if (input < -2) in ata_scsi_park_store()
227 return -EINVAL; in ata_scsi_park_store()
229 rc = -EOVERFLOW; in ata_scsi_park_store()
233 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_store()
235 spin_lock_irqsave(ap->lock, flags); in ata_scsi_park_store()
238 rc = -ENODEV; in ata_scsi_park_store()
241 if (dev->class != ATA_DEV_ATA && in ata_scsi_park_store()
242 dev->class != ATA_DEV_ZAC) { in ata_scsi_park_store()
243 rc = -EOPNOTSUPP; in ata_scsi_park_store()
248 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_store()
249 rc = -EOPNOTSUPP; in ata_scsi_park_store()
253 dev->unpark_deadline = ata_deadline(jiffies, input); in ata_scsi_park_store()
254 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; in ata_scsi_park_store()
256 complete(&ap->park_req_pending); in ata_scsi_park_store()
259 case -1: in ata_scsi_park_store()
260 dev->flags &= ~ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
262 case -2: in ata_scsi_park_store()
263 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
268 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_park_store()
286 ap = ata_shost_to_port(sdev->host); in ata_ncq_prio_enable_show()
288 spin_lock_irq(ap->lock); in ata_ncq_prio_enable_show()
291 rc = -ENODEV; in ata_ncq_prio_enable_show()
295 ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE; in ata_ncq_prio_enable_show()
298 spin_unlock_irq(ap->lock); in ata_ncq_prio_enable_show()
317 return -EINVAL; in ata_ncq_prio_enable_store()
319 ap = ata_shost_to_port(sdev->host); in ata_ncq_prio_enable_store()
322 return -ENODEV; in ata_ncq_prio_enable_store()
324 spin_lock_irq(ap->lock); in ata_ncq_prio_enable_store()
326 dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE; in ata_ncq_prio_enable_store()
328 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE; in ata_ncq_prio_enable_store()
330 dev->link->eh_info.action |= ATA_EH_REVALIDATE; in ata_ncq_prio_enable_store()
331 dev->link->eh_info.flags |= ATA_EHI_QUIET; in ata_ncq_prio_enable_store()
333 spin_unlock_irq(ap->lock); in ata_ncq_prio_enable_store()
338 spin_lock_irq(ap->lock); in ata_ncq_prio_enable_store()
339 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) { in ata_ncq_prio_enable_store()
340 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE; in ata_ncq_prio_enable_store()
341 rc = -EIO; in ata_ncq_prio_enable_store()
343 spin_unlock_irq(ap->lock); in ata_ncq_prio_enable_store()
356 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_scsi_set_sense()
361 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; in ata_scsi_set_sense()
363 scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq); in ata_scsi_set_sense()
368 const struct ata_taskfile *tf) in ata_scsi_set_sense_information() argument
375 information = ata_tf_read_block(tf, dev); in ata_scsi_set_sense_information()
379 scsi_set_sense_information(cmd->sense_buffer, in ata_scsi_set_sense_information()
388 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_field()
397 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_parameter()
407 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) in ata_scsi_em_message_store()
408 return ap->ops->em_store(ap, buf, count); in ata_scsi_em_message_store()
409 return -EINVAL; in ata_scsi_em_message_store()
419 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) in ata_scsi_em_message_show()
420 return ap->ops->em_show(ap, buf); in ata_scsi_em_message_show()
421 return -EINVAL; in ata_scsi_em_message_show()
434 return snprintf(buf, 23, "%d\n", ap->em_message_type); in ata_scsi_em_message_type_show()
445 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_activity_show()
448 if (atadev && ap->ops->sw_activity_show && in ata_scsi_activity_show()
449 (ap->flags & ATA_FLAG_SW_ACTIVITY)) in ata_scsi_activity_show()
450 return ap->ops->sw_activity_show(atadev, buf); in ata_scsi_activity_show()
451 return -EINVAL; in ata_scsi_activity_show()
459 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_activity_store()
464 if (atadev && ap->ops->sw_activity_store && in ata_scsi_activity_store()
465 (ap->flags & ATA_FLAG_SW_ACTIVITY)) { in ata_scsi_activity_store()
469 rc = ap->ops->sw_activity_store(atadev, val); in ata_scsi_activity_store()
476 return -EINVAL; in ata_scsi_activity_store()
490 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
497 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
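A minimal sketch of the kind of geometry fixup described here, assuming the conventional 255-head/63-sector translation (the exact figures this helper uses are not visible in this listing, so treat the numbers as an assumption, not the kernel function itself):

	#include <stdint.h>

	static void example_bios_geometry(uint64_t capacity_sectors, int geom[3])
	{
		geom[0] = 255;                           /* heads (assumed) */
		geom[1] = 63;                            /* sectors per track (assumed) */
		geom[2] = capacity_sectors / (255 * 63); /* cylinders derived from capacity */
	}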
519 * ata_scsi_unlock_native_capacity - unlock native capacity
522 * This function is called if a partition on @sdev extends beyond
530 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_unlock_native_capacity()
534 spin_lock_irqsave(ap->lock, flags); in ata_scsi_unlock_native_capacity()
537 if (dev && dev->n_sectors < dev->n_native_sectors) { in ata_scsi_unlock_native_capacity()
538 dev->flags |= ATA_DFLAG_UNLOCK_HPA; in ata_scsi_unlock_native_capacity()
539 dev->link->eh_info.action |= ATA_EH_RESET; in ata_scsi_unlock_native_capacity()
543 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_unlock_native_capacity()
548 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
567 return -ENOMSG; in ata_get_identity()
569 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) in ata_get_identity()
570 return -EFAULT; in ata_get_identity()
572 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); in ata_get_identity()
574 return -EFAULT; in ata_get_identity()
576 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); in ata_get_identity()
578 return -EFAULT; in ata_get_identity()
580 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); in ata_get_identity()
582 return -EFAULT; in ata_get_identity()
588 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
610 return -EINVAL; in ata_cmd_ioctl()
613 return -EFAULT; in ata_cmd_ioctl()
622 rc = -ENOMEM; in ata_cmd_ioctl()
626 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ in ata_cmd_ioctl()
631 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_cmd_ioctl()
639 if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ in ata_cmd_ioctl()
658 /* If we set cc then ATA pass-through will cause a in ata_cmd_ioctl()
666 /* Send userspace a few ATA registers (same as drivers/ide) */ in ata_cmd_ioctl()
673 rc = -EFAULT; in ata_cmd_ioctl()
679 rc = -EIO; in ata_cmd_ioctl()
685 rc = -EFAULT; in ata_cmd_ioctl()
692 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
712 return -EINVAL; in ata_task_ioctl()
715 return -EFAULT; in ata_task_ioctl()
720 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_task_ioctl()
739 /* If we set cc then ATA pass-through will cause a in ata_task_ioctl()
758 rc = -EFAULT; in ata_task_ioctl()
763 rc = -EIO; in ata_task_ioctl()
773 if (ap->flags & ATA_FLAG_PIO_DMA) in ata_ioc32()
775 if (ap->pflags & ATA_PFLAG_PIO32) in ata_ioc32()
784 int rc = -EINVAL; in ata_sas_scsi_ioctl()
789 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
791 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
797 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
798 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { in ata_sas_scsi_ioctl()
800 ap->pflags |= ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
802 ap->pflags &= ~ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
805 rc = -EINVAL; in ata_sas_scsi_ioctl()
807 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
815 return -EACCES; in ata_sas_scsi_ioctl()
820 return -EACCES; in ata_sas_scsi_ioctl()
824 rc = -ENOTTY; in ata_sas_scsi_ioctl()
834 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), in ata_scsi_ioctl()
840 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
844 * Obtain a reference to an unused ata_queued_cmd structure,
845 * which is the basic libata structure representing a single
848 * If a command was available, fill in the SCSI-specific
863 qc = ata_qc_new_init(dev, cmd->request->tag); in ata_scsi_qc_new()
865 qc->scsicmd = cmd; in ata_scsi_qc_new()
866 qc->scsidone = cmd->scsi_done; in ata_scsi_qc_new()
868 qc->sg = scsi_sglist(cmd); in ata_scsi_qc_new()
869 qc->n_elem = scsi_sg_count(cmd); in ata_scsi_qc_new()
871 if (cmd->request->rq_flags & RQF_QUIET) in ata_scsi_qc_new()
872 qc->flags |= ATA_QCFLAG_QUIET; in ata_scsi_qc_new()
874 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); in ata_scsi_qc_new()
875 cmd->scsi_done(cmd); in ata_scsi_qc_new()
883 struct scsi_cmnd *scmd = qc->scsicmd; in ata_qc_set_pc_nbytes()
885 qc->extrabytes = scmd->request->extra_len; in ata_qc_set_pc_nbytes()
886 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; in ata_qc_set_pc_nbytes()
890 * ata_dump_status - user friendly display of error info
892 * @tf: ptr to filled out taskfile
896 * make-believe layer.
901 static void ata_dump_status(unsigned id, struct ata_taskfile *tf) in ata_dump_status() argument
903 u8 stat = tf->command, err = tf->feature; in ata_dump_status()
936 * ata_to_sense_error - convert ATA error to SCSI error
945 * Converts an ATA error into a SCSI error. Fill out pointers to
980 /* TRK0 - Track 0 not found */ in ata_to_sense_error()
987 /* SRV/IDNF - ID not found */ in ata_to_sense_error()
990 /* MC - Media Changed */ in ata_to_sense_error()
993 /* ECC - Uncorrectable ECC error */ in ata_to_sense_error()
996 /* BBD - block marked bad */ in ata_to_sense_error()
1052 * We need a sensible error return here, which is tricky, and one in ata_to_sense_error()
1053 * that won't cause people to do things like return a disk wrongly. in ata_to_sense_error()
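For orientation, an illustrative mapping (not exhaustive, and not the kernel's exact table) of ATA error-register bits to the SCSI sense key / ASC / ASCQ triples such a conversion typically produces; the sense-key and ASC/ASCQ values follow SPC:

	static const struct {
		unsigned char ata_err;        /* bit in the ATA error register */
		unsigned char sk, asc, ascq;  /* SCSI sense key, ASC, ASCQ */
	} example_err_map[] = {
		{ 0x10 /* IDNF */, 0x5 /* ILLEGAL REQUEST */, 0x21, 0x00 }, /* LBA out of range */
		{ 0x20 /* MC   */, 0x6 /* UNIT ATTENTION  */, 0x28, 0x00 }, /* medium may have changed */
		{ 0x40 /* UNC  */, 0x3 /* MEDIUM ERROR    */, 0x11, 0x04 }, /* unrecovered read error */
		{ 0x80 /* BBK  */, 0x3 /* MEDIUM ERROR    */, 0x11, 0x04 }, /* block marked bad */
	};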
1067 * ata_gen_passthru_sense - Generate check condition sense block.
1072 * of whether the command errored or not, return a sense
1077 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
1085 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_passthru_sense()
1086 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_passthru_sense() local
1087 unsigned char *sb = cmd->sense_buffer; in ata_gen_passthru_sense()
1089 int verbose = qc->ap->ops->error_handler == NULL; in ata_gen_passthru_sense()
1094 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; in ata_gen_passthru_sense()
1100 if (qc->err_mask || in ata_gen_passthru_sense()
1101 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_passthru_sense()
1102 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, in ata_gen_passthru_sense()
1104 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); in ata_gen_passthru_sense()
1107 * ATA PASS-THROUGH INFORMATION AVAILABLE in ata_gen_passthru_sense()
1110 scsi_build_sense_buffer(1, cmd->sense_buffer, in ata_gen_passthru_sense()
1114 if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { in ata_gen_passthru_sense()
1132 desc[3] = tf->feature; /* == error reg */ in ata_gen_passthru_sense()
1133 desc[5] = tf->nsect; in ata_gen_passthru_sense()
1134 desc[7] = tf->lbal; in ata_gen_passthru_sense()
1135 desc[9] = tf->lbam; in ata_gen_passthru_sense()
1136 desc[11] = tf->lbah; in ata_gen_passthru_sense()
1137 desc[12] = tf->device; in ata_gen_passthru_sense()
1138 desc[13] = tf->command; /* == status reg */ in ata_gen_passthru_sense()
1144 if (tf->flags & ATA_TFLAG_LBA48) { in ata_gen_passthru_sense()
1146 desc[4] = tf->hob_nsect; in ata_gen_passthru_sense()
1147 desc[6] = tf->hob_lbal; in ata_gen_passthru_sense()
1148 desc[8] = tf->hob_lbam; in ata_gen_passthru_sense()
1149 desc[10] = tf->hob_lbah; in ata_gen_passthru_sense()
1153 desc[0] = tf->feature; in ata_gen_passthru_sense()
1154 desc[1] = tf->command; /* status */ in ata_gen_passthru_sense()
1155 desc[2] = tf->device; in ata_gen_passthru_sense()
1156 desc[3] = tf->nsect; in ata_gen_passthru_sense()
1158 if (tf->flags & ATA_TFLAG_LBA48) { in ata_gen_passthru_sense()
1160 if (tf->hob_nsect) in ata_gen_passthru_sense()
1162 if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) in ata_gen_passthru_sense()
1165 desc[9] = tf->lbal; in ata_gen_passthru_sense()
1166 desc[10] = tf->lbam; in ata_gen_passthru_sense()
1167 desc[11] = tf->lbah; in ata_gen_passthru_sense()
1172 * ata_gen_ata_sense - generate a SCSI fixed sense block
1175 * Generate sense block for a failed ATA command @qc. Descriptor
1183 struct ata_device *dev = qc->dev; in ata_gen_ata_sense()
1184 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_ata_sense()
1185 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_ata_sense() local
1186 unsigned char *sb = cmd->sense_buffer; in ata_gen_ata_sense()
1187 int verbose = qc->ap->ops->error_handler == NULL; in ata_gen_ata_sense()
1193 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; in ata_gen_ata_sense()
1204 if (qc->err_mask || in ata_gen_ata_sense()
1205 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_ata_sense()
1206 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, in ata_gen_ata_sense()
1212 tf->command, qc->err_mask); in ata_gen_ata_sense()
1217 block = ata_tf_read_block(&qc->result_tf, dev); in ata_gen_ata_sense()
1226 sdev->use_10_for_rw = 1; in ata_scsi_sdev_config()
1227 sdev->use_10_for_ms = 1; in ata_scsi_sdev_config()
1228 sdev->no_write_same = 1; in ata_scsi_sdev_config()
1230 /* Schedule policy is determined by ->qc_defer() callback and in ata_scsi_sdev_config()
1235 sdev->max_device_blocked = 1; in ata_scsi_sdev_config()
1239 * atapi_drain_needed - Check whether data transfer may overflow
1261 return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC; in atapi_drain_needed()
1267 struct request_queue *q = sdev->request_queue; in ata_scsi_dev_config()
1269 if (!ata_id_has_unload(dev->id)) in ata_scsi_dev_config()
1270 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_dev_config()
1273 blk_queue_max_hw_sectors(q, dev->max_sectors); in ata_scsi_dev_config()
1275 if (dev->class == ATA_DEV_ATAPI) { in ata_scsi_dev_config()
1278 sdev->sector_size = ATA_SECT_SIZE; in ata_scsi_dev_config()
1281 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); in ata_scsi_dev_config()
1284 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); in ata_scsi_dev_config()
1287 return -ENOMEM; in ata_scsi_dev_config()
1292 sdev->sector_size = ata_id_logical_sector_size(dev->id); in ata_scsi_dev_config()
1293 sdev->manage_start_stop = 1; in ata_scsi_dev_config()
1303 if (sdev->sector_size > PAGE_SIZE) in ata_scsi_dev_config()
1306 sdev->sector_size); in ata_scsi_dev_config()
1308 blk_queue_update_dma_alignment(q, sdev->sector_size - 1); in ata_scsi_dev_config()
1310 if (dev->flags & ATA_DFLAG_AN) in ata_scsi_dev_config()
1311 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); in ata_scsi_dev_config()
1313 if (dev->flags & ATA_DFLAG_NCQ) { in ata_scsi_dev_config()
1316 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); in ata_scsi_dev_config()
1323 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsi_dev_config()
1324 sdev->security_supported = 1; in ata_scsi_dev_config()
1326 dev->sdev = sdev; in ata_scsi_dev_config()
1331 * ata_scsi_slave_config - Set SCSI device attributes
1336 * SCSI mid-layer behaviors.
1344 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_config()
1357 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
1362 * dev->sdev, this function doesn't have to do anything.
1363 * Otherwise, SCSI layer initiated warm-unplug is in progress.
1364 * Clear dev->sdev, schedule the device for ATA detach and invoke
1372 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_destroy()
1373 struct request_queue *q = sdev->request_queue; in ata_scsi_slave_destroy()
1377 if (!ap->ops->error_handler) in ata_scsi_slave_destroy()
1380 spin_lock_irqsave(ap->lock, flags); in ata_scsi_slave_destroy()
1382 if (dev && dev->sdev) { in ata_scsi_slave_destroy()
1384 dev->sdev = NULL; in ata_scsi_slave_destroy()
1385 dev->flags |= ATA_DFLAG_DETACH; in ata_scsi_slave_destroy()
1388 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_slave_destroy()
1390 kfree(q->dma_drain_buffer); in ata_scsi_slave_destroy()
1391 q->dma_drain_buffer = NULL; in ata_scsi_slave_destroy()
1392 q->dma_drain_size = 0; in ata_scsi_slave_destroy()
1396 * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
1401 * libsas and libata have different approaches for associating a sdev to
1411 if (queue_depth < 1 || queue_depth == sdev->queue_depth) in __ata_change_queue_depth()
1412 return sdev->queue_depth; in __ata_change_queue_depth()
1416 return sdev->queue_depth; in __ata_change_queue_depth()
1419 spin_lock_irqsave(ap->lock, flags); in __ata_change_queue_depth()
1420 dev->flags &= ~ATA_DFLAG_NCQ_OFF; in __ata_change_queue_depth()
1422 dev->flags |= ATA_DFLAG_NCQ_OFF; in __ata_change_queue_depth()
1425 spin_unlock_irqrestore(ap->lock, flags); in __ata_change_queue_depth()
1428 queue_depth = min(queue_depth, sdev->host->can_queue); in __ata_change_queue_depth()
1429 queue_depth = min(queue_depth, ata_id_queue_depth(dev->id)); in __ata_change_queue_depth()
1432 if (sdev->queue_depth == queue_depth) in __ata_change_queue_depth()
1433 return -EINVAL; in __ata_change_queue_depth()
1439 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
1443 * This is libata standard hostt->change_queue_depth callback.
1455 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_change_queue_depth()
1461 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
1473 * Zero on success, non-zero on error.
1477 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_start_stop_xlat()
1478 struct ata_taskfile *tf = &qc->tf; in ata_scsi_start_stop_xlat() local
1479 const u8 *cdb = scmd->cmnd; in ata_scsi_start_stop_xlat()
1483 if (scmd->cmd_len < 5) { in ata_scsi_start_stop_xlat()
1488 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_scsi_start_stop_xlat()
1489 tf->protocol = ATA_PROT_NODATA; in ata_scsi_start_stop_xlat()
1491 ; /* ignore IMMED bit, violates sat-r05 */ in ata_scsi_start_stop_xlat()
1505 tf->nsect = 1; /* 1 sector, lba=0 */ in ata_scsi_start_stop_xlat()
1507 if (qc->dev->flags & ATA_DFLAG_LBA) { in ata_scsi_start_stop_xlat()
1508 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_start_stop_xlat()
1510 tf->lbah = 0x0; in ata_scsi_start_stop_xlat()
1511 tf->lbam = 0x0; in ata_scsi_start_stop_xlat()
1512 tf->lbal = 0x0; in ata_scsi_start_stop_xlat()
1513 tf->device |= ATA_LBA; in ata_scsi_start_stop_xlat()
1516 tf->lbal = 0x1; /* sect */ in ata_scsi_start_stop_xlat()
1517 tf->lbam = 0x0; /* cyl low */ in ata_scsi_start_stop_xlat()
1518 tf->lbah = 0x0; /* cyl high */ in ata_scsi_start_stop_xlat()
1521 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ in ata_scsi_start_stop_xlat()
1526 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && in ata_scsi_start_stop_xlat()
1530 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && in ata_scsi_start_stop_xlat()
1535 tf->command = ATA_CMD_STANDBYNOW1; in ata_scsi_start_stop_xlat()
1548 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_start_stop_xlat()
1551 scmd->result = SAM_STAT_GOOD; in ata_scsi_start_stop_xlat()
1557 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
1567 * Zero on success, non-zero on error.
1571 struct ata_taskfile *tf = &qc->tf; in ata_scsi_flush_xlat() local
1573 tf->flags |= ATA_TFLAG_DEVICE; in ata_scsi_flush_xlat()
1574 tf->protocol = ATA_PROT_NODATA; in ata_scsi_flush_xlat()
1576 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) in ata_scsi_flush_xlat()
1577 tf->command = ATA_CMD_FLUSH_EXT; in ata_scsi_flush_xlat()
1579 tf->command = ATA_CMD_FLUSH; in ata_scsi_flush_xlat()
1582 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_flush_xlat()
1588 * scsi_6_lba_len - Get LBA and transfer length
1591 * Calculate LBA and transfer length for 6-byte commands.
1602 VPRINTK("six-byte command\n"); in scsi_6_lba_len()
1615 * scsi_10_lba_len - Get LBA and transfer length
1618 * Calculate LBA and transfer length for 10-byte commands.
1629 VPRINTK("ten-byte command\n"); in scsi_10_lba_len()
1644 * scsi_16_lba_len - Get LBA and transfer length
1647 * Calculate LBA and transfer length for 16-byte commands.
1658 VPRINTK("sixteen-byte command\n"); in scsi_16_lba_len()
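Sketch of the CDB decoding these three helpers perform, following the standard big-endian SCSI READ/WRITE layouts (hypothetical names): the 6-byte form packs a 21-bit LBA into bytes 1-3 and an 8-bit length into byte 4, the 10-byte form a 32-bit LBA and 16-bit length, the 16-byte form a 64-bit LBA and 32-bit length.

	#include <stdint.h>

	static void example_6_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
	{
		*lba = ((uint64_t)(cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
		*len = cdb[4];   /* 0 is treated as 256 blocks by the r/w path */
	}

	static void example_10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
	{
		*lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
		*len = (cdb[7] << 8) | cdb[8];
	}

	static void example_16_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
	{
		int i;

		*lba = 0;
		for (i = 2; i < 10; i++)         /* bytes 2..9: 64-bit LBA */
			*lba = (*lba << 8) | cdb[i];
		*len = ((uint32_t)cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13];
	}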
1679 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1688 * Zero on success, non-zero on error.
1692 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_verify_xlat()
1693 struct ata_taskfile *tf = &qc->tf; in ata_scsi_verify_xlat() local
1694 struct ata_device *dev = qc->dev; in ata_scsi_verify_xlat()
1695 u64 dev_sectors = qc->dev->n_sectors; in ata_scsi_verify_xlat()
1696 const u8 *cdb = scmd->cmnd; in ata_scsi_verify_xlat()
1701 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_verify_xlat()
1702 tf->protocol = ATA_PROT_NODATA; in ata_scsi_verify_xlat()
1705 if (scmd->cmd_len < 10) { in ata_scsi_verify_xlat()
1711 if (scmd->cmd_len < 16) { in ata_scsi_verify_xlat()
1728 if (dev->flags & ATA_DFLAG_LBA) { in ata_scsi_verify_xlat()
1729 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_verify_xlat()
1733 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1734 tf->device |= (block >> 24) & 0xf; in ata_scsi_verify_xlat()
1736 if (!(dev->flags & ATA_DFLAG_LBA48)) in ata_scsi_verify_xlat()
1740 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_verify_xlat()
1741 tf->command = ATA_CMD_VERIFY_EXT; in ata_scsi_verify_xlat()
1743 tf->hob_nsect = (n_block >> 8) & 0xff; in ata_scsi_verify_xlat()
1745 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_verify_xlat()
1746 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_verify_xlat()
1747 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_verify_xlat()
1752 tf->nsect = n_block & 0xff; in ata_scsi_verify_xlat()
1754 tf->lbah = (block >> 16) & 0xff; in ata_scsi_verify_xlat()
1755 tf->lbam = (block >> 8) & 0xff; in ata_scsi_verify_xlat()
1756 tf->lbal = block & 0xff; in ata_scsi_verify_xlat()
1758 tf->device |= ATA_LBA; in ata_scsi_verify_xlat()
1767 track = (u32)block / dev->sectors; in ata_scsi_verify_xlat()
1768 cyl = track / dev->heads; in ata_scsi_verify_xlat()
1769 head = track % dev->heads; in ata_scsi_verify_xlat()
1770 sect = (u32)block % dev->sectors + 1; in ata_scsi_verify_xlat()
1776 Cylinder: 0-65535 in ata_scsi_verify_xlat()
1777 Head: 0-15 in ata_scsi_verify_xlat()
1778 Sector: 1-255*/ in ata_scsi_verify_xlat()
1782 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1783 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ in ata_scsi_verify_xlat()
1784 tf->lbal = sect; in ata_scsi_verify_xlat()
1785 tf->lbam = cyl; in ata_scsi_verify_xlat()
1786 tf->lbah = cyl >> 8; in ata_scsi_verify_xlat()
1787 tf->device |= head; in ata_scsi_verify_xlat()
1793 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_verify_xlat()
1797 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_verify_xlat()
1802 scmd->result = SAM_STAT_GOOD; in ata_scsi_verify_xlat()
1808 struct request *rq = scmd->request; in ata_check_nblocks()
1814 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; in ata_check_nblocks()
1822 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1837 * Zero on success, non-zero on error.
1841 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_rw_xlat()
1842 const u8 *cdb = scmd->cmnd; in ata_scsi_rw_xlat()
1843 struct request *rq = scmd->request; in ata_scsi_rw_xlat()
1858 if (unlikely(scmd->cmd_len < 10)) { in ata_scsi_rw_xlat()
1870 if (unlikely(scmd->cmd_len < 6)) { in ata_scsi_rw_xlat()
1876 /* for 6-byte r/w commands, transfer length 0 in ata_scsi_rw_xlat()
1886 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_rw_xlat()
1897 DPRINTK("no-byte command\n"); in ata_scsi_rw_xlat()
1904 /* For 10-byte and 16-byte SCSI R/W commands, transfer in ata_scsi_rw_xlat()
1913 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_rw_xlat()
1914 qc->nbytes = n_block * scmd->device->sector_size; in ata_scsi_rw_xlat()
1916 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, in ata_scsi_rw_xlat()
1917 qc->hw_tag, class); in ata_scsi_rw_xlat()
1922 if (rc == -ERANGE) in ata_scsi_rw_xlat()
1924 /* treat all other errors as -EINVAL, fall through */ in ata_scsi_rw_xlat()
1926 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_rw_xlat()
1930 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_rw_xlat()
1935 scmd->result = SAM_STAT_GOOD; in ata_scsi_rw_xlat()
1941 struct scsi_cmnd *cmd = qc->scsicmd; in ata_qc_done()
1942 void (*done)(struct scsi_cmnd *) = qc->scsidone; in ata_qc_done()
1950 struct ata_port *ap = qc->ap; in ata_scsi_qc_complete()
1951 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_qc_complete()
1952 u8 *cdb = cmd->cmnd; in ata_scsi_qc_complete()
1953 int need_sense = (qc->err_mask != 0); in ata_scsi_qc_complete()
1955 /* For ATA pass thru (SAT) commands, generate a sense block if in ata_scsi_qc_complete()
1957 * generate because the user forced us to [CK_COND =1], a check in ata_scsi_qc_complete()
1962 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE in ata_scsi_qc_complete()
1967 else if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_scsi_qc_complete()
1968 cmd->result = SAM_STAT_CHECK_CONDITION; in ata_scsi_qc_complete()
1972 cmd->result = SAM_STAT_GOOD; in ata_scsi_qc_complete()
1974 if (need_sense && !ap->ops->error_handler) in ata_scsi_qc_complete()
1975 ata_dump_status(ap->print_id, &qc->result_tf); in ata_scsi_qc_complete()
1981 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1986 * Our ->queuecommand() function has decided that the SCSI
1995 * then cmd->result (and possibly cmd->sense_buffer) are assumed
2009 struct ata_port *ap = dev->link->ap; in ata_scsi_translate()
2019 /* data is present; dma-map it */ in ata_scsi_translate()
2020 if (cmd->sc_data_direction == DMA_FROM_DEVICE || in ata_scsi_translate()
2021 cmd->sc_data_direction == DMA_TO_DEVICE) { in ata_scsi_translate()
2029 qc->dma_dir = cmd->sc_data_direction; in ata_scsi_translate()
2032 qc->complete_fn = ata_scsi_qc_complete; in ata_scsi_translate()
2037 if (ap->ops->qc_defer) { in ata_scsi_translate()
2038 if ((rc = ap->ops->qc_defer(qc))) in ata_scsi_translate()
2050 cmd->scsi_done(cmd); in ata_scsi_translate()
2051 DPRINTK("EXIT - early finish (good or error)\n"); in ata_scsi_translate()
2056 cmd->result = (DID_ERROR << 16); in ata_scsi_translate()
2057 cmd->scsi_done(cmd); in ata_scsi_translate()
2059 DPRINTK("EXIT - internal\n"); in ata_scsi_translate()
2064 DPRINTK("EXIT - defer\n"); in ata_scsi_translate()
2078 * ata_scsi_rbuf_get - Map response buffer.
2104 * ata_scsi_rbuf_put - Unmap response buffer.
2125 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
2129 * Takes care of the hard work of simulating a SCSI command...
2133 * completed successfully (0), or not (in which case cmd->result
2144 struct scsi_cmnd *cmd = args->cmd; in ata_scsi_rbuf_fill()
2152 cmd->result = SAM_STAT_GOOD; in ata_scsi_rbuf_fill()
2156 * ata_scsiop_inq_std - Simulate INQUIRY command
2161 * with non-VPD INQUIRY command output.
2170 0x60, /* SAM-3 (no version claimed) */ in ata_scsiop_inq_std()
2173 0x20, /* SBC-2 (no version claimed) */ in ata_scsiop_inq_std()
2176 0x00 /* SPC-3 (no version claimed) */ in ata_scsiop_inq_std()
2180 0xA0, /* SAM-5 (no version claimed) */ in ata_scsiop_inq_std()
2183 0x00, /* SBC-4 (no version claimed) */ in ata_scsiop_inq_std()
2186 0xC0, /* SPC-5 (no version claimed) */ in ata_scsiop_inq_std()
2195 0x5, /* claim SPC-3 version compatibility */ in ata_scsiop_inq_std()
2197 95 - 4, in ata_scsiop_inq_std()
2206 * AHCI port says it's external (Hotplug-capable, eSATA). in ata_scsiop_inq_std()
2208 if (ata_id_removable(args->id) || in ata_scsiop_inq_std()
2209 (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) in ata_scsiop_inq_std()
2212 if (args->dev->class == ATA_DEV_ZAC) { in ata_scsiop_inq_std()
2214 hdr[2] = 0x7; /* claim SPC-5 version compatibility */ in ata_scsiop_inq_std()
2219 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); in ata_scsiop_inq_std()
2222 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); in ata_scsiop_inq_std()
2224 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); in ata_scsiop_inq_std()
2227 memcpy(&rbuf[32], "n/a ", 4); in ata_scsiop_inq_std()
2229 if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC) in ata_scsiop_inq_std()
2238 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
2262 if (!(args->dev->flags & ATA_DFLAG_ZAC)) in ata_scsiop_inq_00()
2263 num_pages--; in ata_scsiop_inq_00()
2270 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
2289 ata_id_string(args->id, (unsigned char *) &rbuf[4], in ata_scsiop_inq_80()
2295 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
2300 * - vendor specific ASCII containing the ATA serial number
2301 * - SAT defined "t10 vendor id based" containing ASCII vendor
2319 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2331 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, in ata_scsiop_inq_83()
2334 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, in ata_scsiop_inq_83()
2338 if (ata_id_has_wwn(args->id)) { in ata_scsiop_inq_83()
2345 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2349 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ in ata_scsiop_inq_83()
2354 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
2358 * Yields SAT-specified ATA VPD page.
2365 struct ata_taskfile tf; in ata_scsiop_inq_89() local
2367 memset(&tf, 0, sizeof(tf)); in ata_scsiop_inq_89()
2379 tf.command = ATA_DRDY; /* really, this is Status reg */ in ata_scsiop_inq_89()
2380 tf.lbal = 0x1; in ata_scsiop_inq_89()
2381 tf.nsect = 0x1; in ata_scsiop_inq_89()
2383 ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */ in ata_scsiop_inq_89()
2388 memcpy(&rbuf[60], &args->id[0], 512); in ata_scsiop_inq_89()
2394 struct ata_device *dev = args->dev; in ata_scsiop_inq_b0()
2403 * This is always one physical block, but for disks with a smaller in ata_scsiop_inq_b0()
2407 min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); in ata_scsiop_inq_b0()
2413 * The ATA spec doesn't even know about a granularity or alignment in ata_scsiop_inq_b0()
2415 * VPD page entries, but we have to specify a granularity to signal in ata_scsiop_inq_b0()
2416 * that we support some form of unmap - in this case via WRITE SAME in ata_scsiop_inq_b0()
2419 if (ata_id_has_trim(args->id)) { in ata_scsiop_inq_b0()
2422 if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) in ata_scsiop_inq_b0()
2423 max_blocks = 128 << (20 - SECTOR_SHIFT); in ata_scsiop_inq_b0()
2434 int form_factor = ata_id_form_factor(args->id); in ata_scsiop_inq_b1()
2435 int media_rotation_rate = ata_id_rotation_rate(args->id); in ata_scsiop_inq_b1()
2436 u8 zoned = ata_id_zoned_cap(args->id); in ata_scsiop_inq_b1()
2451 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ in ata_scsiop_inq_b2()
2462 * zbc-r05 SCSI Zoned Block device characteristics VPD page in ata_scsiop_inq_b6()
2468 * URSWRZ bit is only meaningful for host-managed ZAC drives in ata_scsiop_inq_b6()
2470 if (args->dev->zac_zoned_cap & 1) in ata_scsiop_inq_b6()
2472 put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]); in ata_scsiop_inq_b6()
2473 put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]); in ata_scsiop_inq_b6()
2474 put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]); in ata_scsiop_inq_b6()
2480 * modecpy - Prepare response for MODE SENSE
2486 * Generate a generic MODE SENSE page for either current or changeable
2496 memset(dest + 2, 0, n - 2); in modecpy()
2503 * ata_msense_caching - Simulate MODE SENSE caching info page
2508 * Generate a caching info page, which conditionally indicates
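A minimal sketch of what the conditional reporting amounts to: the CACHING mode page (page code 08h) carries the drive's current write-cache state in its WCE bit. The helper name and the wce_enabled flag are illustrative stand-ins for the IDENTIFY-data lookup:

	static void example_fill_caching_page(unsigned char *buf, unsigned int page_len,
					      int wce_enabled)
	{
		buf[0] = 0x08;			/* page code: CACHING */
		buf[1] = page_len - 2;		/* page length excludes the 2-byte header */
		if (wce_enabled)
			buf[2] |= 1 << 2;	/* WCE: write cache enabled */
		/* remaining bytes keep the template defaults (buf assumed pre-filled) */
	}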
2528 * ata_msense_control - Simulate MODE SENSE control mode page
2533 * Generate a generic MODE SENSE control mode page.
2545 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_msense_control()
2553 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
2557 * Generate a generic MODE SENSE r/w error recovery page.
2570 * We can turn this into a real blacklist if it's needed, for now just
2594 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
2607 struct ata_device *dev = args->dev; in ata_scsiop_mode_sense()
2608 u8 *scsicmd = args->cmd->cmnd, *p = rbuf; in ata_scsiop_mode_sense()
2663 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2667 p += ata_msense_control(args->dev, p, page_control == 1); in ata_scsiop_mode_sense()
2672 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2673 p += ata_msense_control(args->dev, p, page_control == 1); in ata_scsiop_mode_sense()
2682 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) && in ata_scsiop_mode_sense()
2683 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) in ata_scsiop_mode_sense()
2687 rbuf[0] = p - rbuf - 1; in ata_scsiop_mode_sense()
2694 unsigned int output_len = p - rbuf - 2; in ata_scsiop_mode_sense()
2707 ata_scsi_set_invalid_field(dev, args->cmd, fp, bp); in ata_scsiop_mode_sense()
2711 ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); in ata_scsiop_mode_sense()
2717 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2728 struct ata_device *dev = args->dev; in ata_scsiop_read_cap()
2729 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ in ata_scsiop_read_cap()
2734 sector_size = ata_id_logical_sector_size(dev->id); in ata_scsiop_read_cap()
2735 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); in ata_scsiop_read_cap()
2736 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); in ata_scsiop_read_cap()
2740 if (args->cmd->cmnd[0] == READ_CAPACITY) { in ata_scsiop_read_cap()
2744 /* sector count, 32-bit */ in ata_scsiop_read_cap()
2756 /* sector count, 64-bit */ in ata_scsiop_read_cap()
2777 if (ata_id_has_trim(args->id) && in ata_scsiop_read_cap()
2778 !(dev->horkage & ATA_HORKAGE_NOTRIM)) { in ata_scsiop_read_cap()
2781 if (ata_id_has_zero_after_trim(args->id) && in ata_scsiop_read_cap()
2782 dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { in ata_scsiop_read_cap()
2787 if (ata_id_zoned_cap(args->id) || in ata_scsiop_read_cap()
2788 args->dev->class == ATA_DEV_ZAC) in ata_scsiop_read_cap()
2795 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2814 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { in atapi_sense_complete()
2817 * sense descriptors, since that's only in atapi_sense_complete()
2829 return (ap->flags & ATA_FLAG_PIO_DMA); in ata_pio_use_silly()
2834 struct ata_port *ap = qc->ap; in atapi_request_sense()
2835 struct scsi_cmnd *cmd = qc->scsicmd; in atapi_request_sense()
2839 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in atapi_request_sense()
2842 if (ap->ops->sff_tf_read) in atapi_request_sense()
2843 ap->ops->sff_tf_read(ap, &qc->tf); in atapi_request_sense()
2846 /* fill these in, for the case where they are -not- overwritten */ in atapi_request_sense()
2847 cmd->sense_buffer[0] = 0x70; in atapi_request_sense()
2848 cmd->sense_buffer[2] = qc->tf.feature >> 4; in atapi_request_sense()
2853 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); in atapi_request_sense()
2854 ata_sg_init(qc, &qc->sgent, 1); in atapi_request_sense()
2855 qc->dma_dir = DMA_FROM_DEVICE; in atapi_request_sense()
2857 memset(&qc->cdb, 0, qc->dev->cdb_len); in atapi_request_sense()
2858 qc->cdb[0] = REQUEST_SENSE; in atapi_request_sense()
2859 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; in atapi_request_sense()
2861 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in atapi_request_sense()
2862 qc->tf.command = ATA_CMD_PACKET; in atapi_request_sense()
2865 qc->tf.protocol = ATAPI_PROT_DMA; in atapi_request_sense()
2866 qc->tf.feature |= ATAPI_PKT_DMA; in atapi_request_sense()
2868 qc->tf.protocol = ATAPI_PROT_PIO; in atapi_request_sense()
2869 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; in atapi_request_sense()
2870 qc->tf.lbah = 0; in atapi_request_sense()
2872 qc->nbytes = SCSI_SENSE_BUFFERSIZE; in atapi_request_sense()
2874 qc->complete_fn = atapi_sense_complete; in atapi_request_sense()
2885 * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
2903 struct scsi_cmnd *cmd = qc->scsicmd; in atapi_qc_complete()
2904 unsigned int err_mask = qc->err_mask; in atapi_qc_complete()
2909 if (unlikely(qc->ap->ops->error_handler && in atapi_qc_complete()
2910 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { in atapi_qc_complete()
2912 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2914 * translation of taskfile registers into a in atapi_qc_complete()
2921 /* SCSI EH automatically locks door if sdev->locked is in atapi_qc_complete()
2924 * creates a loop - SCSI EH issues door lock which in atapi_qc_complete()
2928 * If door lock fails, always clear sdev->locked to in atapi_qc_complete()
2932 * sure qc->dev->sdev isn't NULL before dereferencing. in atapi_qc_complete()
2934 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) in atapi_qc_complete()
2935 qc->dev->sdev->locked = 0; in atapi_qc_complete()
2937 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; in atapi_qc_complete()
2944 cmd->result = SAM_STAT_CHECK_CONDITION; in atapi_qc_complete()
2950 * sense descriptors, since that's only in atapi_qc_complete()
2955 if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0) in atapi_qc_complete()
2957 cmd->result = SAM_STAT_GOOD; in atapi_qc_complete()
2963 * atapi_xlat - Initialize PACKET taskfile
2970 * Zero on success, non-zero on failure.
2974 struct scsi_cmnd *scmd = qc->scsicmd; in atapi_xlat()
2975 struct ata_device *dev = qc->dev; in atapi_xlat()
2976 int nodata = (scmd->sc_data_direction == DMA_NONE); in atapi_xlat()
2977 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); in atapi_xlat()
2980 memset(qc->cdb, 0, dev->cdb_len); in atapi_xlat()
2981 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); in atapi_xlat()
2983 qc->complete_fn = atapi_qc_complete; in atapi_xlat()
2985 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in atapi_xlat()
2986 if (scmd->sc_data_direction == DMA_TO_DEVICE) { in atapi_xlat()
2987 qc->tf.flags |= ATA_TFLAG_WRITE; in atapi_xlat()
2991 qc->tf.command = ATA_CMD_PACKET; in atapi_xlat()
3032 qc->tf.lbam = (nbytes & 0xFF); in atapi_xlat()
3033 qc->tf.lbah = (nbytes >> 8); in atapi_xlat()
3036 qc->tf.protocol = ATAPI_PROT_NODATA; in atapi_xlat()
3038 qc->tf.protocol = ATAPI_PROT_PIO; in atapi_xlat()
3041 qc->tf.protocol = ATAPI_PROT_DMA; in atapi_xlat()
3042 qc->tf.feature |= ATAPI_PKT_DMA; in atapi_xlat()
3044 if ((dev->flags & ATA_DFLAG_DMADIR) && in atapi_xlat()
3045 (scmd->sc_data_direction != DMA_TO_DEVICE)) in atapi_xlat()
3047 qc->tf.feature |= ATAPI_DMADIR; in atapi_xlat()
3051 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE in atapi_xlat()
3060 devno < ata_link_max_devices(&ap->link))) in ata_find_dev()
3061 return &ap->link.device[devno]; in ata_find_dev()
3064 devno < ap->nr_pmp_links)) in ata_find_dev()
3065 return &ap->pmp_link[devno].device[0]; in ata_find_dev()
3078 if (unlikely(scsidev->channel || scsidev->lun)) in __ata_scsi_find_dev()
3080 devno = scsidev->id; in __ata_scsi_find_dev()
3082 if (unlikely(scsidev->id || scsidev->lun)) in __ata_scsi_find_dev()
3084 devno = scsidev->channel; in __ata_scsi_find_dev()
3091 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
3118 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
3119 * @byte1: Byte 1 from pass-thru CDB.
3128 case 3: /* Non-data */ in ata_scsi_map_proto()
3132 case 10: /* UDMA Data-in */ in ata_scsi_map_proto()
3133 case 11: /* UDMA Data-Out */ in ata_scsi_map_proto()
3136 case 4: /* PIO Data-in */ in ata_scsi_map_proto()
3137 case 5: /* PIO Data-out */ in ata_scsi_map_proto()
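Sketch of the SAT protocol-field mapping this helper performs; the protocol value sits in bits 1-4 of byte 1 of the ATA PASS-THROUGH CDB. Only the cases visible above are shown (the real function handles more), and the ATA_PROT_* constants are assumed to come from <linux/ata.h>:

	static unsigned char example_map_proto(unsigned char byte1)
	{
		switch ((byte1 >> 1) & 0xf) {
		case 3:			/* Non-data */
			return ATA_PROT_NODATA;
		case 4:			/* PIO Data-in */
		case 5:			/* PIO Data-out */
			return ATA_PROT_PIO;
		case 10:		/* UDMA Data-in */
		case 11:		/* UDMA Data-Out */
			return ATA_PROT_DMA;
		default:
			return ATA_PROT_UNKNOWN;
		}
	}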
3157 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
3160 * Handles either 12, 16, or 32-byte versions of the CDB.
3163 * Zero on success, non-zero on failure.
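One detail worth calling out before the field assignments below: in the 32-byte (variable-length, opcode 7Fh) form, the protocol and transfer-flag bytes sit 9 bytes further into the CDB than in the 12/16-byte forms, which is what the cdb[1 + cdb_offset] lookup below relies on. A sketch, assuming 7Fh is the SCSI VARIABLE LENGTH opcode:

	static int example_pass_thru_cdb_offset(const unsigned char *cdb)
	{
		/* the 32-byte form's protocol/flag fields start after header bytes 0-9 */
		return (cdb[0] == 0x7f) ? 9 : 0;
	}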
3167 struct ata_taskfile *tf = &(qc->tf); in ata_scsi_pass_thru() local
3168 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_pass_thru()
3169 struct ata_device *dev = qc->dev; in ata_scsi_pass_thru()
3170 const u8 *cdb = scmd->cmnd; in ata_scsi_pass_thru()
3174 /* 7Fh variable length cmd means an ATA pass-thru(32) */ in ata_scsi_pass_thru()
3178 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); in ata_scsi_pass_thru()
3179 if (tf->protocol == ATA_PROT_UNKNOWN) { in ata_scsi_pass_thru()
3184 if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0) in ata_scsi_pass_thru()
3185 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_pass_thru()
3188 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_pass_thru()
3196 * 16-byte CDB - may contain extended commands. in ata_scsi_pass_thru()
3201 tf->hob_feature = cdb[3]; in ata_scsi_pass_thru()
3202 tf->hob_nsect = cdb[5]; in ata_scsi_pass_thru()
3203 tf->hob_lbal = cdb[7]; in ata_scsi_pass_thru()
3204 tf->hob_lbam = cdb[9]; in ata_scsi_pass_thru()
3205 tf->hob_lbah = cdb[11]; in ata_scsi_pass_thru()
3206 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3208 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3213 tf->feature = cdb[4]; in ata_scsi_pass_thru()
3214 tf->nsect = cdb[6]; in ata_scsi_pass_thru()
3215 tf->lbal = cdb[8]; in ata_scsi_pass_thru()
3216 tf->lbam = cdb[10]; in ata_scsi_pass_thru()
3217 tf->lbah = cdb[12]; in ata_scsi_pass_thru()
3218 tf->device = cdb[13]; in ata_scsi_pass_thru()
3219 tf->command = cdb[14]; in ata_scsi_pass_thru()
3222 * 12-byte CDB - incapable of extended commands. in ata_scsi_pass_thru()
3224 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3226 tf->feature = cdb[3]; in ata_scsi_pass_thru()
3227 tf->nsect = cdb[4]; in ata_scsi_pass_thru()
3228 tf->lbal = cdb[5]; in ata_scsi_pass_thru()
3229 tf->lbam = cdb[6]; in ata_scsi_pass_thru()
3230 tf->lbah = cdb[7]; in ata_scsi_pass_thru()
3231 tf->device = cdb[8]; in ata_scsi_pass_thru()
3232 tf->command = cdb[9]; in ata_scsi_pass_thru()
3235 * 32-byte CDB - may contain extended command fields. in ata_scsi_pass_thru()
3240 tf->hob_feature = cdb[20]; in ata_scsi_pass_thru()
3241 tf->hob_nsect = cdb[22]; in ata_scsi_pass_thru()
3242 tf->hob_lbal = cdb[16]; in ata_scsi_pass_thru()
3243 tf->hob_lbam = cdb[15]; in ata_scsi_pass_thru()
3244 tf->hob_lbah = cdb[14]; in ata_scsi_pass_thru()
3245 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3247 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3249 tf->feature = cdb[21]; in ata_scsi_pass_thru()
3250 tf->nsect = cdb[23]; in ata_scsi_pass_thru()
3251 tf->lbal = cdb[19]; in ata_scsi_pass_thru()
3252 tf->lbam = cdb[18]; in ata_scsi_pass_thru()
3253 tf->lbah = cdb[17]; in ata_scsi_pass_thru()
3254 tf->device = cdb[24]; in ata_scsi_pass_thru()
3255 tf->command = cdb[25]; in ata_scsi_pass_thru()
3256 tf->auxiliary = get_unaligned_be32(&cdb[28]); in ata_scsi_pass_thru()
3260 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
3261 tf->nsect = qc->hw_tag << 3; in ata_scsi_pass_thru()
3264 tf->device = dev->devno ? in ata_scsi_pass_thru()
3265 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; in ata_scsi_pass_thru()
3267 switch (tf->command) { in ata_scsi_pass_thru()
3268 /* READ/WRITE LONG use a non-standard sect_size */ in ata_scsi_pass_thru()
3273 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { in ata_scsi_pass_thru()
3277 qc->sect_size = scsi_bufflen(scmd); in ata_scsi_pass_thru()
3311 qc->sect_size = scmd->device->sector_size; in ata_scsi_pass_thru()
3316 qc->sect_size = ATA_SECT_SIZE; in ata_scsi_pass_thru()
3321 * write indication (used for PIO/DMA setup), result TF is in ata_scsi_pass_thru()
3324 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_pass_thru()
3325 if (scmd->sc_data_direction == DMA_TO_DEVICE) in ata_scsi_pass_thru()
3326 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_pass_thru()
3328 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; in ata_scsi_pass_thru()
3339 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) { in ata_scsi_pass_thru()
3345 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { in ata_scsi_pass_thru()
3351 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { in ata_scsi_pass_thru()
3356 if (is_multi_taskfile(tf)) { in ata_scsi_pass_thru()
3362 if (multi_count != dev->multi_count) in ata_scsi_pass_thru()
3368 * Filter SET_FEATURES - XFER MODE command -- otherwise, in ata_scsi_pass_thru()
3369 * SET_FEATURES - XFER MODE must be preceded/succeeded in ata_scsi_pass_thru()
3370 * by an update to hardware-specific registers for each in ata_scsi_pass_thru()
3371 * controller (i.e. the reason for ->set_piomode(), in ata_scsi_pass_thru()
3372 * ->set_dmamode(), and ->post_set_mode() hooks). in ata_scsi_pass_thru()
3374 if (tf->command == ATA_CMD_SET_FEATURES && in ata_scsi_pass_thru()
3375 tf->feature == SETFEATURES_XFER) { in ata_scsi_pass_thru()
3384 * have a real reason for wanting to use them. This ensures in ata_scsi_pass_thru()
3389 * Note that for ATA8 we can issue a DCS change and DCS freeze lock in ata_scsi_pass_thru()
3395 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { in ata_scsi_pass_thru()
3408 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
3414 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
3431 struct scsi_device *sdp = cmd->device; in ata_format_dsm_trim_descr()
3432 size_t len = sdp->sector_size; in ata_format_dsm_trim_descr()
3452 count -= 0xffff; in ata_format_dsm_trim_descr()
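The packing being built here, per the ACS DATA SET MANAGEMENT definition: each 8-byte range entry holds a 48-bit starting LBA in the low bits and a 16-bit sector count in the top 16 bits, stored little-endian; ranges longer than 0xffff sectors are split across entries, which is why the count is walked down by 0xffff above. A standalone sketch of one entry:

	#include <stdint.h>

	static uint64_t example_trim_range_entry(uint64_t lba, uint32_t count)
	{
		/* caller guarantees count <= 0xffff and lba fits in 48 bits;
		 * the kernel stores the result little-endian in the payload buffer */
		return ((uint64_t)(count & 0xffff) << 48) | (lba & 0xffffffffffffULL);
	}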
3462 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
3465 * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
3469 * - When set translate to DSM TRIM
3470 * - When clear translate to SCT Write Same
3474 struct ata_taskfile *tf = &qc->tf; in ata_scsi_write_same_xlat() local
3475 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_write_same_xlat()
3476 struct scsi_device *sdp = scmd->device; in ata_scsi_write_same_xlat()
3477 size_t len = sdp->sector_size; in ata_scsi_write_same_xlat()
3478 struct ata_device *dev = qc->dev; in ata_scsi_write_same_xlat()
3479 const u8 *cdb = scmd->cmnd; in ata_scsi_write_same_xlat()
3489 if (unlikely(!dev->dma_mode)) in ata_scsi_write_same_xlat()
3497 if (unlikely(blk_rq_is_passthrough(scmd->request))) in ata_scsi_write_same_xlat()
3500 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_write_same_xlat()
3507 (dev->horkage & ATA_HORKAGE_NOTRIM) || in ata_scsi_write_same_xlat()
3508 !ata_id_has_trim(dev->id)) { in ata_scsi_write_same_xlat()
3520 * WRITE SAME always has a sector sized buffer as payload, this in ata_scsi_write_same_xlat()
3521 * should never be a multiple entry S/G list. in ata_scsi_write_same_xlat()
3528 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) in ata_scsi_write_same_xlat()
3538 tf->protocol = ATA_PROT_NCQ; in ata_scsi_write_same_xlat()
3539 tf->command = ATA_CMD_FPDMA_SEND; in ata_scsi_write_same_xlat()
3540 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; in ata_scsi_write_same_xlat()
3541 tf->nsect = qc->hw_tag << 3; in ata_scsi_write_same_xlat()
3542 tf->hob_feature = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3543 tf->feature = size / 512; in ata_scsi_write_same_xlat()
3545 tf->auxiliary = 1; in ata_scsi_write_same_xlat()
3547 tf->protocol = ATA_PROT_DMA; in ata_scsi_write_same_xlat()
3548 tf->hob_feature = 0; in ata_scsi_write_same_xlat()
3549 tf->feature = ATA_DSM_TRIM; in ata_scsi_write_same_xlat()
3550 tf->hob_nsect = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3551 tf->nsect = size / 512; in ata_scsi_write_same_xlat()
3552 tf->command = ATA_CMD_DSM; in ata_scsi_write_same_xlat()
3555 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | in ata_scsi_write_same_xlat()
3576 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
3580 * Yields a subset to satisfy scsi_report_opcode()
3587 struct ata_device *dev = args->dev; in ata_scsiop_maint_in()
3588 u8 *cdb = args->cmd->cmnd; in ata_scsiop_maint_in()
3629 if (ata_id_zoned_cap(dev->id) || in ata_scsiop_maint_in()
3630 dev->class == ATA_DEV_ZAC) in ata_scsiop_maint_in()
3635 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsiop_maint_in()
3647 * ata_scsi_report_zones_complete - convert ATA output
3650 * Convert T-13 little-endian field representation into
3651 * T-10 big-endian field representation.
3652 * What a mess.
3656 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_report_zones_complete()
3719 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_in_xlat() local
3720 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_in_xlat()
3721 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_in_xlat()
3722 u16 sect, fp = (u16)-1; in ata_scsi_zbc_in_xlat()
3727 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_in_xlat()
3728 ata_dev_warn(qc->dev, "invalid cdb length %d\n", in ata_scsi_zbc_in_xlat()
3729 scmd->cmd_len); in ata_scsi_zbc_in_xlat()
3735 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", in ata_scsi_zbc_in_xlat()
3741 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); in ata_scsi_zbc_in_xlat()
3747 * and uses a 16 bit value for the transfer count. in ata_scsi_zbc_in_xlat()
3750 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); in ata_scsi_zbc_in_xlat()
3756 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_in_xlat()
3757 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { in ata_scsi_zbc_in_xlat()
3758 tf->protocol = ATA_PROT_NCQ; in ata_scsi_zbc_in_xlat()
3759 tf->command = ATA_CMD_FPDMA_RECV; in ata_scsi_zbc_in_xlat()
3760 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; in ata_scsi_zbc_in_xlat()
3761 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_in_xlat()
3762 tf->feature = sect & 0xff; in ata_scsi_zbc_in_xlat()
3763 tf->hob_feature = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3764 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); in ata_scsi_zbc_in_xlat()
3766 tf->command = ATA_CMD_ZAC_MGMT_IN; in ata_scsi_zbc_in_xlat()
3767 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; in ata_scsi_zbc_in_xlat()
3768 tf->protocol = ATA_PROT_DMA; in ata_scsi_zbc_in_xlat()
3769 tf->hob_feature = options; in ata_scsi_zbc_in_xlat()
3770 tf->hob_nsect = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3771 tf->nsect = sect & 0xff; in ata_scsi_zbc_in_xlat()
3773 tf->device = ATA_LBA; in ata_scsi_zbc_in_xlat()
3774 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_in_xlat()
3775 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3776 tf->lbal = block & 0xff; in ata_scsi_zbc_in_xlat()
3777 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_in_xlat()
3778 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_in_xlat()
3779 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_in_xlat()
3781 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_in_xlat()
3782 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_scsi_zbc_in_xlat()
3786 qc->complete_fn = ata_scsi_report_zones_complete; in ata_scsi_zbc_in_xlat()
3791 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_zbc_in_xlat()
3796 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_in_xlat()
3802 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_out_xlat() local
3803 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_out_xlat()
3804 struct ata_device *dev = qc->dev; in ata_scsi_zbc_out_xlat()
3805 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_out_xlat()
3809 u16 fp = (u16)-1; in ata_scsi_zbc_out_xlat()
3811 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_out_xlat()
3837 } else if (block >= dev->n_sectors) { in ata_scsi_zbc_out_xlat()
3839 * Block must be a valid zone ID (a zone start LBA). in ata_scsi_zbc_out_xlat()
3845 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_out_xlat()
3846 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { in ata_scsi_zbc_out_xlat()
3847 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_zbc_out_xlat()
3848 tf->command = ATA_CMD_NCQ_NON_DATA; in ata_scsi_zbc_out_xlat()
3849 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3850 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_out_xlat()
3851 tf->auxiliary = sa | ((u16)all << 8); in ata_scsi_zbc_out_xlat()
3853 tf->protocol = ATA_PROT_NODATA; in ata_scsi_zbc_out_xlat()
3854 tf->command = ATA_CMD_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3855 tf->feature = sa; in ata_scsi_zbc_out_xlat()
3856 tf->hob_feature = all; in ata_scsi_zbc_out_xlat()
3858 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_out_xlat()
3859 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_out_xlat()
3860 tf->lbal = block & 0xff; in ata_scsi_zbc_out_xlat()
3861 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_out_xlat()
3862 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_out_xlat()
3863 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_out_xlat()
3864 tf->device = ATA_LBA; in ata_scsi_zbc_out_xlat()
3865 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_out_xlat()
3870 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_zbc_out_xlat()
3874 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_out_xlat()
3879 * ata_mselect_caching - Simulate MODE SELECT for caching info page
3885 * Prepare a taskfile to modify caching information for the device.
3893 struct ata_taskfile *tf = &qc->tf; in ata_mselect_caching() local
3894 struct ata_device *dev = qc->dev; in ata_mselect_caching()
3900 * The first two bytes of def_cache_mpage are a header, so offsets in ata_mselect_caching()
3904 if (len != CACHE_MPAGE_LEN - 2) { in ata_mselect_caching()
3905 if (len < CACHE_MPAGE_LEN - 2) in ata_mselect_caching()
3908 *fp = CACHE_MPAGE_LEN - 2; in ata_mselect_caching()
3909 return -EINVAL; in ata_mselect_caching()
3915 * Check that read-only bits are not modified. in ata_mselect_caching()
3917 ata_msense_caching(dev->id, mpage, false); in ata_mselect_caching()
3918 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { in ata_mselect_caching()
3923 return -EINVAL; in ata_mselect_caching()
3927 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_caching()
3928 tf->protocol = ATA_PROT_NODATA; in ata_mselect_caching()
3929 tf->nsect = 0; in ata_mselect_caching()
3930 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_caching()
3931 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; in ata_mselect_caching()
3936 * ata_mselect_control - Simulate MODE SELECT for control page
3942 * Prepare a taskfile to modify caching information for the device.
3950 struct ata_device *dev = qc->dev; in ata_mselect_control()
3956 * The first two bytes of def_control_mpage are a header, so offsets in ata_mselect_control()
3960 if (len != CONTROL_MPAGE_LEN - 2) { in ata_mselect_control()
3961 if (len < CONTROL_MPAGE_LEN - 2) in ata_mselect_control()
3964 *fp = CONTROL_MPAGE_LEN - 2; in ata_mselect_control()
3965 return -EINVAL; in ata_mselect_control()
3971 * Check that read-only bits are not modified. in ata_mselect_control()
3974 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { in ata_mselect_control()
3979 return -EINVAL; in ata_mselect_control()
3983 dev->flags |= ATA_DFLAG_D_SENSE; in ata_mselect_control()
3985 dev->flags &= ~ATA_DFLAG_D_SENSE; in ata_mselect_control()
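The tail of ata_mselect_control() above sets or clears ATA_DFLAG_D_SENSE from the proposed control mode page, switching between descriptor- and fixed-format sense reporting. A standalone sketch of that flag toggle is below; the bit position and flag value are assumptions made for illustration, not taken from the listing.

/* Standalone sketch: derive a device flag from one bit of the proposed
 * control mode page body. */
#include <stdint.h>
#include <stdio.h>

#define DSENSE_BIT   0x04      /* assumed position of D_SENSE in body byte 0 */
#define FLAG_D_SENSE (1u << 0) /* illustrative stand-in for ATA_DFLAG_D_SENSE */

static uint32_t apply_control_page(uint32_t flags, const uint8_t *body)
{
	if (body[0] & DSENSE_BIT)
		flags |= FLAG_D_SENSE;    /* report descriptor-format sense */
	else
		flags &= ~FLAG_D_SENSE;   /* report fixed-format sense      */
	return flags;
}

int main(void)
{
	uint8_t body[10] = { DSENSE_BIT };
	uint32_t flags = apply_control_page(0, body);

	printf("descriptor sense %s\n", (flags & FLAG_D_SENSE) ? "on" : "off");
	return 0;
}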
3990 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
3993 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
4002 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_mode_select_xlat()
4003 const u8 *cdb = scmd->cmnd; in ata_scsi_mode_select_xlat()
4007 u16 fp = (u16)-1; in ata_scsi_mode_select_xlat()
4016 if (scmd->cmd_len < 5) { in ata_scsi_mode_select_xlat()
4024 if (scmd->cmd_len < 9) { in ata_scsi_mode_select_xlat()
4041 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) in ata_scsi_mode_select_xlat()
4057 len -= hdr_len; in ata_scsi_mode_select_xlat()
4067 len -= bd_len; in ata_scsi_mode_select_xlat()
4081 len -= 4; in ata_scsi_mode_select_xlat()
4089 len -= 2; in ata_scsi_mode_select_xlat()
4126 * page at a time. in ata_scsi_mode_select_xlat()
4134 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_mode_select_xlat()
4138 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); in ata_scsi_mode_select_xlat()
4143 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_mode_select_xlat()
4147 scmd->result = SAM_STAT_GOOD; in ata_scsi_mode_select_xlat()
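The translation above peels a MODE SELECT parameter list apart in stages: subtract the header length, skip the block descriptors, then hand a single mode page to the per-page handler. A simplified standalone sketch of that walk for MODE SELECT(10) is below; the 8-byte header, the block-descriptor-length field at bytes 6-7, and the 0x3f page-code mask come from SPC and are assumptions of the sketch rather than lines shown in the listing.

/* Standalone sketch: locate the first mode page in a MODE SELECT(10)
 * parameter list, mirroring the len -= hdr_len / len -= bd_len steps. */
#include <stdint.h>
#include <stdio.h>

struct mode_page {
	uint8_t code;
	uint8_t len;          /* body length */
	const uint8_t *body;
};

static int parse_mode_select10(const uint8_t *buf, size_t len,
			       struct mode_page *pg)
{
	const size_t hdr_len = 8;
	size_t bd_len;

	if (len < hdr_len)
		return -1;                        /* parameter list too short */
	bd_len = ((size_t)buf[6] << 8) | buf[7];  /* block descriptor length  */
	buf += hdr_len;
	len -= hdr_len;

	if (len < bd_len)
		return -1;
	buf += bd_len;                            /* descriptors are ignored  */
	len -= bd_len;

	if (len < 2)
		return -1;
	pg->code = buf[0] & 0x3f;
	pg->len  = buf[1];
	pg->body = buf + 2;
	if (len < (size_t)pg->len + 2)
		return -1;                        /* truncated page */
	return 0;
}

int main(void)
{
	/* 8-byte header (no block descriptors) + caching page 08h, 2 body bytes */
	uint8_t buf[] = { 0,0,0,0, 0,0,0,0, 0x08, 0x02, 0x04, 0x00 };
	struct mode_page pg;

	if (!parse_mode_select10(buf, sizeof(buf), &pg))
		printf("page 0x%02x, %u body bytes\n", pg.code, (unsigned)pg.len);
	return 0;
}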
4163 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_security_inout_xlat()
4164 const u8 *cdb = scmd->cmnd; in ata_scsi_security_inout_xlat()
4165 struct ata_taskfile *tf = &qc->tf; in ata_scsi_security_inout_xlat() local
4170 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); in ata_scsi_security_inout_xlat()
4176 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); in ata_scsi_security_inout_xlat()
4182 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4187 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4191 /* convert the byte count to sector-based (512-byte) ATA addressing */ in ata_scsi_security_inout_xlat()
4195 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; in ata_scsi_security_inout_xlat()
4196 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; in ata_scsi_security_inout_xlat()
4198 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_security_inout_xlat()
4199 tf->command = ata_scsi_trusted_op(len, send, dma); in ata_scsi_security_inout_xlat()
4200 tf->feature = secp; in ata_scsi_security_inout_xlat()
4201 tf->lbam = spsp & 0xff; in ata_scsi_security_inout_xlat()
4202 tf->lbah = spsp >> 8; in ata_scsi_security_inout_xlat()
4205 tf->nsect = len & 0xff; in ata_scsi_security_inout_xlat()
4206 tf->lbal = len >> 8; in ata_scsi_security_inout_xlat()
4209 tf->lbah = (1 << 7); in ata_scsi_security_inout_xlat()
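Per the comment above, the SECURITY PROTOCOL allocation length is converted to sector-based addressing before it is split into nsect (low byte) and lbal (high byte), with SPSP landing in lbam/lbah. The standalone sketch below assumes a round-up to 512-byte units; treat that exact formula, and the field names, as assumptions for illustration.

/* Standalone sketch: convert a byte count to 512-byte units and lay the
 * result out the way the taskfile assignments above do. */
#include <stdint.h>
#include <stdio.h>

struct sec_tf {
	uint8_t feature;  /* security protocol (SECP)     */
	uint8_t nsect;    /* transfer length, low 8 bits  */
	uint8_t lbal;     /* transfer length, high 8 bits */
	uint8_t lbam;     /* SP specific (SPSP), low      */
	uint8_t lbah;     /* SP specific (SPSP), high     */
};

/* Returns 0, or -1 if the unit count does not fit in 16 bits. */
static int fill_sec_tf(struct sec_tf *tf, uint8_t secp, uint16_t spsp,
		       uint32_t byte_len)
{
	uint32_t units = (byte_len + 511) / 512;   /* assumed rounding */

	if (units > 0xffff)
		return -1;
	tf->feature = secp;
	tf->nsect = units & 0xff;
	tf->lbal  = units >> 8;
	tf->lbam  = spsp & 0xff;
	tf->lbah  = spsp >> 8;
	return 0;
}

int main(void)
{
	struct sec_tf tf;

	if (!fill_sec_tf(&tf, 0x01, 0x0001, 2048))
		printf("transfer units: %u\n",
		       (unsigned)(tf.nsect | (tf.lbal << 8)));
	return 0;
}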
4217 * ata_scsi_var_len_cdb_xlat - translate SATL variable-length CDB to a handler
4220 * Translate a SCSI variable-length CDB to the corresponding command.
4221 * The service action value in the CDB selects the handler to call.
4224 * Zero on success, non-zero on failure
4229 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_var_len_cdb_xlat()
4230 const u8 *cdb = scmd->cmnd; in ata_scsi_var_len_cdb_xlat()
4234 * if the service action represents an ATA pass-thru(32) command, in ata_scsi_var_len_cdb_xlat()
4245 * ata_get_xlat_func - check if SCSI to ATA translation is possible
4300 if (!(dev->flags & ATA_DFLAG_TRUSTED)) in ata_get_xlat_func()
4312 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
4316 * Prints the contents of a SCSI command via printk().
4323 struct scsi_device *scsidev = cmd->device; in ata_scsi_dump_cdb()
4326 ap->print_id, in ata_scsi_dump_cdb()
4327 scsidev->channel, scsidev->id, scsidev->lun, in ata_scsi_dump_cdb()
4328 cmd->cmnd); in ata_scsi_dump_cdb()
4335 u8 scsi_op = scmd->cmnd[0]; in __ata_scsi_queuecmd()
4339 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { in __ata_scsi_queuecmd()
4340 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) in __ata_scsi_queuecmd()
4345 if (unlikely(!scmd->cmd_len)) in __ata_scsi_queuecmd()
4352 if (unlikely(len > scmd->cmd_len || in __ata_scsi_queuecmd()
4353 len > dev->cdb_len || in __ata_scsi_queuecmd()
4354 scmd->cmd_len > ATAPI_CDB_LEN)) in __ata_scsi_queuecmd()
4360 if (unlikely(scmd->cmd_len > 16)) in __ata_scsi_queuecmd()
4376 scmd->cmd_len, scsi_op, dev->cdb_len); in __ata_scsi_queuecmd()
4377 scmd->result = DID_ERROR << 16; in __ata_scsi_queuecmd()
4378 scmd->scsi_done(scmd); in __ata_scsi_queuecmd()
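The bad-CDB path above enforces a length policy: ATA/ZAC devices take CDBs up to dev->cdb_len bytes, while relayed ATAPI commands must also fit the opcode's defined length and ATAPI_CDB_LEN; anything else completes with DID_ERROR. A standalone sketch of that policy is below (the ATA_16 pass-through special case with its cmd_len > 16 check is left out, and the names are illustrative).

/* Standalone sketch: decide whether a CDB length is acceptable for a
 * given device class, following the checks visible above. */
#include <stdbool.h>
#include <stdio.h>

#define ATAPI_CDB_LEN 16

enum dev_class { DEV_ATA, DEV_ZAC, DEV_ATAPI };

static bool cdb_len_ok(enum dev_class cls, unsigned cmd_len,
		       unsigned opcode_len, unsigned dev_cdb_len)
{
	if (cls == DEV_ATA || cls == DEV_ZAC)
		return cmd_len != 0 && cmd_len <= dev_cdb_len;
	/* ATAPI relay: the opcode's defined length must fit everywhere */
	return cmd_len != 0 &&
	       opcode_len <= cmd_len &&
	       opcode_len <= dev_cdb_len &&
	       cmd_len <= ATAPI_CDB_LEN;
}

int main(void)
{
	printf("%d\n", cdb_len_ok(DEV_ATA, 16, 16, 32));   /* 1: accepted */
	printf("%d\n", cdb_len_ok(DEV_ATAPI, 20, 12, 16)); /* 0: too long */
	return 0;
}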
4383 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
4389 * hardware. In other cases, this function simulates a
4405 struct scsi_device *scsidev = cmd->device; in ata_scsi_queuecmd()
4411 spin_lock_irqsave(ap->lock, irq_flags); in ata_scsi_queuecmd()
4419 cmd->result = (DID_BAD_TARGET << 16); in ata_scsi_queuecmd()
4420 cmd->scsi_done(cmd); in ata_scsi_queuecmd()
4423 spin_unlock_irqrestore(ap->lock, irq_flags); in ata_scsi_queuecmd()
4429 * ata_scsi_simulate - simulate SCSI command on ATA device
4433 * Interprets and directly executes a select list of SCSI commands
4443 const u8 *scsicmd = cmd->cmnd; in ata_scsi_simulate()
4447 args.id = dev->id; in ata_scsi_simulate()
4479 if (dev->flags & ATA_DFLAG_ZAC) { in ata_scsi_simulate()
4512 cmd->result = (DRIVER_SENSE << 24); in ata_scsi_simulate()
4516 * turning this into a no-op. in ata_scsi_simulate()
4521 /* no-ops, complete with success */ in ata_scsi_simulate()
4548 cmd->scsi_done(cmd); in ata_scsi_simulate()
4555 for (i = 0; i < host->n_ports; i++) { in ata_scsi_add_hosts()
4556 struct ata_port *ap = host->ports[i]; in ata_scsi_add_hosts()
4559 rc = -ENOMEM; in ata_scsi_add_hosts()
4564 shost->eh_noresume = 1; in ata_scsi_add_hosts()
4565 *(struct ata_port **)&shost->hostdata[0] = ap; in ata_scsi_add_hosts()
4566 ap->scsi_host = shost; in ata_scsi_add_hosts()
4568 shost->transportt = ata_scsi_transport_template; in ata_scsi_add_hosts()
4569 shost->unique_id = ap->print_id; in ata_scsi_add_hosts()
4570 shost->max_id = 16; in ata_scsi_add_hosts()
4571 shost->max_lun = 1; in ata_scsi_add_hosts()
4572 shost->max_channel = 1; in ata_scsi_add_hosts()
4573 shost->max_cmd_len = 32; in ata_scsi_add_hosts()
4575 /* Schedule policy is determined by ->qc_defer() in ata_scsi_add_hosts()
4580 shost->max_host_blocked = 1; in ata_scsi_add_hosts()
4582 rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); in ata_scsi_add_hosts()
4590 while (--i >= 0) { in ata_scsi_add_hosts()
4591 struct Scsi_Host *shost = host->ports[i]->scsi_host; in ata_scsi_add_hosts()
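ata_scsi_add_hosts() ends with the usual "unwind what was already set up" error path: if registering port i fails, the while (--i >= 0) loop tears down ports 0..i-1 in reverse order. A generic standalone sketch of that pattern, with stand-in setup/teardown functions:

/* Standalone sketch: set up N items, and on failure undo only the ones
 * that succeeded, newest first. */
#include <stdio.h>

#define NPORTS 4

static int setup_port(int i)       /* stand-in for alloc + scsi_add_host */
{
	printf("setup %d\n", i);
	return (i == 2) ? -1 : 0;  /* simulate a failure on port 2 */
}

static void teardown_port(int i)   /* stand-in for scsi_host_put et al. */
{
	printf("teardown %d\n", i);
}

int main(void)
{
	int i, rc = 0;

	for (i = 0; i < NPORTS; i++) {
		rc = setup_port(i);
		if (rc)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (--i >= 0)
		teardown_port(i);
	return 1;
}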
4612 if (dev->sdev) in ata_scsi_scan_host()
4616 id = dev->devno; in ata_scsi_scan_host()
4618 channel = link->pmp; in ata_scsi_scan_host()
4620 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, in ata_scsi_scan_host()
4623 dev->sdev = sdev; in ata_scsi_scan_host()
4626 dev->sdev = NULL; in ata_scsi_scan_host()
4637 if (!dev->sdev) in ata_scsi_scan_host()
4657 * a few more chances. in ata_scsi_scan_host()
4659 if (--tries) { in ata_scsi_scan_host()
4668 queue_delayed_work(system_long_wq, &ap->hotplug_task, in ata_scsi_scan_host()
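The scan path above gives a failed SCSI scan a bounded number of further chances by decrementing a retry counter and re-queueing the hotplug work with a delay. The standalone sketch below shows the same bounded-retry shape with a plain sleep standing in for the delayed-work requeue; the counts and delay are illustrative.

/* Standalone sketch: retry an operation a fixed number of times with a
 * delay between attempts, then give up. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define SCAN_TRIES    5
#define RETRY_DELAY_S 1   /* stands in for the delayed-work interval */

static bool try_scan(int attempt)
{
	return attempt >= 3;   /* pretend the first attempts fail */
}

int main(void)
{
	int tries = SCAN_TRIES;
	int attempt = 0;

	while (tries--) {
		if (try_scan(attempt++)) {
			printf("scan complete after %d attempt(s)\n", attempt);
			return 0;
		}
		fprintf(stderr, "scan failed, retrying in %ds (%d left)\n",
			RETRY_DELAY_S, tries);
		sleep(RETRY_DELAY_S);
	}
	fprintf(stderr, "giving up\n");
	return 1;
}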
4673 * ata_scsi_offline_dev - offline attached SCSI device
4678 * function is called with the host lock held, which protects dev->sdev in ata_scsi_offline_dev()
4689 if (dev->sdev) { in ata_scsi_offline_dev()
4690 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); in ata_scsi_offline_dev()
4697 * ata_scsi_remove_dev - remove attached SCSI device
4708 struct ata_port *ap = dev->link->ap; in ata_scsi_remove_dev()
4718 mutex_lock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4719 spin_lock_irqsave(ap->lock, flags); in ata_scsi_remove_dev()
4721 /* clearing dev->sdev is protected by host lock */ in ata_scsi_remove_dev()
4722 sdev = dev->sdev; in ata_scsi_remove_dev()
4723 dev->sdev = NULL; in ata_scsi_remove_dev()
4743 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_remove_dev()
4744 mutex_unlock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4748 dev_name(&sdev->sdev_gendev)); in ata_scsi_remove_dev()
4757 struct ata_port *ap = link->ap; in ata_scsi_handle_link_detach()
4763 if (!(dev->flags & ATA_DFLAG_DETACHED)) in ata_scsi_handle_link_detach()
4766 spin_lock_irqsave(ap->lock, flags); in ata_scsi_handle_link_detach()
4767 dev->flags &= ~ATA_DFLAG_DETACHED; in ata_scsi_handle_link_detach()
4768 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4778 * ata_scsi_media_change_notify - send media change event
4781 * Tell the block layer to send a media change notification
4789 if (dev->sdev) in ata_scsi_media_change_notify()
4790 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, in ata_scsi_media_change_notify()
4795 * ata_scsi_hotplug - SCSI part of hotplug
4798 * Perform SCSI part of hotplug. It's executed from a separate
4801 * synchronized with hot plugging by a mutex.
4812 if (ap->pflags & ATA_PFLAG_UNLOADING) { in ata_scsi_hotplug()
4813 DPRINTK("ENTER/EXIT - unloading\n"); in ata_scsi_hotplug()
4818 * XXX - UGLY HACK in ata_scsi_hotplug()
4821 * to freezable kthreads and workqueue and may deadlock if a block in ata_scsi_hotplug()
4827 * removal while freezer is active. This is a joke but does avoid in ata_scsi_hotplug()
4831 * http://marc.info/?l=linux-kernel&m=138695698516487 in ata_scsi_hotplug()
4839 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4845 ata_scsi_handle_link_detach(&ap->link); in ata_scsi_hotplug()
4846 if (ap->pmp_link) in ata_scsi_hotplug()
4848 ata_scsi_handle_link_detach(&ap->pmp_link[i]); in ata_scsi_hotplug()
4853 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4858 * ata_scsi_user_scan - indication for user-initiated bus scan
4880 if (!ap->ops->error_handler) in ata_scsi_user_scan()
4881 return -EOPNOTSUPP; in ata_scsi_user_scan()
4884 return -EINVAL; in ata_scsi_user_scan()
4888 return -EINVAL; in ata_scsi_user_scan()
4892 return -EINVAL; in ata_scsi_user_scan()
4896 spin_lock_irqsave(ap->lock, flags); in ata_scsi_user_scan()
4902 struct ata_eh_info *ehi = &link->eh_info; in ata_scsi_user_scan()
4903 ehi->probe_mask |= ATA_ALL_DEVICES; in ata_scsi_user_scan()
4904 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4910 struct ata_eh_info *ehi = &dev->link->eh_info; in ata_scsi_user_scan()
4911 ehi->probe_mask |= 1 << dev->devno; in ata_scsi_user_scan()
4912 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4914 rc = -EINVAL; in ata_scsi_user_scan()
4919 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4922 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
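ata_scsi_user_scan() turns a user-initiated scan into error-handler work: a wildcard scan marks every device on the matching link for probing, a specific id marks just that devno, and either way an ATA_EH_RESET action is requested. A standalone sketch of building such a probe request is below; the wildcard value, mask layout, and action bit are illustrative stand-ins.

/* Standalone sketch: translate a user scan target into a probe mask and
 * an EH action, as the function above does per link/device. */
#include <stdint.h>
#include <stdio.h>

#define SCAN_WILD_CARD  (~0u)
#define ALL_DEVICES     0x3u        /* bit per devno, two devices per link */
#define ACTION_RESET    (1u << 0)   /* illustrative EH action bit */

struct eh_request {
	uint32_t probe_mask;
	uint32_t action;
};

/* Returns 0 on success, -1 if the requested device does not exist. */
static int build_scan_request(unsigned int id, unsigned int ndevices,
			      struct eh_request *req)
{
	if (id == SCAN_WILD_CARD) {
		req->probe_mask = ALL_DEVICES;   /* probe everything */
	} else {
		if (id >= ndevices)
			return -1;               /* no such device   */
		req->probe_mask = 1u << id;      /* probe one devno  */
	}
	req->action = ACTION_RESET;              /* reset before probing */
	return 0;
}

int main(void)
{
	struct eh_request req;

	if (!build_scan_request(1, 2, &req))
		printf("probe_mask=0x%x action=0x%x\n",
		       (unsigned)req.probe_mask, (unsigned)req.action);
	return 0;
}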
4928 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
4945 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4946 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4950 struct scsi_device *sdev = dev->sdev; in ata_scsi_dev_rescan()
4957 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4958 scsi_rescan_device(&(sdev->sdev_gendev)); in ata_scsi_dev_rescan()
4960 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4964 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4965 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
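ata_scsi_dev_rescan() illustrates a classic locking pattern: scsi_rescan_device() can sleep, so the port spinlock is dropped around the call and retaken afterwards, while scsi_scan_mutex keeps the device set stable across the gap (the driver also restarts its walk after retaking the lock, which the sketch below simplifies to an index). A standalone pthread sketch of the same idea, with illustrative names:

/* Standalone sketch: drop a non-sleeping lock around a sleeping call,
 * under an outer mutex that keeps the iterated set stable. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER; /* outer, sleepable */
static pthread_spinlock_t list_lock;                           /* inner, non-sleep */

static void rescan_device(int id)   /* stands in for scsi_rescan_device() */
{
	usleep(1000);               /* may sleep: must not hold list_lock */
	printf("rescanned device %d\n", id);
}

static void rescan_all(int ndev)
{
	pthread_mutex_lock(&scan_mutex);
	pthread_spin_lock(&list_lock);

	for (int id = 0; id < ndev; id++) {
		/* drop the spinlock around the sleeping call, then retake it */
		pthread_spin_unlock(&list_lock);
		rescan_device(id);
		pthread_spin_lock(&list_lock);
	}

	pthread_spin_unlock(&list_lock);
	pthread_mutex_unlock(&scan_mutex);
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	rescan_all(2);
	pthread_spin_destroy(&list_lock);
	return 0;
}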
4969 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
4971 * @port_info: Information from low-level host driver
4991 ap->port_no = 0; in ata_sas_port_alloc()
4992 ap->lock = &host->lock; in ata_sas_port_alloc()
4993 ap->pio_mask = port_info->pio_mask; in ata_sas_port_alloc()
4994 ap->mwdma_mask = port_info->mwdma_mask; in ata_sas_port_alloc()
4995 ap->udma_mask = port_info->udma_mask; in ata_sas_port_alloc()
4996 ap->flags |= port_info->flags; in ata_sas_port_alloc()
4997 ap->ops = port_info->port_ops; in ata_sas_port_alloc()
4998 ap->cbl = ATA_CBL_SATA; in ata_sas_port_alloc()
5005 * ata_sas_port_start - Set port up for DMA.
5022 if (!ap->ops->error_handler) in ata_sas_port_start()
5023 ap->pflags &= ~ATA_PFLAG_FROZEN; in ata_sas_port_start()
5029 * ata_sas_port_stop - Undo ata_sas_port_start()
5044 * ata_sas_async_probe - simply schedule probing and return
5064 * ata_sas_port_init - Initialize a SATA device
5071 * Zero on success, non-zero on error.
5076 int rc = ap->ops->port_start(ap); in ata_sas_port_init()
5080 ap->print_id = atomic_inc_return(&ata_print_id); in ata_sas_port_init()
5098 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
5105 if (ap->ops->port_stop) in ata_sas_port_destroy()
5106 ap->ops->port_stop(ap); in ata_sas_port_destroy()
5112 * ata_sas_slave_configure - Default slave_config routine for libata devices
5123 ata_scsi_dev_config(sdev, ap->link.device); in ata_sas_slave_configure()
5129 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
5144 if (likely(ata_dev_enabled(ap->link.device))) in ata_sas_queuecmd()
5145 rc = __ata_scsi_queuecmd(cmd, ap->link.device); in ata_sas_queuecmd()
5147 cmd->result = (DID_BAD_TARGET << 16); in ata_sas_queuecmd()
5148 cmd->scsi_done(cmd); in ata_sas_queuecmd()
5156 unsigned int max_queue = ap->host->n_tags; in ata_sas_allocate_tag()
5159 for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { in ata_sas_allocate_tag()
5166 if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) { in ata_sas_allocate_tag()
5167 ap->sas_last_tag = tag; in ata_sas_allocate_tag()
5171 return -1; in ata_sas_allocate_tag()
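ata_sas_allocate_tag() hands out tags round-robin from a bitmap: starting just past the last tag it allocated, it probes each slot with an atomic test-and-set until one is free. The standalone C11 sketch below models sas_tag_allocated and sas_last_tag with an atomic word and a cursor; the driver's locking context and its skipping of the internal tag are omitted.

/* Standalone sketch: round-robin bitmap tag allocator using an atomic
 * fetch-or as the test-and-set. */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_TAGS 32

static atomic_uint tag_bitmap;   /* bit i set => tag i in use */
static unsigned int last_tag;    /* round-robin cursor (a hint only) */

/* Returns a free tag, or -1 if all MAX_TAGS tags are busy. */
static int allocate_tag(void)
{
	for (unsigned int i = 0, tag = last_tag + 1; i < MAX_TAGS; i++, tag++) {
		tag %= MAX_TAGS;   /* wrap around */
		/* claim the bit atomically; move on if it was already set */
		if (!(atomic_fetch_or(&tag_bitmap, 1u << tag) & (1u << tag))) {
			last_tag = tag;
			return (int)tag;
		}
	}
	return -1;
}

static void free_tag(int tag)
{
	atomic_fetch_and(&tag_bitmap, ~(1u << tag));
}

int main(void)
{
	int a = allocate_tag();
	int b = allocate_tag();

	printf("got tags %d and %d\n", a, b);
	free_tag(a);
	free_tag(b);
	return 0;
}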
5176 clear_bit(tag, &ap->sas_tag_allocated); in ata_sas_free_tag()