
Lines Matching +full:sata +full:- +full:cold

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_nv.c - NVIDIA nForce SATA
9 * as Documentation/driver-api/libata.rst
12 * This driver programs the NVIDIA SATA controller in a similar
14 * NV-specific details such as register offsets, SATA phy location,
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
83 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
187 /* ADMA Physical Region Descriptor - one SG segment */
198 WNB = (1 << 14), /* wait-not-BSY */
218 __le16 reserved2; /* 6-7 */
219 __le16 tf[12]; /* 8-31 */
220 struct nv_adma_prd aprd[5]; /* 32-111 */
221 __le64 next_aprd; /* 112-119 */
222 __le64 reserved3; /* 120-127 */
387 .can_queue = ATA_MAX_QUEUE - 1,
394 * NV SATA controllers have various different problems with hardreset
401 * linux-ide.
410 * failure on cold boot with the standard debouncing timing.
437 * - Softreset during boot always works.
439 * - Hardreset during boot sometimes fails to bring up the link on
443 * - Hardreset is often necessary after hotplug.
447 * post-boot probing should work around the above issues in most
448 * cases. Define nv_hardreset() which only kicks in for post-boot
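
The reset policy described in the comment above boils down to a simple decision: trust softreset during initial boot-time probing, and only allow hardreset once the system is up (for example after hotplug). A minimal userspace sketch of that decision follows; the struct, flag names, and the should_try_hardreset() helper are illustrative only, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

struct probe_ctx {
	bool boot_probe;	/* still in initial (boot-time) probing */
	bool dev_known;		/* device was already enabled before this EH run */
};

/* Prefer softreset while probing at boot, allow hardreset afterwards
 * (e.g. after hotplug), mirroring the policy in the comment above. */
static bool should_try_hardreset(const struct probe_ctx *ctx)
{
	if (ctx->boot_probe && !ctx->dev_known)
		return false;	/* boot: softreset is reliable, hardreset may not be */
	return true;		/* post-boot: hardreset is often required */
}

int main(void)
{
	struct probe_ctx boot = { .boot_probe = true,  .dev_known = false };
	struct probe_ctx plug = { .boot_probe = false, .dev_known = false };

	printf("boot probe -> hardreset? %d\n", should_try_hardreset(&boot));
	printf("hotplug    -> hardreset? %d\n", should_try_hardreset(&plug));
	return 0;
}
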
580 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
591 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_register_mode()
592 void __iomem *mmio = pp->ctl_block; in nv_adma_register_mode()
596 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) in nv_adma_register_mode()
624 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; in nv_adma_register_mode()
629 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_mode()
630 void __iomem *mmio = pp->ctl_block; in nv_adma_mode()
634 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) in nv_adma_mode()
637 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); in nv_adma_mode()
654 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; in nv_adma_mode()
659 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_adma_slave_config()
660 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_slave_config()
662 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_adma_slave_config()
671 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) in nv_adma_slave_config()
675 spin_lock_irqsave(ap->lock, flags); in nv_adma_slave_config()
677 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { in nv_adma_slave_config()
681 * However, the legacy interface only supports 32-bit DMA. in nv_adma_slave_config()
687 libata-scsi.c */ in nv_adma_slave_config()
688 sg_tablesize = LIBATA_MAX_PRD - 1; in nv_adma_slave_config()
702 if (ap->port_no == 1) in nv_adma_slave_config()
711 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; in nv_adma_slave_config()
714 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; in nv_adma_slave_config()
720 port0 = ap->host->ports[0]->private_data; in nv_adma_slave_config()
721 port1 = ap->host->ports[1]->private_data; in nv_adma_slave_config()
722 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || in nv_adma_slave_config()
723 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { in nv_adma_slave_config()
725 * We have to set the DMA mask to 32-bit if either port is in in nv_adma_slave_config()
731 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); in nv_adma_slave_config()
733 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask); in nv_adma_slave_config()
736 blk_queue_segment_boundary(sdev->request_queue, segment_boundary); in nv_adma_slave_config()
737 blk_queue_max_segments(sdev->request_queue, sg_tablesize); in nv_adma_slave_config()
740 (unsigned long long)*ap->host->dev->dma_mask, in nv_adma_slave_config()
743 spin_unlock_irqrestore(ap->lock, flags); in nv_adma_slave_config()
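
nv_adma_slave_config() above enforces a device-wide DMA constraint: the legacy (ATAPI) interface only supports 32-bit DMA, and because the DMA mask belongs to the shared PCI device, one port running in ATAPI mode limits both ports to 32 bits. A minimal sketch of that policy, with illustrative names only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_MASK_32	0xffffffffULL
#define DMA_MASK_64	0xffffffffffffffffULL

static uint64_t effective_dma_mask(bool port0_atapi, bool port1_atapi,
				   uint64_t adma_mask)
{
	/* The legacy interface only supports 32-bit DMA, and the mask is
	 * per PCI device, so one ATAPI port constrains both ports. */
	if (port0_atapi || port1_atapi)
		return DMA_MASK_32;
	return adma_mask;
}

int main(void)
{
	printf("mask with one ATAPI port: 0x%llx\n",
	       (unsigned long long)effective_dma_mask(false, true, DMA_MASK_64));
	printf("mask with no ATAPI port : 0x%llx\n",
	       (unsigned long long)effective_dma_mask(false, false, DMA_MASK_64));
	return 0;
}
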
750 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
751 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); in nv_adma_check_atapi_dma()
756 /* Other than when internal or pass-through commands are executed, in nv_adma_tf_read()
772 if (tf->flags & ATA_TFLAG_ISADDR) { in nv_adma_tf_to_cpb()
773 if (tf->flags & ATA_TFLAG_LBA48) { in nv_adma_tf_to_cpb()
774 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); in nv_adma_tf_to_cpb()
775 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); in nv_adma_tf_to_cpb()
776 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal); in nv_adma_tf_to_cpb()
777 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam); in nv_adma_tf_to_cpb()
778 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah); in nv_adma_tf_to_cpb()
779 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature); in nv_adma_tf_to_cpb()
781 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB); in nv_adma_tf_to_cpb()
783 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect); in nv_adma_tf_to_cpb()
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal); in nv_adma_tf_to_cpb()
785 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam); in nv_adma_tf_to_cpb()
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); in nv_adma_tf_to_cpb()
789 if (tf->flags & ATA_TFLAG_DEVICE) in nv_adma_tf_to_cpb()
790 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); in nv_adma_tf_to_cpb()
792 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); in nv_adma_tf_to_cpb()
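
nv_adma_tf_to_cpb() above packs the taskfile into 16-bit CPB words of the form (register << 8) | value, with WNB (wait-not-BSY) on the first word and CMDEND on the final command word. The standalone sketch below reproduces the non-LBA48 branch; the register indices and the CMDEND value are assumptions for illustration (only WNB = 1 << 14 appears in the excerpt), and the cpu_to_le16() conversion is omitted.

#include <stdint.h>
#include <stdio.h>

enum { REG_ERR = 1, REG_NSECT = 2, REG_LBAL = 3, REG_LBAM = 4,
       REG_LBAH = 5, REG_DEVICE = 6, REG_CMD = 7 };	/* assumed indices */

#define WNB	(1 << 14)	/* wait-not-BSY, from the excerpt */
#define CMDEND	(1 << 15)	/* assumed: marks the last CPB word */

/* Pack a simple LBA28 taskfile into CPB words, mirroring the
 * non-LBA48 branch shown above (host-endian, for illustration). */
static int pack_cpb(uint16_t *cpb, uint8_t feature, uint8_t nsect,
		    uint8_t lbal, uint8_t lbam, uint8_t lbah,
		    uint8_t device, uint8_t command)
{
	int idx = 0;

	cpb[idx++] = (REG_ERR    << 8) | feature | WNB;
	cpb[idx++] = (REG_NSECT  << 8) | nsect;
	cpb[idx++] = (REG_LBAL   << 8) | lbal;
	cpb[idx++] = (REG_LBAM   << 8) | lbam;
	cpb[idx++] = (REG_LBAH   << 8) | lbah;
	cpb[idx++] = (REG_DEVICE << 8) | device;
	cpb[idx++] = (REG_CMD    << 8) | command | CMDEND;
	return idx;	/* number of 16-bit words used */
}

int main(void)
{
	uint16_t cpb[12];
	int n = pack_cpb(cpb, 0, 1, 0x00, 0x00, 0x00, 0xE0, 0x20 /* READ SECTORS */);

	for (int i = 0; i < n; i++)
		printf("word %d: 0x%04x\n", i, cpb[i]);
	return 0;
}
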
802 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_check_cpb()
803 u8 flags = pp->cpb[cpb_num].resp_flags; in nv_adma_check_cpb()
811 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_check_cpb()
818 ehi->err_mask |= AC_ERR_DEV; in nv_adma_check_cpb()
821 ehi->err_mask |= AC_ERR_DEV; in nv_adma_check_cpb()
824 ehi->err_mask |= AC_ERR_SYSTEM; in nv_adma_check_cpb()
829 ehi->err_mask |= AC_ERR_OTHER; in nv_adma_check_cpb()
837 return -1; in nv_adma_check_cpb()
847 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr()
860 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
875 spin_lock(&host->lock); in nv_adma_interrupt()
877 for (i = 0; i < host->n_ports; i++) { in nv_adma_interrupt()
878 struct ata_port *ap = host->ports[i]; in nv_adma_interrupt()
879 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_interrupt()
880 void __iomem *mmio = pp->ctl_block; in nv_adma_interrupt()
888 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { in nv_adma_interrupt()
889 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) in nv_adma_interrupt()
896 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { in nv_adma_interrupt()
897 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) in nv_adma_interrupt()
899 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
912 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); in nv_adma_interrupt()
914 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && in nv_adma_interrupt()
938 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_interrupt()
943 ehi->err_mask |= AC_ERR_SYSTEM; in nv_adma_interrupt()
969 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
971 ap->link.active_tag; in nv_adma_interrupt()
973 check_commands = ap->link.sactive; in nv_adma_interrupt()
978 pos--; in nv_adma_interrupt()
994 struct nv_adma_port_priv *pp = host->ports[0]->private_data; in nv_adma_interrupt()
995 writel(notifier_clears[0], pp->notifier_clear_block); in nv_adma_interrupt()
996 pp = host->ports[1]->private_data; in nv_adma_interrupt()
997 writel(notifier_clears[1], pp->notifier_clear_block); in nv_adma_interrupt()
1000 spin_unlock(&host->lock); in nv_adma_interrupt()
1007 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_freeze()
1008 void __iomem *mmio = pp->ctl_block; in nv_adma_freeze()
1013 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_adma_freeze()
1017 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_freeze()
1018 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_freeze()
1029 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_thaw()
1030 void __iomem *mmio = pp->ctl_block; in nv_adma_thaw()
1035 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_adma_thaw()
1047 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_irq_clear()
1048 void __iomem *mmio = pp->ctl_block; in nv_adma_irq_clear()
1051 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { in nv_adma_irq_clear()
1057 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_irq_clear()
1058 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_irq_clear()
1063 /* clear notifiers - note both ports need to be written with in nv_adma_irq_clear()
1065 if (ap->port_no == 0) { in nv_adma_irq_clear()
1072 pp = ap->host->ports[0]->private_data; in nv_adma_irq_clear()
1073 writel(notifier_clears[0], pp->notifier_clear_block); in nv_adma_irq_clear()
1074 pp = ap->host->ports[1]->private_data; in nv_adma_irq_clear()
1075 writel(notifier_clears[1], pp->notifier_clear_block); in nv_adma_irq_clear()
1080 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1082 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) in nv_adma_post_internal_cmd()
1088 struct device *dev = ap->host->dev; in nv_adma_port_start()
1100 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and in nv_adma_port_start()
1103 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in nv_adma_port_start()
1114 return -ENOMEM; in nv_adma_port_start()
1116 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + in nv_adma_port_start()
1117 ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_port_start()
1118 pp->ctl_block = mmio; in nv_adma_port_start()
1119 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; in nv_adma_port_start()
1120 pp->notifier_clear_block = pp->gen_block + in nv_adma_port_start()
1121 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); in nv_adma_port_start()
1127 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nv_adma_port_start()
1129 pp->adma_dma_mask = *dev->dma_mask; in nv_adma_port_start()
1134 return -ENOMEM; in nv_adma_port_start()
1138 * 128-byte command parameter block (CPB) in nv_adma_port_start()
1141 pp->cpb = mem; in nv_adma_port_start()
1142 pp->cpb_dma = mem_dma; in nv_adma_port_start()
1153 pp->aprd = mem; in nv_adma_port_start()
1154 pp->aprd_dma = mem_dma; in nv_adma_port_start()
1156 ap->private_data = pp; in nv_adma_port_start()
1162 pp->flags = NV_ADMA_PORT_REGISTER_MODE; in nv_adma_port_start()
1184 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_stop()
1185 void __iomem *mmio = pp->ctl_block; in nv_adma_port_stop()
1194 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_suspend()
1195 void __iomem *mmio = pp->ctl_block; in nv_adma_port_suspend()
1197 /* Go to register mode - clears GO */ in nv_adma_port_suspend()
1211 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_resume()
1212 void __iomem *mmio = pp->ctl_block; in nv_adma_port_resume()
1216 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); in nv_adma_port_resume()
1217 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); in nv_adma_port_resume()
1223 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; in nv_adma_port_resume()
1246 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_adma_setup_port()
1247 struct ata_ioports *ioport = &ap->ioaddr; in nv_adma_setup_port()
1251 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_setup_port()
1253 ioport->cmd_addr = mmio; in nv_adma_setup_port()
1254 ioport->data_addr = mmio + (ATA_REG_DATA * 4); in nv_adma_setup_port()
1255 ioport->error_addr = in nv_adma_setup_port()
1256 ioport->feature_addr = mmio + (ATA_REG_ERR * 4); in nv_adma_setup_port()
1257 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4); in nv_adma_setup_port()
1258 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4); in nv_adma_setup_port()
1259 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4); in nv_adma_setup_port()
1260 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4); in nv_adma_setup_port()
1261 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4); in nv_adma_setup_port()
1262 ioport->status_addr = in nv_adma_setup_port()
1263 ioport->command_addr = mmio + (ATA_REG_STATUS * 4); in nv_adma_setup_port()
1264 ioport->altstatus_addr = in nv_adma_setup_port()
1265 ioport->ctl_addr = mmio + 0x20; in nv_adma_setup_port()
1270 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_adma_host_init()
1285 for (i = 0; i < host->n_ports; i++) in nv_adma_host_init()
1286 nv_adma_setup_port(host->ports[i]); in nv_adma_host_init()
1297 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1299 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
1304 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); in nv_adma_fill_aprd()
1305 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ in nv_adma_fill_aprd()
1306 aprd->flags = flags; in nv_adma_fill_aprd()
1307 aprd->packet_len = 0; in nv_adma_fill_aprd()
1312 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1319 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1320 aprd = (si < 5) ? &cpb->aprd[si] : in nv_adma_fill_sg()
1321 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; in nv_adma_fill_sg()
1325 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); in nv_adma_fill_sg()
1327 cpb->next_aprd = cpu_to_le64(0); in nv_adma_fill_sg()
1332 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1334 /* ADMA engine can only be used for non-ATAPI DMA commands, in nv_adma_use_reg_mode()
1335 or interrupt-driven no-data commands. */ in nv_adma_use_reg_mode()
1336 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || in nv_adma_use_reg_mode()
1337 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1340 if ((qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1341 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
1349 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1350 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; in nv_adma_qc_prep()
1355 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && in nv_adma_qc_prep()
1356 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1357 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1362 cpb->resp_flags = NV_CPB_RESP_DONE; in nv_adma_qc_prep()
1364 cpb->ctl_flags = 0; in nv_adma_qc_prep()
1367 cpb->len = 3; in nv_adma_qc_prep()
1368 cpb->tag = qc->hw_tag; in nv_adma_qc_prep()
1369 cpb->next_cpb_idx = 0; in nv_adma_qc_prep()
1372 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1375 VPRINTK("qc->flags = 0x%lx\n", qc->flags); in nv_adma_qc_prep()
1377 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1379 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1383 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); in nv_adma_qc_prep()
1388 cpb->ctl_flags = ctl_flags; in nv_adma_qc_prep()
1390 cpb->resp_flags = 0; in nv_adma_qc_prep()
1397 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1398 void __iomem *mmio = pp->ctl_block; in nv_adma_qc_issue()
1399 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1406 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1407 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1408 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1414 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); in nv_adma_qc_issue()
1415 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && in nv_adma_qc_issue()
1416 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1417 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1420 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1423 and (number of cpbs to append -1) in top 8 bits */ in nv_adma_qc_issue()
1426 if (curr_ncq != pp->last_issue_ncq) { in nv_adma_qc_issue()
1428 non-NCQ commands, else we get command timeouts and such. */ in nv_adma_qc_issue()
1430 pp->last_issue_ncq = curr_ncq; in nv_adma_qc_issue()
1433 writew(qc->hw_tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
1435 DPRINTK("Issued tag %u\n", qc->hw_tag); in nv_adma_qc_issue()
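
The writew() above pushes a command to the controller's APPEND register, whose format the earlier comment describes: the CPB index in the low bits and (number of CPBs to append - 1) in the top 8 bits, so writing just the tag appends a single CPB. A small sketch of that encoding, with an illustrative helper name:

#include <stdint.h>
#include <stdio.h>

/* Encode a 16-bit APPEND word: first CPB index in the low byte,
 * (count - 1) in bits 15:8.  count must be at least 1. */
static uint16_t adma_append_word(uint8_t first_cpb, unsigned int count)
{
	return (uint16_t)(((count - 1) & 0xff) << 8) | first_cpb;
}

int main(void)
{
	printf("append tag 5, 1 CPB : 0x%04x\n", adma_append_word(5, 1));
	printf("append tag 0, 4 CPBs: 0x%04x\n", adma_append_word(0, 4));
	return 0;
}
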
1447 spin_lock_irqsave(&host->lock, flags); in nv_generic_interrupt()
1449 for (i = 0; i < host->n_ports; i++) { in nv_generic_interrupt()
1450 struct ata_port *ap = host->ports[i]; in nv_generic_interrupt()
1453 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1454 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1461 ap->ops->sff_check_status(ap); in nv_generic_interrupt()
1465 spin_unlock_irqrestore(&host->lock, flags); in nv_generic_interrupt()
1474 for (i = 0; i < host->n_ports; i++) { in nv_do_interrupt()
1475 handled += nv_host_intr(host->ports[i], irq_stat); in nv_do_interrupt()
1488 spin_lock(&host->lock); in nv_nf2_interrupt()
1489 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); in nv_nf2_interrupt()
1491 spin_unlock(&host->lock); in nv_nf2_interrupt()
1502 spin_lock(&host->lock); in nv_ck804_interrupt()
1503 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_ck804_interrupt()
1505 spin_unlock(&host->lock); in nv_ck804_interrupt()
1513 return -EINVAL; in nv_scr_read()
1515 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_read()
1522 return -EINVAL; in nv_scr_write()
1524 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_write()
1531 struct ata_eh_context *ehc = &link->eh_context; in nv_hardreset()
1533 /* Do hardreset iff it's post-boot probing, please read the in nv_hardreset()
1536 if (!(link->ap->pflags & ATA_PFLAG_LOADING) && in nv_hardreset()
1537 !ata_dev_enabled(link->device)) in nv_hardreset()
1544 if (!(ehc->i.flags & ATA_EHI_QUIET)) in nv_hardreset()
1551 if (rc && rc != -EOPNOTSUPP) in nv_hardreset()
1557 return -EAGAIN; in nv_hardreset()
1562 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_freeze()
1563 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_freeze()
1573 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_thaw()
1574 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_thaw()
1586 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_freeze()
1587 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_freeze()
1597 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_thaw()
1598 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_thaw()
1610 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_freeze()
1611 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_freeze()
1623 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_thaw()
1624 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_thaw()
1636 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_error_handler()
1637 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { in nv_adma_error_handler()
1638 void __iomem *mmio = pp->ctl_block; in nv_adma_error_handler()
1642 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { in nv_adma_error_handler()
1645 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); in nv_adma_error_handler()
1658 struct nv_adma_cpb *cpb = &pp->cpb[i]; in nv_adma_error_handler()
1659 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || in nv_adma_error_handler()
1660 ap->link.sactive & (1 << i)) in nv_adma_error_handler()
1663 i, cpb->ctl_flags, cpb->resp_flags); in nv_adma_error_handler()
1673 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; in nv_adma_error_handler()
1692 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_to_dq()
1693 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_qc_to_dq()
1696 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE); in nv_swncq_qc_to_dq()
1697 dq->defer_bits |= (1 << qc->hw_tag); in nv_swncq_qc_to_dq()
1698 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; in nv_swncq_qc_to_dq()
1703 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_from_dq()
1704 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_qc_from_dq()
1707 if (dq->head == dq->tail) /* null queue */ in nv_swncq_qc_from_dq()
1710 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)]; in nv_swncq_qc_from_dq()
1711 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON; in nv_swncq_qc_from_dq()
1712 WARN_ON(!(dq->defer_bits & (1 << tag))); in nv_swncq_qc_from_dq()
1713 dq->defer_bits &= ~(1 << tag); in nv_swncq_qc_from_dq()
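
The defer queue used by nv_swncq_qc_to_dq()/nv_swncq_qc_from_dq() above is a power-of-two ring buffer: head and tail are free-running counters masked only when indexing, plus a bitmap of queued tags. A standalone sketch of the same structure, with illustrative sizes and names:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE	32	/* must be a power of two */
#define TAG_POISON	0xff

struct defer_queue {
	uint32_t defer_bits;	/* bitmap of queued tags */
	uint8_t  tag[QUEUE_SIZE];
	unsigned int head;
	unsigned int tail;
};

static void dq_push(struct defer_queue *dq, uint8_t tag)
{
	dq->defer_bits |= 1u << tag;
	dq->tag[dq->tail++ & (QUEUE_SIZE - 1)] = tag;
}

static int dq_pop(struct defer_queue *dq)
{
	uint8_t tag;

	if (dq->head == dq->tail)	/* empty */
		return -1;

	tag = dq->tag[dq->head & (QUEUE_SIZE - 1)];
	dq->tag[dq->head++ & (QUEUE_SIZE - 1)] = TAG_POISON;
	dq->defer_bits &= ~(1u << tag);
	return tag;
}

int main(void)
{
	struct defer_queue dq = { 0 };

	dq_push(&dq, 3);
	dq_push(&dq, 7);
	printf("%d %d %d\n", dq_pop(&dq), dq_pop(&dq), dq_pop(&dq)); /* 3 7 -1 */
	return 0;
}
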
1720 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fis_reinit()
1722 pp->dhfis_bits = 0; in nv_swncq_fis_reinit()
1723 pp->dmafis_bits = 0; in nv_swncq_fis_reinit()
1724 pp->sdbfis_bits = 0; in nv_swncq_fis_reinit()
1725 pp->ncq_flags = 0; in nv_swncq_fis_reinit()
1730 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_pp_reinit()
1731 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_pp_reinit()
1733 dq->head = 0; in nv_swncq_pp_reinit()
1734 dq->tail = 0; in nv_swncq_pp_reinit()
1735 dq->defer_bits = 0; in nv_swncq_pp_reinit()
1736 pp->qc_active = 0; in nv_swncq_pp_reinit()
1737 pp->last_issue_tag = ATA_TAG_POISON; in nv_swncq_pp_reinit()
1743 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_irq_clear()
1745 writew(fis, pp->irq_block); in nv_swncq_irq_clear()
1758 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_ncq_stop()
1764 ap->qc_active, ap->link.sactive); in nv_swncq_ncq_stop()
1768 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, in nv_swncq_ncq_stop()
1769 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); in nv_swncq_ncq_stop()
1772 ap->ops->sff_check_status(ap), in nv_swncq_ncq_stop()
1773 ioread8(ap->ioaddr.error_addr)); in nv_swncq_ncq_stop()
1775 sactive = readl(pp->sactive_block); in nv_swncq_ncq_stop()
1776 done_mask = pp->qc_active ^ sactive; in nv_swncq_ncq_stop()
1781 if (pp->qc_active & (1 << i)) in nv_swncq_ncq_stop()
1790 (pp->dhfis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1791 (pp->dmafis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1792 (pp->sdbfis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1798 ap->ops->sff_irq_clear(ap); in nv_swncq_ncq_stop()
1805 struct ata_eh_context *ehc = &ap->link.eh_context; in nv_swncq_error_handler()
1807 if (ap->link.sactive) { in nv_swncq_error_handler()
1809 ehc->i.action |= ATA_EH_RESET; in nv_swncq_error_handler()
1818 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_suspend()
1837 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_resume()
1857 void __iomem *mmio = host->iomap[NV_MMIO_BAR]; in nv_swncq_host_init()
1858 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_swncq_host_init()
1882 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_swncq_slave_config()
1883 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_swncq_slave_config()
1891 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) in nv_swncq_slave_config()
1895 dev = &ap->link.device[sdev->id]; in nv_swncq_slave_config()
1896 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) in nv_swncq_slave_config()
1900 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA || in nv_swncq_slave_config()
1901 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2) in nv_swncq_slave_config()
1905 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA || in nv_swncq_slave_config()
1906 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) { in nv_swncq_slave_config()
1915 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); in nv_swncq_slave_config()
1920 sdev->queue_depth); in nv_swncq_slave_config()
1928 struct device *dev = ap->host->dev; in nv_swncq_port_start()
1929 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_start()
1940 return -ENOMEM; in nv_swncq_port_start()
1942 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE, in nv_swncq_port_start()
1943 &pp->prd_dma, GFP_KERNEL); in nv_swncq_port_start()
1944 if (!pp->prd) in nv_swncq_port_start()
1945 return -ENOMEM; in nv_swncq_port_start()
1947 ap->private_data = pp; in nv_swncq_port_start()
1948 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; in nv_swncq_port_start()
1949 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1950 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1957 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1962 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
1972 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
1974 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fill_sg()
1978 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; in nv_swncq_fill_sg()
1981 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
1992 len = 0x10000 - offset; in nv_swncq_fill_sg()
1998 sg_len -= len; in nv_swncq_fill_sg()
2003 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); in nv_swncq_fill_sg()
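
nv_swncq_fill_sg() above builds a BMDMA-style PRD table: each scatter/gather chunk is split so that no entry crosses a 64 KiB boundary, lengths go in the low 16 bits (0 encoding 64 KiB), and the last entry is flagged end-of-table. A standalone sketch under those assumptions; the struct layout and the EOT bit value are illustrative, not taken from the excerpt.

#include <stdint.h>
#include <stdio.h>

#define PRD_EOT	(1u << 31)	/* assumed end-of-table flag */

struct prd_entry {
	uint32_t addr;		/* bus address of the chunk */
	uint32_t flags_len;	/* length in low 16 bits, flags on top */
};

static int fill_prd(struct prd_entry *prd, uint32_t addr, uint32_t sg_len)
{
	int idx = 0;

	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		/* clamp so the entry stops at the next 64 KiB boundary */
		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;

		prd[idx].addr = addr;
		prd[idx].flags_len = len & 0xffff;	/* 0 encodes 64 KiB */
		idx++;

		addr += len;
		sg_len -= len;
	}
	if (idx)
		prd[idx - 1].flags_len |= PRD_EOT;
	return idx;
}

int main(void)
{
	struct prd_entry prd[8];
	int n = fill_prd(prd, 0x0001f000, 0x9000);	/* crosses a 64 KiB line */

	for (int i = 0; i < n; i++)
		printf("prd[%d]: addr=0x%08x flags_len=0x%08x\n",
		       i, prd[i].addr, prd[i].flags_len);
	return 0;
}
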
2009 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_issue_atacmd()
2016 writel((1 << qc->hw_tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2017 pp->last_issue_tag = qc->hw_tag; in nv_swncq_issue_atacmd()
2018 pp->dhfis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2019 pp->dmafis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2020 pp->qc_active |= (0x1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2022 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2023 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2025 DPRINTK("Issued tag %u\n", qc->hw_tag); in nv_swncq_issue_atacmd()
2032 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2033 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_issue()
2035 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2040 if (!pp->qc_active) in nv_swncq_qc_issue()
2051 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_hotplug()
2056 sata_scr_read(&ap->link, SCR_ERROR, &serror); in nv_swncq_hotplug()
2057 sata_scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_hotplug()
2068 ehi->serror |= serror; in nv_swncq_hotplug()
2076 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_sdbfis()
2077 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_sdbfis()
2083 host_stat = ap->ops->bmdma_status(ap); in nv_swncq_sdbfis()
2088 ehi->err_mask |= AC_ERR_HOST_BUS; in nv_swncq_sdbfis()
2089 ehi->action |= ATA_EH_RESET; in nv_swncq_sdbfis()
2090 return -EINVAL; in nv_swncq_sdbfis()
2093 ap->ops->sff_irq_clear(ap); in nv_swncq_sdbfis()
2096 sactive = readl(pp->sactive_block); in nv_swncq_sdbfis()
2097 done_mask = pp->qc_active ^ sactive; in nv_swncq_sdbfis()
2099 pp->qc_active &= ~done_mask; in nv_swncq_sdbfis()
2100 pp->dhfis_bits &= ~done_mask; in nv_swncq_sdbfis()
2101 pp->dmafis_bits &= ~done_mask; in nv_swncq_sdbfis()
2102 pp->sdbfis_bits |= done_mask; in nv_swncq_sdbfis()
2105 if (!ap->qc_active) { in nv_swncq_sdbfis()
2111 if (pp->qc_active & pp->dhfis_bits) in nv_swncq_sdbfis()
2114 if ((pp->ncq_flags & ncq_saw_backout) || in nv_swncq_sdbfis()
2115 (pp->qc_active ^ pp->dhfis_bits)) in nv_swncq_sdbfis()
2124 ap->print_id, ap->qc_active, pp->qc_active, in nv_swncq_sdbfis()
2125 pp->defer_queue.defer_bits, pp->dhfis_bits, in nv_swncq_sdbfis()
2126 pp->dmafis_bits, pp->last_issue_tag); in nv_swncq_sdbfis()
2131 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2136 if (pp->defer_queue.defer_bits) { in nv_swncq_sdbfis()
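
nv_swncq_sdbfis() above works out which NCQ commands finished by comparing the driver's view of issued tags (qc_active) with the device's SActive register: any tag set in one bitmap but not the other has completed, i.e. a plain XOR. A tiny sketch of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

/* Tags the driver issued that the device no longer reports in SActive
 * have finished, so the done set is the XOR of the two bitmaps. */
static uint32_t ncq_done_mask(uint32_t qc_active, uint32_t sactive)
{
	return qc_active ^ sactive;
}

int main(void)
{
	uint32_t issued  = 0x0000002d;	/* tags 0, 2, 3, 5 in flight */
	uint32_t sactive = 0x00000024;	/* tags 2, 5 still active on the device */

	printf("done: 0x%08x\n", ncq_done_mask(issued, sactive));	/* tags 0, 3 */
	return 0;
}
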
2148 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_tag()
2151 tag = readb(pp->tag_block) >> 2; in nv_swncq_tag()
2161 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_dmafis()
2172 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2175 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, in nv_swncq_dmafis()
2176 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); in nv_swncq_dmafis()
2178 /* specify data direction, triple-check start bit is clear */ in nv_swncq_dmafis()
2179 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2184 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2189 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_host_interrupt()
2191 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_host_interrupt()
2195 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2200 if (ap->pflags & ATA_PFLAG_FROZEN) in nv_swncq_host_interrupt()
2208 if (!pp->qc_active) in nv_swncq_host_interrupt()
2211 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) in nv_swncq_host_interrupt()
2213 ap->ops->scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_host_interrupt()
2218 ehi->err_mask |= AC_ERR_DEV; in nv_swncq_host_interrupt()
2219 ehi->serror |= serror; in nv_swncq_host_interrupt()
2220 ehi->action |= ATA_EH_RESET; in nv_swncq_host_interrupt()
2229 pp->ncq_flags |= ncq_saw_backout; in nv_swncq_host_interrupt()
2233 pp->ncq_flags |= ncq_saw_sdb; in nv_swncq_host_interrupt()
2236 ap->print_id, pp->qc_active, pp->dhfis_bits, in nv_swncq_host_interrupt()
2237 pp->dmafis_bits, readl(pp->sactive_block)); in nv_swncq_host_interrupt()
2246 pp->dhfis_bits |= (0x1 << pp->last_issue_tag); in nv_swncq_host_interrupt()
2247 pp->ncq_flags |= ncq_saw_d2h; in nv_swncq_host_interrupt()
2248 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { in nv_swncq_host_interrupt()
2250 ehi->err_mask |= AC_ERR_HSM; in nv_swncq_host_interrupt()
2251 ehi->action |= ATA_EH_RESET; in nv_swncq_host_interrupt()
2256 !(pp->ncq_flags & ncq_saw_dmas)) { in nv_swncq_host_interrupt()
2257 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2261 if (pp->defer_queue.defer_bits) { in nv_swncq_host_interrupt()
2273 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); in nv_swncq_host_interrupt()
2274 pp->ncq_flags |= ncq_saw_dmas; in nv_swncq_host_interrupt()
2294 spin_lock_irqsave(&host->lock, flags); in nv_swncq_interrupt()
2296 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55); in nv_swncq_interrupt()
2298 for (i = 0; i < host->n_ports; i++) { in nv_swncq_interrupt()
2299 struct ata_port *ap = host->ports[i]; in nv_swncq_interrupt()
2301 if (ap->link.sactive) { in nv_swncq_interrupt()
2313 spin_unlock_irqrestore(&host->lock, flags); in nv_swncq_interrupt()
2327 unsigned long type = ent->driver_data; in nv_init_one()
2329 // Make sure this is a SATA controller by counting the number of bars in nv_init_one()
2330 // (NVIDIA SATA controllers will always have six bars). Otherwise, in nv_init_one()
2334 return -ENODEV; in nv_init_one()
2336 ata_print_version_once(&pdev->dev, DRV_VERSION); in nv_init_one()
2344 dev_notice(&pdev->dev, "Using ADMA mode\n"); in nv_init_one()
2347 dev_notice(&pdev->dev, "Using SWNCQ mode\n"); in nv_init_one()
2352 ipriv = ppi[0]->private_data; in nv_init_one()
2357 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); in nv_init_one()
2359 return -ENOMEM; in nv_init_one()
2360 hpriv->type = type; in nv_init_one()
2361 host->private_data = hpriv; in nv_init_one()
2369 base = host->iomap[NV_MMIO_BAR]; in nv_init_one()
2370 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET; in nv_init_one()
2371 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET; in nv_init_one()
2373 /* enable SATA space for CK804 */ in nv_init_one()
2391 dev_notice(&pdev->dev, "Using MSI\n"); in nv_init_one()
2396 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); in nv_init_one()
2403 struct nv_host_priv *hpriv = host->private_data; in nv_pci_device_resume()
2410 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { in nv_pci_device_resume()
2411 if (hpriv->type >= CK804) { in nv_pci_device_resume()
2418 if (hpriv->type == ADMA) { in nv_pci_device_resume()
2424 pp = host->ports[0]->private_data; in nv_pci_device_resume()
2425 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_pci_device_resume()
2431 pp = host->ports[1]->private_data; in nv_pci_device_resume()
2432 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_pci_device_resume()
2451 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_ck804_host_stop()
2454 /* disable SATA space for CK804 */ in nv_ck804_host_stop()
2462 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_adma_host_stop()