1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_nv.c - NVIDIA nForce SATA
9 * as Documentation/driver-api/libata.rst
12 * This driver programs the NVIDIA SATA controller in a similar
14 * NV-specific details such as register offsets, SATA phy location,
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
84 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
188 /* ADMA Physical Region Descriptor - one SG segment */
199 WNB = (1 << 14), /* wait-not-BSY */
219 __le16 reserved2; /* 6-7 */
220 __le16 tf[12]; /* 8-31 */
221 struct nv_adma_prd aprd[5]; /* 32-111 */
222 __le64 next_aprd; /* 112-119 */
223 __le64 reserved3; /* 120-127 */
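/*
 * Editor's sketch (assumption, not part of the original source): the byte
 * offsets noted above imply that struct nv_adma_cpb occupies exactly 128
 * bytes, one CPB per 128-byte slot.  A compile-time check of that
 * assumption could be written with the kernel's build_bug.h helper:
 *
 *	static_assert(sizeof(struct nv_adma_cpb) == 128);
 */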
391 .can_queue = ATA_MAX_QUEUE - 1,
401 * NV SATA controllers have various different problems with hardreset
408 * linux-ide.
417 * failure on cold boot with the standard debouncing timing.
444 * - Softreset during boot always works.
446 * - Hardreset during boot sometimes fails to bring up the link on
450 * - Hardreset is often necessary after hotplug.
454 * post-boot probing should work around the above issues in most
455 * cases. Define nv_hardreset() which only kicks in for post-boot
587 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
598 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_register_mode()
599 void __iomem *mmio = pp->ctl_block; in nv_adma_register_mode()
603 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) in nv_adma_register_mode()
631 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; in nv_adma_register_mode()
636 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_mode()
637 void __iomem *mmio = pp->ctl_block; in nv_adma_mode()
641 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) in nv_adma_mode()
644 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); in nv_adma_mode()
661 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; in nv_adma_mode()
666 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_adma_slave_config()
667 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_slave_config()
669 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_adma_slave_config()
678 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) in nv_adma_slave_config()
682 spin_lock_irqsave(ap->lock, flags); in nv_adma_slave_config()
684 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { in nv_adma_slave_config()
688 * However, the legacy interface only supports 32-bit DMA. in nv_adma_slave_config()
694 libata-scsi.c */ in nv_adma_slave_config()
695 sg_tablesize = LIBATA_MAX_PRD - 1; in nv_adma_slave_config()
709 if (ap->port_no == 1) in nv_adma_slave_config()
718 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; in nv_adma_slave_config()
721 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; in nv_adma_slave_config()
727 port0 = ap->host->ports[0]->private_data; in nv_adma_slave_config()
728 port1 = ap->host->ports[1]->private_data; in nv_adma_slave_config()
729 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || in nv_adma_slave_config()
730 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { in nv_adma_slave_config()
732 * We have to set the DMA mask to 32-bit if either port is in in nv_adma_slave_config()
738 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); in nv_adma_slave_config()
740 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask); in nv_adma_slave_config()
743 blk_queue_segment_boundary(sdev->request_queue, segment_boundary); in nv_adma_slave_config()
744 blk_queue_max_segments(sdev->request_queue, sg_tablesize); in nv_adma_slave_config()
747 (unsigned long long)*ap->host->dev->dma_mask, in nv_adma_slave_config()
750 spin_unlock_irqrestore(ap->lock, flags); in nv_adma_slave_config()
757 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
758 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); in nv_adma_check_atapi_dma()
763 /* Other than when internal or pass-through commands are executed, in nv_adma_tf_read()
779 if (tf->flags & ATA_TFLAG_ISADDR) { in nv_adma_tf_to_cpb()
780 if (tf->flags & ATA_TFLAG_LBA48) { in nv_adma_tf_to_cpb()
781 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); in nv_adma_tf_to_cpb()
782 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); in nv_adma_tf_to_cpb()
783 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal); in nv_adma_tf_to_cpb()
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam); in nv_adma_tf_to_cpb()
785 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah); in nv_adma_tf_to_cpb()
786 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature); in nv_adma_tf_to_cpb()
788 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB); in nv_adma_tf_to_cpb()
790 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect); in nv_adma_tf_to_cpb()
791 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal); in nv_adma_tf_to_cpb()
792 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam); in nv_adma_tf_to_cpb()
793 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); in nv_adma_tf_to_cpb()
796 if (tf->flags & ATA_TFLAG_DEVICE) in nv_adma_tf_to_cpb()
797 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); in nv_adma_tf_to_cpb()
799 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); in nv_adma_tf_to_cpb()
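/*
 * Illustrative reading (editor's note, not driver text): each entry that
 * nv_adma_tf_to_cpb() emits is a 16-bit word with the shadow-register
 * index in the high byte and its value in the low byte; WNB is OR'd into
 * the first word and CMDEND into the last.  A hypothetical READ DMA EXT
 * would therefore end with a word built as
 *
 *	cpu_to_le16((ATA_REG_CMD << 8) | ATA_CMD_READ_EXT | CMDEND);
 */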
809 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_check_cpb()
810 u8 flags = pp->cpb[cpb_num].resp_flags; in nv_adma_check_cpb()
818 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_check_cpb()
825 ehi->err_mask |= AC_ERR_DEV; in nv_adma_check_cpb()
828 ehi->err_mask |= AC_ERR_DEV; in nv_adma_check_cpb()
831 ehi->err_mask |= AC_ERR_SYSTEM; in nv_adma_check_cpb()
836 ehi->err_mask |= AC_ERR_OTHER; in nv_adma_check_cpb()
844 return -1; in nv_adma_check_cpb()
854 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr()
867 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
882 spin_lock(&host->lock); in nv_adma_interrupt()
884 for (i = 0; i < host->n_ports; i++) { in nv_adma_interrupt()
885 struct ata_port *ap = host->ports[i]; in nv_adma_interrupt()
886 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_interrupt()
887 void __iomem *mmio = pp->ctl_block; in nv_adma_interrupt()
895 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { in nv_adma_interrupt()
896 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) in nv_adma_interrupt()
903 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { in nv_adma_interrupt()
904 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) in nv_adma_interrupt()
906 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
919 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); in nv_adma_interrupt()
921 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && in nv_adma_interrupt()
945 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_interrupt()
950 ehi->err_mask |= AC_ERR_SYSTEM; in nv_adma_interrupt()
976 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
978 ap->link.active_tag; in nv_adma_interrupt()
980 check_commands = ap->link.sactive; in nv_adma_interrupt()
985 pos--; in nv_adma_interrupt()
1001 struct nv_adma_port_priv *pp = host->ports[0]->private_data; in nv_adma_interrupt()
1002 writel(notifier_clears[0], pp->notifier_clear_block); in nv_adma_interrupt()
1003 pp = host->ports[1]->private_data; in nv_adma_interrupt()
1004 writel(notifier_clears[1], pp->notifier_clear_block); in nv_adma_interrupt()
1007 spin_unlock(&host->lock); in nv_adma_interrupt()
1014 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_freeze()
1015 void __iomem *mmio = pp->ctl_block; in nv_adma_freeze()
1020 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_adma_freeze()
1024 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_freeze()
1025 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_freeze()
1036 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_thaw()
1037 void __iomem *mmio = pp->ctl_block; in nv_adma_thaw()
1042 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_adma_thaw()
1054 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_irq_clear()
1055 void __iomem *mmio = pp->ctl_block; in nv_adma_irq_clear()
1058 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { in nv_adma_irq_clear()
1064 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_irq_clear()
1065 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_irq_clear()
1070 /* clear notifiers - note both ports need to be written with in nv_adma_irq_clear()
1072 if (ap->port_no == 0) { in nv_adma_irq_clear()
1079 pp = ap->host->ports[0]->private_data; in nv_adma_irq_clear()
1080 writel(notifier_clears[0], pp->notifier_clear_block); in nv_adma_irq_clear()
1081 pp = ap->host->ports[1]->private_data; in nv_adma_irq_clear()
1082 writel(notifier_clears[1], pp->notifier_clear_block); in nv_adma_irq_clear()
1087 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1089 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) in nv_adma_post_internal_cmd()
1095 struct device *dev = ap->host->dev; in nv_adma_port_start()
1105 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and in nv_adma_port_start()
1108 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in nv_adma_port_start()
1119 return -ENOMEM; in nv_adma_port_start()
1121 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + in nv_adma_port_start()
1122 ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_port_start()
1123 pp->ctl_block = mmio; in nv_adma_port_start()
1124 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; in nv_adma_port_start()
1125 pp->notifier_clear_block = pp->gen_block + in nv_adma_port_start()
1126 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); in nv_adma_port_start()
1132 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nv_adma_port_start()
1134 pp->adma_dma_mask = *dev->dma_mask; in nv_adma_port_start()
1139 return -ENOMEM; in nv_adma_port_start()
1143 * 128-byte command parameter block (CPB) in nv_adma_port_start()
1146 pp->cpb = mem; in nv_adma_port_start()
1147 pp->cpb_dma = mem_dma; in nv_adma_port_start()
1158 pp->aprd = mem; in nv_adma_port_start()
1159 pp->aprd_dma = mem_dma; in nv_adma_port_start()
1161 ap->private_data = pp; in nv_adma_port_start()
1167 pp->flags = NV_ADMA_PORT_REGISTER_MODE; in nv_adma_port_start()
1189 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_stop()
1190 void __iomem *mmio = pp->ctl_block; in nv_adma_port_stop()
1198 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_suspend()
1199 void __iomem *mmio = pp->ctl_block; in nv_adma_port_suspend()
1201 /* Go to register mode - clears GO */ in nv_adma_port_suspend()
1215 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_resume()
1216 void __iomem *mmio = pp->ctl_block; in nv_adma_port_resume()
1220 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); in nv_adma_port_resume()
1221 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); in nv_adma_port_resume()
1227 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; in nv_adma_port_resume()
1250 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_adma_setup_port()
1251 struct ata_ioports *ioport = &ap->ioaddr; in nv_adma_setup_port()
1253 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_setup_port()
1255 ioport->cmd_addr = mmio; in nv_adma_setup_port()
1256 ioport->data_addr = mmio + (ATA_REG_DATA * 4); in nv_adma_setup_port()
1257 ioport->error_addr = in nv_adma_setup_port()
1258 ioport->feature_addr = mmio + (ATA_REG_ERR * 4); in nv_adma_setup_port()
1259 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4); in nv_adma_setup_port()
1260 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4); in nv_adma_setup_port()
1261 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4); in nv_adma_setup_port()
1262 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4); in nv_adma_setup_port()
1263 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4); in nv_adma_setup_port()
1264 ioport->status_addr = in nv_adma_setup_port()
1265 ioport->command_addr = mmio + (ATA_REG_STATUS * 4); in nv_adma_setup_port()
1266 ioport->altstatus_addr = in nv_adma_setup_port()
1267 ioport->ctl_addr = mmio + 0x20; in nv_adma_setup_port()
1272 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_adma_host_init()
1285 for (i = 0; i < host->n_ports; i++) in nv_adma_host_init()
1286 nv_adma_setup_port(host->ports[i]); in nv_adma_host_init()
1297 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1299 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
1304 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); in nv_adma_fill_aprd()
1305 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ in nv_adma_fill_aprd()
1306 aprd->flags = flags; in nv_adma_fill_aprd()
1307 aprd->packet_len = 0; in nv_adma_fill_aprd()
1312 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1317 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1318 aprd = (si < 5) ? &cpb->aprd[si] : in nv_adma_fill_sg()
1319 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; in nv_adma_fill_sg()
1323 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); in nv_adma_fill_sg()
1325 cpb->next_aprd = cpu_to_le64(0); in nv_adma_fill_sg()
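/*
 * Worked example (editor's illustration): a command with 8 scatterlist
 * entries fills the five inline cpb->aprd[] slots with si = 0..4, while
 * si = 5..7 land in the external per-tag table, e.g. si = 6 selects
 *
 *	&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + 1];
 *
 * next_aprd then carries the DMA address of that table so the controller
 * can keep walking the scatter/gather list past the fifth entry.
 */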
1330 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1332 /* ADMA engine can only be used for non-ATAPI DMA commands, in nv_adma_use_reg_mode()
1333 or interrupt-driven no-data commands. */ in nv_adma_use_reg_mode()
1334 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || in nv_adma_use_reg_mode()
1335 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1338 if ((qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1339 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
1347 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1348 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; in nv_adma_qc_prep()
1353 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && in nv_adma_qc_prep()
1354 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1355 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1360 cpb->resp_flags = NV_CPB_RESP_DONE; in nv_adma_qc_prep()
1362 cpb->ctl_flags = 0; in nv_adma_qc_prep()
1365 cpb->len = 3; in nv_adma_qc_prep()
1366 cpb->tag = qc->hw_tag; in nv_adma_qc_prep()
1367 cpb->next_cpb_idx = 0; in nv_adma_qc_prep()
1370 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1373 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1375 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1379 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); in nv_adma_qc_prep()
1384 cpb->ctl_flags = ctl_flags; in nv_adma_qc_prep()
1386 cpb->resp_flags = 0; in nv_adma_qc_prep()
1393 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1394 void __iomem *mmio = pp->ctl_block; in nv_adma_qc_issue()
1395 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1400 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1401 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1402 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1408 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && in nv_adma_qc_issue()
1409 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1410 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1413 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1416 and (number of cpbs to append -1) in top 8 bits */ in nv_adma_qc_issue()
1419 if (curr_ncq != pp->last_issue_ncq) { in nv_adma_qc_issue()
1421 non-NCQ commands, else we get command timeouts and such. */ in nv_adma_qc_issue()
1423 pp->last_issue_ncq = curr_ncq; in nv_adma_qc_issue()
1426 writew(qc->hw_tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
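/*
 * Interpretation of the APPEND write above (editor's assumption based on
 * the comment fragment a few lines earlier): the low byte selects the CPB
 * tag to execute and the high byte holds (number of CPBs to append - 1),
 * so queuing this single CPB is equivalent to
 *
 *	writew((0 << 8) | qc->hw_tag, mmio + NV_ADMA_APPEND);
 */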
1438 spin_lock_irqsave(&host->lock, flags); in nv_generic_interrupt()
1440 for (i = 0; i < host->n_ports; i++) { in nv_generic_interrupt()
1441 struct ata_port *ap = host->ports[i]; in nv_generic_interrupt()
1444 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1445 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1452 ap->ops->sff_check_status(ap); in nv_generic_interrupt()
1456 spin_unlock_irqrestore(&host->lock, flags); in nv_generic_interrupt()
1465 for (i = 0; i < host->n_ports; i++) { in nv_do_interrupt()
1466 handled += nv_host_intr(host->ports[i], irq_stat); in nv_do_interrupt()
1479 spin_lock(&host->lock); in nv_nf2_interrupt()
1480 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); in nv_nf2_interrupt()
1482 spin_unlock(&host->lock); in nv_nf2_interrupt()
1493 spin_lock(&host->lock); in nv_ck804_interrupt()
1494 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_ck804_interrupt()
1496 spin_unlock(&host->lock); in nv_ck804_interrupt()
1504 return -EINVAL; in nv_scr_read()
1506 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_read()
1513 return -EINVAL; in nv_scr_write()
1515 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_write()
1522 struct ata_eh_context *ehc = &link->eh_context; in nv_hardreset()
1524 /* Do hardreset iff it's post-boot probing, please read the in nv_hardreset()
1527 if (!(link->ap->pflags & ATA_PFLAG_LOADING) && in nv_hardreset()
1528 !ata_dev_enabled(link->device)) in nv_hardreset()
1535 if (!(ehc->i.flags & ATA_EHI_QUIET)) in nv_hardreset()
1542 if (rc && rc != -EOPNOTSUPP) in nv_hardreset()
1548 return -EAGAIN; in nv_hardreset()
1553 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_freeze()
1554 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_freeze()
1564 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_thaw()
1565 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_thaw()
1577 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_freeze()
1578 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_freeze()
1588 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_thaw()
1589 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_thaw()
1601 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_freeze()
1602 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_freeze()
1614 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_thaw()
1615 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_thaw()
1627 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_error_handler()
1628 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { in nv_adma_error_handler()
1629 void __iomem *mmio = pp->ctl_block; in nv_adma_error_handler()
1633 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { in nv_adma_error_handler()
1636 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); in nv_adma_error_handler()
1649 struct nv_adma_cpb *cpb = &pp->cpb[i]; in nv_adma_error_handler()
1650 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || in nv_adma_error_handler()
1651 ap->link.sactive & (1 << i)) in nv_adma_error_handler()
1654 i, cpb->ctl_flags, cpb->resp_flags); in nv_adma_error_handler()
1664 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; in nv_adma_error_handler()
1683 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_to_dq()
1684 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_qc_to_dq()
1687 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE); in nv_swncq_qc_to_dq()
1688 dq->defer_bits |= (1 << qc->hw_tag); in nv_swncq_qc_to_dq()
1689 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; in nv_swncq_qc_to_dq()
1694 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_from_dq()
1695 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_qc_from_dq()
1698 if (dq->head == dq->tail) /* null queue */ in nv_swncq_qc_from_dq()
1701 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)]; in nv_swncq_qc_from_dq()
1702 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON; in nv_swncq_qc_from_dq()
1703 WARN_ON(!(dq->defer_bits & (1 << tag))); in nv_swncq_qc_from_dq()
1704 dq->defer_bits &= ~(1 << tag); in nv_swncq_qc_from_dq()
1711 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fis_reinit()
1713 pp->dhfis_bits = 0; in nv_swncq_fis_reinit()
1714 pp->dmafis_bits = 0; in nv_swncq_fis_reinit()
1715 pp->sdbfis_bits = 0; in nv_swncq_fis_reinit()
1716 pp->ncq_flags = 0; in nv_swncq_fis_reinit()
1721 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_pp_reinit()
1722 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_pp_reinit()
1724 dq->head = 0; in nv_swncq_pp_reinit()
1725 dq->tail = 0; in nv_swncq_pp_reinit()
1726 dq->defer_bits = 0; in nv_swncq_pp_reinit()
1727 pp->qc_active = 0; in nv_swncq_pp_reinit()
1728 pp->last_issue_tag = ATA_TAG_POISON; in nv_swncq_pp_reinit()
1734 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_irq_clear()
1736 writew(fis, pp->irq_block); in nv_swncq_irq_clear()
1749 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_ncq_stop()
1755 ap->qc_active, ap->link.sactive); in nv_swncq_ncq_stop()
1759 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, in nv_swncq_ncq_stop()
1760 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); in nv_swncq_ncq_stop()
1763 ap->ops->sff_check_status(ap), in nv_swncq_ncq_stop()
1764 ioread8(ap->ioaddr.error_addr)); in nv_swncq_ncq_stop()
1766 sactive = readl(pp->sactive_block); in nv_swncq_ncq_stop()
1767 done_mask = pp->qc_active ^ sactive; in nv_swncq_ncq_stop()
1772 if (pp->qc_active & (1 << i)) in nv_swncq_ncq_stop()
1781 (pp->dhfis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1782 (pp->dmafis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1783 (pp->sdbfis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1789 ap->ops->sff_irq_clear(ap); in nv_swncq_ncq_stop()
1796 struct ata_eh_context *ehc = &ap->link.eh_context; in nv_swncq_error_handler()
1798 if (ap->link.sactive) { in nv_swncq_error_handler()
1800 ehc->i.action |= ATA_EH_RESET; in nv_swncq_error_handler()
1809 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_suspend()
1828 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_resume()
1848 void __iomem *mmio = host->iomap[NV_MMIO_BAR]; in nv_swncq_host_init()
1849 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_swncq_host_init()
1859 dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp); in nv_swncq_host_init()
1864 dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp); in nv_swncq_host_init()
1873 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_swncq_slave_config()
1874 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_swncq_slave_config()
1882 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) in nv_swncq_slave_config()
1886 dev = &ap->link.device[sdev->id]; in nv_swncq_slave_config()
1887 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) in nv_swncq_slave_config()
1891 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA || in nv_swncq_slave_config()
1892 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2) in nv_swncq_slave_config()
1896 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA || in nv_swncq_slave_config()
1897 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) { in nv_swncq_slave_config()
1906 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); in nv_swncq_slave_config()
1911 sdev->queue_depth); in nv_swncq_slave_config()
1919 struct device *dev = ap->host->dev; in nv_swncq_port_start()
1920 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_start()
1931 return -ENOMEM; in nv_swncq_port_start()
1933 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE, in nv_swncq_port_start()
1934 &pp->prd_dma, GFP_KERNEL); in nv_swncq_port_start()
1935 if (!pp->prd) in nv_swncq_port_start()
1936 return -ENOMEM; in nv_swncq_port_start()
1938 ap->private_data = pp; in nv_swncq_port_start()
1939 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; in nv_swncq_port_start()
1940 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1941 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1948 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1953 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
1963 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
1965 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fill_sg()
1969 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; in nv_swncq_fill_sg()
1972 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
1983 len = 0x10000 - offset; in nv_swncq_fill_sg()
1989 sg_len -= len; in nv_swncq_fill_sg()
1994 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); in nv_swncq_fill_sg()
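/*
 * Worked example (editor's illustration): a 96 KiB scatterlist segment
 * starting on a 64 KiB boundary is split by the loop above into two PRD
 * entries of 0x10000 and 0x8000 bytes, and ATA_PRD_EOT is then OR'd into
 * the last entry so the controller knows where the table ends.
 */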
2000 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_issue_atacmd()
2005 writel((1 << qc->hw_tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2006 pp->last_issue_tag = qc->hw_tag; in nv_swncq_issue_atacmd()
2007 pp->dhfis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2008 pp->dmafis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2009 pp->qc_active |= (0x1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2011 trace_ata_tf_load(ap, &qc->tf); in nv_swncq_issue_atacmd()
2012 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2013 trace_ata_exec_command(ap, &qc->tf, qc->hw_tag); in nv_swncq_issue_atacmd()
2014 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2021 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2022 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_issue()
2024 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2027 if (!pp->qc_active) in nv_swncq_qc_issue()
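/*
 * Issue-path sketch (reconstructed reading, not verbatim driver code):
 * when nothing is in flight the command is sent to the drive at once,
 * otherwise it is parked in the software defer queue and re-issued from
 * the interrupt path once the earlier command has produced its FIS:
 *
 *	if (!pp->qc_active)
 *		nv_swncq_issue_atacmd(ap, qc);
 *	else
 *		nv_swncq_qc_to_dq(ap, qc);
 */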
2038 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_hotplug()
2043 sata_scr_read(&ap->link, SCR_ERROR, &serror); in nv_swncq_hotplug()
2044 sata_scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_hotplug()
2055 ehi->serror |= serror; in nv_swncq_hotplug()
2063 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_sdbfis()
2064 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_sdbfis()
2070 host_stat = ap->ops->bmdma_status(ap); in nv_swncq_sdbfis()
2076 ehi->err_mask |= AC_ERR_HOST_BUS; in nv_swncq_sdbfis()
2077 ehi->action |= ATA_EH_RESET; in nv_swncq_sdbfis()
2078 return -EINVAL; in nv_swncq_sdbfis()
2081 ap->ops->sff_irq_clear(ap); in nv_swncq_sdbfis()
2084 sactive = readl(pp->sactive_block); in nv_swncq_sdbfis()
2085 done_mask = pp->qc_active ^ sactive; in nv_swncq_sdbfis()
2087 pp->qc_active &= ~done_mask; in nv_swncq_sdbfis()
2088 pp->dhfis_bits &= ~done_mask; in nv_swncq_sdbfis()
2089 pp->dmafis_bits &= ~done_mask; in nv_swncq_sdbfis()
2090 pp->sdbfis_bits |= done_mask; in nv_swncq_sdbfis()
2093 if (!ap->qc_active) { in nv_swncq_sdbfis()
2099 if (pp->qc_active & pp->dhfis_bits) in nv_swncq_sdbfis()
2102 if ((pp->ncq_flags & ncq_saw_backout) || in nv_swncq_sdbfis()
2103 (pp->qc_active ^ pp->dhfis_bits)) in nv_swncq_sdbfis()
2112 ap->qc_active, pp->qc_active, in nv_swncq_sdbfis()
2113 pp->defer_queue.defer_bits, pp->dhfis_bits, in nv_swncq_sdbfis()
2114 pp->dmafis_bits, pp->last_issue_tag); in nv_swncq_sdbfis()
2119 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2124 if (pp->defer_queue.defer_bits) { in nv_swncq_sdbfis()
2136 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_tag()
2139 tag = readb(pp->tag_block) >> 2; in nv_swncq_tag()
2149 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_dmafis()
2160 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2163 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, in nv_swncq_dmafis()
2164 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); in nv_swncq_dmafis()
2166 /* specify data direction, triple-check start bit is clear */ in nv_swncq_dmafis()
2167 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2172 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2177 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_host_interrupt()
2179 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_host_interrupt()
2183 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2196 if (!pp->qc_active) in nv_swncq_host_interrupt()
2199 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) in nv_swncq_host_interrupt()
2201 ap->ops->scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_host_interrupt()
2206 ehi->err_mask |= AC_ERR_DEV; in nv_swncq_host_interrupt()
2207 ehi->serror |= serror; in nv_swncq_host_interrupt()
2208 ehi->action |= ATA_EH_RESET; in nv_swncq_host_interrupt()
2217 pp->ncq_flags |= ncq_saw_backout; in nv_swncq_host_interrupt()
2221 pp->ncq_flags |= ncq_saw_sdb; in nv_swncq_host_interrupt()
2224 pp->qc_active, pp->dhfis_bits, in nv_swncq_host_interrupt()
2225 pp->dmafis_bits, readl(pp->sactive_block)); in nv_swncq_host_interrupt()
2234 pp->dhfis_bits |= (0x1 << pp->last_issue_tag); in nv_swncq_host_interrupt()
2235 pp->ncq_flags |= ncq_saw_d2h; in nv_swncq_host_interrupt()
2236 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { in nv_swncq_host_interrupt()
2238 ehi->err_mask |= AC_ERR_HSM; in nv_swncq_host_interrupt()
2239 ehi->action |= ATA_EH_RESET; in nv_swncq_host_interrupt()
2244 !(pp->ncq_flags & ncq_saw_dmas)) { in nv_swncq_host_interrupt()
2245 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2249 if (pp->defer_queue.defer_bits) { in nv_swncq_host_interrupt()
2261 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); in nv_swncq_host_interrupt()
2262 pp->ncq_flags |= ncq_saw_dmas; in nv_swncq_host_interrupt()
2282 spin_lock_irqsave(&host->lock, flags); in nv_swncq_interrupt()
2284 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55); in nv_swncq_interrupt()
2286 for (i = 0; i < host->n_ports; i++) { in nv_swncq_interrupt()
2287 struct ata_port *ap = host->ports[i]; in nv_swncq_interrupt()
2289 if (ap->link.sactive) { in nv_swncq_interrupt()
2301 spin_unlock_irqrestore(&host->lock, flags); in nv_swncq_interrupt()
2315 unsigned long type = ent->driver_data; in nv_init_one()
2317 // Make sure this is a SATA controller by counting the number of bars in nv_init_one()
2318 // (NVIDIA SATA controllers will always have six bars). Otherwise, in nv_init_one()
2322 return -ENODEV; in nv_init_one()
2324 ata_print_version_once(&pdev->dev, DRV_VERSION); in nv_init_one()
2332 dev_notice(&pdev->dev, "Using ADMA mode\n"); in nv_init_one()
2335 dev_notice(&pdev->dev, "Using SWNCQ mode\n"); in nv_init_one()
2340 ipriv = ppi[0]->private_data; in nv_init_one()
2345 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); in nv_init_one()
2347 return -ENOMEM; in nv_init_one()
2348 hpriv->type = type; in nv_init_one()
2349 host->private_data = hpriv; in nv_init_one()
2357 base = host->iomap[NV_MMIO_BAR]; in nv_init_one()
2358 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET; in nv_init_one()
2359 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET; in nv_init_one()
2361 /* enable SATA space for CK804 */ in nv_init_one()
2379 dev_notice(&pdev->dev, "Using MSI\n"); in nv_init_one()
2384 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); in nv_init_one()
2391 struct nv_host_priv *hpriv = host->private_data; in nv_pci_device_resume()
2398 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { in nv_pci_device_resume()
2399 if (hpriv->type >= CK804) { in nv_pci_device_resume()
2406 if (hpriv->type == ADMA) { in nv_pci_device_resume()
2412 pp = host->ports[0]->private_data; in nv_pci_device_resume()
2413 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_pci_device_resume()
2419 pp = host->ports[1]->private_data; in nv_pci_device_resume()
2420 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_pci_device_resume()
2439 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_ck804_host_stop()
2442 /* disable SATA space for CK804 */ in nv_ck804_host_stop()
2450 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_adma_host_stop()