Lines matching refs:qc — cross-reference hits for the struct ata_queued_cmd pointer qc in the NVIDIA nForce SATA driver (drivers/ata/sata_nv.c). Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" notes how qc is bound at that point.
299 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
300 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
301 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
314 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
322 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
323 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
324 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
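
These declarations cover the driver's two queued-command paths: ADMA (the hardware command-list engine on the CK804/MCP04 generation) and SWNCQ (software-driven NCQ on the MCP51/55/61 generation). They are wired into libata through per-flavor ata_port_operations tables; a trimmed sketch of that wiring (member lists abbreviated, not verbatim from the source):

    static struct ata_port_operations nv_adma_ops = {
            .check_atapi_dma        = nv_adma_check_atapi_dma,
            .qc_prep                = nv_adma_qc_prep,
            .qc_issue               = nv_adma_qc_issue,
            .post_internal_cmd      = nv_adma_post_internal_cmd,
            /* ... reset, freeze/thaw, port start/stop ... */
    };

    static struct ata_port_operations nv_swncq_ops = {
            .qc_prep                = nv_swncq_qc_prep,
            .qc_issue               = nv_swncq_qc_issue,
            /* ... */
    };
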
748 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) in nv_adma_check_atapi_dma() argument
750 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
847 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr() local
860 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
866 return ata_bmdma_port_intr(ap, qc); in nv_host_intr()
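
The unlikely() guard handles a device interrupt arriving with no active command, or while a command is being run in polling mode; in the clipped body the driver just reads the status register to acknowledge the interrupt and claims it, so only a genuine interrupt-driven qc reaches ata_bmdma_port_intr(). Approximately:

    if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
            ata_sff_check_status(ap);       /* ack spurious/polled irq */
            return 1;
    }
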
1078 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) in nv_adma_post_internal_cmd() argument
1080 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1083 ata_bmdma_post_internal_cmd(qc); in nv_adma_post_internal_cmd()
1291 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc, in nv_adma_fill_aprd() argument
1297 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1299 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
1310 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) in nv_adma_fill_sg() argument
1312 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1319 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1321 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; in nv_adma_fill_sg()
1322 nv_adma_fill_aprd(qc, sg, si, aprd); in nv_adma_fill_sg()
1325 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); in nv_adma_fill_sg()
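
The external-table indexing at source line 1321 is only half of a two-line statement; the clipped half selects between the five APRD slots embedded in the CPB itself and the per-tag spill table that next_aprd points at. Reconstructed (approximating the source, not verbatim):

    aprd = (si < 5) ? &cpb->aprd[si] :
            &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si - 5)];
    nv_adma_fill_aprd(qc, sg, si, aprd);
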
1330 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) in nv_adma_use_reg_mode() argument
1332 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1337 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1340 if ((qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1341 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
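
Both qc_prep and qc_issue branch on this predicate, and the listing clips its return statements, so a reconstruction is worth having (approximate, not verbatim): register mode is forced for ATAPI-configured ports and for polled commands, while DMA-mapped and NODATA protocols are allowed through the ADMA engine.

    static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
    {
            struct nv_adma_port_priv *pp = qc->ap->private_data;

            /* ATAPI setup done, or a polled command: register mode */
            if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
                (qc->tf.flags & ATA_TFLAG_POLLING))
                    return 1;

            /* DMA-mapped data or NODATA can use the ADMA engine */
            if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
                (qc->tf.protocol == ATA_PROT_NODATA))
                    return 0;

            return 1;
    }
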
1347 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) in nv_adma_qc_prep() argument
1349 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1350 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; in nv_adma_qc_prep()
1354 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_prep()
1356 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1357 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1358 ata_bmdma_qc_prep(qc); in nv_adma_qc_prep()
1368 cpb->tag = qc->hw_tag; in nv_adma_qc_prep()
1372 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1375 VPRINTK("qc->flags = 0x%lx\n", qc->flags); in nv_adma_qc_prep()
1377 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1379 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1380 nv_adma_fill_sg(qc, cpb); in nv_adma_qc_prep()
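
Between the tag store and the fill_sg call the listing skips the CPB validity protocol: ctl_flags is accumulated locally (gaining NV_CPB_CTL_FPDMA when the NCQ check above fires) and is only written into the CPB, bracketed by write barriers, after the taskfile and scatter/gather entries are in place, so the engine can never pick up a half-built CPB. A hedged sketch of that tail:

    /* don't expose NV_CPB_CTL_CPB_VALID until the CPB is complete */
    wmb();
    cpb->ctl_flags = ctl_flags;     /* includes NV_CPB_CTL_CPB_VALID */
    wmb();
    cpb->resp_flags = 0;
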
1395 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) in nv_adma_qc_issue() argument
1397 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1399 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1406 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1407 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1408 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1412 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_issue()
1414 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); in nv_adma_qc_issue()
1416 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1417 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1418 return ata_bmdma_qc_issue(qc); in nv_adma_qc_issue()
1420 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1433 writew(qc->hw_tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
1435 DPRINTK("Issued tag %u\n", qc->hw_tag); in nv_adma_qc_issue()
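
curr_ncq, captured at entry, serves the clipped stretch between nv_adma_mode() and the APPEND write: the controller reportedly misbehaves when NCQ and non-NCQ commands are issued back to back, so the driver pauses briefly whenever the protocol type flips before appending the new tag. A sketch (reconstruction, not verbatim):

    wmb();
    if (curr_ncq != pp->last_issue_ncq) {
            /* switching between NCQ and non-NCQ needs a short delay,
             * or commands start timing out */
            udelay(20);
            pp->last_issue_ncq = curr_ncq;
    }
    writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
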
1451 struct ata_queued_cmd *qc; in nv_generic_interrupt() local
1453 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1454 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1455 handled += ata_bmdma_port_intr(ap, qc); in nv_generic_interrupt()
1690 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) in nv_swncq_qc_to_dq() argument
1697 dq->defer_bits |= (1 << qc->hw_tag); in nv_swncq_qc_to_dq()
1698 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; in nv_swncq_qc_to_dq()
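
This is the producer side of the SWNCQ defer queue: a ring indexed by free-running head/tail counters masked to ATA_MAX_QUEUE, with defer_bits doubling as a membership mask. The consumer, nv_swncq_qc_from_dq() (called from the restart paths further down), plausibly mirrors it as follows (reconstruction, not verbatim):

    static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
    {
            struct nv_swncq_port_priv *pp = ap->private_data;
            struct defer_queue *dq = &pp->defer_queue;
            unsigned int tag;

            if (dq->head == dq->tail)       /* ring empty */
                    return NULL;

            tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
            dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
            WARN_ON(!(dq->defer_bits & (1 << tag)));
            dq->defer_bits &= ~(1 << tag);

            return ata_qc_from_tag(ap, tag);
    }
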
1750 struct ata_queued_cmd qc; in __ata_bmdma_stop() local
1752 qc.ap = ap; in __ata_bmdma_stop()
1753 ata_bmdma_stop(&qc); in __ata_bmdma_stop()
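
The stack-allocated qc is a shim: ata_bmdma_stop() only dereferences qc->ap (to reach the BMDMA registers and clear ATA_DMA_START), so fabricating a one-field command is the cheapest way to reuse it at points where the SWNCQ path must halt the engine without a real queued command in hand.
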
1955 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) in nv_swncq_qc_prep() argument
1957 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1958 ata_bmdma_qc_prep(qc); in nv_swncq_qc_prep()
1962 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
1965 nv_swncq_fill_sg(qc); in nv_swncq_qc_prep()
1970 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) in nv_swncq_fill_sg() argument
1972 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
1978 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; in nv_swncq_fill_sg()
1981 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
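
Each NCQ tag owns a private PRD table (pp->prd + ATA_MAX_PRD * hw_tag), so a later DMA-setup FIS can point the shared BMDMA engine at the right one. The clipped loop body walks each DMA segment and, as BMDMA PRDs require, splits anything that would cross a 64 KiB boundary; a sketch, assuming an idx counter zeroed before the walk (not verbatim):

    u32 addr = (u32)sg_dma_address(sg);
    u32 sg_len = sg_dma_len(sg);

    while (sg_len) {
            u32 offset = addr & 0xffff;
            u32 len = sg_len;

            if (offset + sg_len > 0x10000)  /* don't cross 64K */
                    len = 0x10000 - offset;

            prd[idx].addr = cpu_to_le32(addr);
            prd[idx].flags_len = cpu_to_le32(len & 0xffff);
            idx++;

            addr += len;
            sg_len -= len;
    }

    /* after the last segment, terminate the table */
    prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
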
2007 struct ata_queued_cmd *qc) in nv_swncq_issue_atacmd() argument
2011 if (qc == NULL) in nv_swncq_issue_atacmd()
2016 writel((1 << qc->hw_tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2017 pp->last_issue_tag = qc->hw_tag; in nv_swncq_issue_atacmd()
2018 pp->dhfis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2019 pp->dmafis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2020 pp->qc_active |= (0x1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2022 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2023 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2025 DPRINTK("Issued tag %u\n", qc->hw_tag); in nv_swncq_issue_atacmd()
2030 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) in nv_swncq_qc_issue() argument
2032 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2035 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2036 return ata_bmdma_qc_issue(qc); in nv_swncq_qc_issue()
2041 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_qc_issue()
2043 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ in nv_swncq_qc_issue()
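
The condition clipped between these two calls is the entire SWNCQ scheduling policy: issue immediately only when the port is idle, otherwise park the command for the completion paths below to replay. Approximately:

    if (!pp->qc_active)
            nv_swncq_issue_atacmd(ap, qc);  /* port idle: issue now */
    else
            nv_swncq_qc_to_dq(ap, qc);      /* busy: defer */

    return 0;
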
2075 struct ata_queued_cmd *qc; in nv_swncq_sdbfis() local
2131 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2132 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2138 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_sdbfis()
2139 WARN_ON(qc == NULL); in nv_swncq_sdbfis()
2140 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2157 struct ata_queued_cmd *qc; in nv_swncq_dmafis() local
2167 qc = ata_qc_from_tag(ap, tag); in nv_swncq_dmafis()
2169 if (unlikely(!qc)) in nv_swncq_dmafis()
2172 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2175 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, in nv_swncq_dmafis()
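
This is the DMA-setup-FIS handler pointing the shared BMDMA engine at the PRD table of whichever tag the device selected, then programming direction and start. A sketch of the clipped tail (reconstruction, not verbatim; note ATA_DMA_WR is from the bus master's viewpoint, so a device read sets it):

    dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
    dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
    if (!rw)
            dmactl |= ATA_DMA_WR;   /* device-to-memory transfer */

    iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
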
2190 struct ata_queued_cmd *qc; in nv_swncq_host_interrupt() local
2263 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_host_interrupt()
2264 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_host_interrupt()