Lines in drivers/ata/sata_nv.c matching references to qc (struct ata_queued_cmd):
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
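The declarations above (file lines 315-340) are the per-command hooks for the driver's two enhanced modes: ADMA on controllers that support it, and software NCQ (SWNCQ) elsewhere. libata invokes these through a function-pointer table. The following is a minimal, self-contained user-space model of that dispatch; the struct here is a heavily simplified stand-in for libata's struct ata_port_operations, and only the hook names mirror the driver.

    /*
     * User-space model (assumption: heavily simplified stand-in for
     * libata's struct ata_port_operations). Only the hook names mirror
     * the driver; everything else is illustrative scaffolding.
     */
    #include <stdio.h>

    struct ata_queued_cmd;                  /* opaque stand-in */

    struct port_ops {
            void         (*qc_prep)(struct ata_queued_cmd *qc);
            unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
    };

    static void adma_qc_prep(struct ata_queued_cmd *qc)
    {
            (void)qc;
            puts("prep: build the CPB for this tag");
    }

    static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
    {
            (void)qc;
            puts("issue: append the tag to the ADMA engine");
            return 0;
    }

    /* each mode (ADMA, SWNCQ, plain BMDMA) installs its own table */
    static const struct port_ops adma_ops = {
            .qc_prep  = adma_qc_prep,
            .qc_issue = adma_qc_issue,
    };

    int main(void)
    {
            adma_ops.qc_prep(NULL);
            return (int)adma_ops.qc_issue(NULL);
    }

Keeping one table per mode lets the rest of the issue path stay mode-agnostic.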
783 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) in nv_adma_check_atapi_dma() argument
785 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
882 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr() local
895 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
901 return ata_bmdma_port_intr(ap, qc); in nv_host_intr()
1113 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) in nv_adma_post_internal_cmd() argument
1115 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1118 ata_bmdma_post_internal_cmd(qc); in nv_adma_post_internal_cmd()
1329 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc, in nv_adma_fill_aprd() argument
1335 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1337 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
1348 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) in nv_adma_fill_sg() argument
1350 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1357 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1359 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)]; in nv_adma_fill_sg()
1360 nv_adma_fill_aprd(qc, sg, si, aprd); in nv_adma_fill_sg()
1363 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); in nv_adma_fill_sg()
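nv_adma_fill_sg (file lines 1348-1363) walks the scatter/gather list and emits one APRD per entry. Line 1359 shows only the spill branch of a two-way choice: assuming the usual sata_nv layout, the first five entries use APRD slots embedded in the CPB itself, and later entries go to a per-tag region of a shared external table, which line 1363's next_aprd pointer hands to the controller. A runnable sketch of that placement rule, with SGTBL_LEN as a placeholder constant:

    /*
     * Runnable sketch (assumption: simplified) of the APRD placement rule
     * behind file lines 1357-1363. SGTBL_LEN is a placeholder for
     * NV_ADMA_SGTBL_LEN; the real value is derived from the CPB layout.
     */
    #include <stdio.h>

    #define CPB_APRD_SLOTS  5       /* APRD slots embedded in the CPB */
    #define SGTBL_LEN       28      /* placeholder per-tag table length */

    static int aprd_slot(unsigned int tag, unsigned int si, const char **where)
    {
            if (si < CPB_APRD_SLOTS) {
                    *where = "cpb->aprd";           /* inline slot */
                    return (int)si;
            }
            *where = "pp->aprd";                    /* external per-tag table */
            return (int)(SGTBL_LEN * tag + (si - CPB_APRD_SLOTS));
    }

    int main(void)
    {
            const char *where;
            unsigned int si;

            for (si = 0; si < 8; si++) {
                    int idx = aprd_slot(3, si, &where);     /* tag 3 */
                    printf("sg[%u] -> %s[%d]\n", si, where, idx);
            }
            return 0;
    }

Keeping a few slots inline spares small transfers the extra table walk.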
1368 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) in nv_adma_use_reg_mode() argument
1370 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1375 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1378 if ((qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1379 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
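nv_adma_use_reg_mode (file lines 1368-1379) decides whether a command must bypass the ADMA engine and take the classic taskfile/BMDMA path instead. Condensing the two tests visible above into a user-space model, with the flag words reduced to booleans:

    /*
     * User-space model (assumption: flag words condensed to booleans) of
     * the decision at file lines 1368-1379.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct cmd {
            bool atapi_setup;       /* port flag NV_ADMA_ATAPI_SETUP_COMPLETE */
            bool polling;           /* ATA_TFLAG_POLLING */
            bool dmamap;            /* ATA_QCFLAG_DMAMAP */
            bool nodata;            /* protocol == ATA_PROT_NODATA */
    };

    static bool use_reg_mode(const struct cmd *c)
    {
            if (c->atapi_setup || c->polling)
                    return true;            /* must use register mode */
            if (c->dmamap || c->nodata)
                    return false;           /* eligible for ADMA mode */
            return true;                    /* everything else falls back */
    }

    int main(void)
    {
            struct cmd dma_cmd  = { .dmamap = true };
            struct cmd poll_cmd = { .polling = true };

            printf("dma-mapped: %s\n", use_reg_mode(&dma_cmd)  ? "reg" : "adma");
            printf("polled:     %s\n", use_reg_mode(&poll_cmd) ? "reg" : "adma");
            return 0;
    }

nv_adma_qc_prep and nv_adma_qc_issue below both consult this predicate before touching the CPB ring.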
1385 static void nv_adma_qc_prep(struct ata_queued_cmd *qc) in nv_adma_qc_prep() argument
1387 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1388 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; in nv_adma_qc_prep()
1392 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_prep()
1394 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1395 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1396 ata_bmdma_qc_prep(qc); in nv_adma_qc_prep()
1406 cpb->tag = qc->tag; in nv_adma_qc_prep()
1410 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1413 VPRINTK("qc->flags = 0x%lx\n", qc->flags); in nv_adma_qc_prep()
1415 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1417 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1418 nv_adma_fill_sg(qc, cpb); in nv_adma_qc_prep()
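The register-mode branch of nv_adma_qc_prep (file lines 1385-1418) defers to ata_bmdma_qc_prep; otherwise the CPB for qc->tag is filled in. The matches show only the qc-touching statements, but, assuming the standard sata_nv flow, the full function publishes the descriptor in a fixed order: fill every field first, then set the VALID control flag last behind write barriers so the controller never observes a half-built CPB. A user-space model of that publish idiom, with a C11 release store standing in for the kernel's wmb():

    /*
     * User-space model (assumption: the kernel's wmb() replaced by a C11
     * release store) of the descriptor-publish idiom: fill the CPB body,
     * then set the VALID control flag last so the controller never sees
     * a half-built descriptor.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    #define CPB_CTL_VALID 0x01u     /* stand-in for the VALID control bit */

    struct cpb {
            unsigned char tag;
            unsigned char next_cpb_idx;
            _Atomic unsigned char ctl_flags;        /* published last */
    };

    static void publish_cpb(struct cpb *cpb, unsigned char tag)
    {
            /* invalidate first, so a reused CPB is never half-valid */
            atomic_store_explicit(&cpb->ctl_flags, 0, memory_order_relaxed);

            cpb->tag = tag;                 /* ... fill the body ... */
            cpb->next_cpb_idx = 0;

            /* release store plays the role of wmb() + flag write */
            atomic_store_explicit(&cpb->ctl_flags, CPB_CTL_VALID,
                                  memory_order_release);
    }

    int main(void)
    {
            struct cpb cpb;

            publish_cpb(&cpb, 7);
            printf("ctl_flags=%#x\n", (unsigned int)cpb.ctl_flags);
            return 0;
    }

This ordering concern is why line 1406 (cpb->tag) runs well before the control flags are finalized.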
1431 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) in nv_adma_qc_issue() argument
1433 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1435 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1442 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1443 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1444 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1448 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_issue()
1450 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); in nv_adma_qc_issue()
1452 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1453 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1454 return ata_bmdma_qc_issue(qc); in nv_adma_qc_issue()
1456 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1469 writew(qc->tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
1471 DPRINTK("Issued tag %u\n", qc->tag); in nv_adma_qc_issue()
1487 struct ata_queued_cmd *qc; in nv_generic_interrupt() local
1489 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1490 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1491 handled += ata_bmdma_port_intr(ap, qc); in nv_generic_interrupt()
1726 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) in nv_swncq_qc_to_dq() argument
1733 dq->defer_bits |= (1 << qc->tag); in nv_swncq_qc_to_dq()
1734 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag; in nv_swncq_qc_to_dq()
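nv_swncq_qc_to_dq (file lines 1726-1734) parks a command that cannot be issued yet: a bitmap records which tags are deferred, and a power-of-two ring preserves their order. Indexing with tail++ masked by (ATA_MAX_QUEUE - 1) lets head and tail run freely, with wraparound handled by the mask. A self-contained model of the push, paired with an assumed pop counterpart modeled on the driver's nv_swncq_qc_from_dq:

    /*
     * Self-contained model of the SWNCQ defer queue: a bitmap of deferred
     * tags plus a power-of-two ring. The pop side is an assumption
     * modeled on the driver's nv_swncq_qc_from_dq.
     */
    #include <stdio.h>

    #define MAX_QUEUE 32                    /* stand-in for ATA_MAX_QUEUE */

    struct defer_queue {
            unsigned int defer_bits;        /* which tags are queued */
            unsigned int head, tail;        /* free-running indices */
            unsigned char tag[MAX_QUEUE];
    };

    static void dq_push(struct defer_queue *dq, unsigned char tag)
    {
            dq->defer_bits |= 1u << tag;
            dq->tag[dq->tail++ & (MAX_QUEUE - 1)] = tag;    /* mask wraps */
    }

    static int dq_pop(struct defer_queue *dq)
    {
            unsigned char tag;

            if (dq->head == dq->tail)
                    return -1;                              /* empty */
            tag = dq->tag[dq->head++ & (MAX_QUEUE - 1)];
            dq->defer_bits &= ~(1u << tag);
            return tag;
    }

    int main(void)
    {
            struct defer_queue dq = { 0 };

            dq_push(&dq, 5);
            dq_push(&dq, 9);
            printf("popped %d then %d\n", dq_pop(&dq), dq_pop(&dq)); /* 5 then 9 */
            return 0;
    }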
1786 struct ata_queued_cmd qc; in __ata_bmdma_stop() local
1788 qc.ap = ap; in __ata_bmdma_stop()
1789 ata_bmdma_stop(&qc); in __ata_bmdma_stop()
1992 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) in nv_swncq_qc_prep() argument
1994 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1995 ata_bmdma_qc_prep(qc); in nv_swncq_qc_prep()
1999 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
2002 nv_swncq_fill_sg(qc); in nv_swncq_qc_prep()
2005 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) in nv_swncq_fill_sg() argument
2007 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
2013 prd = pp->prd + ATA_MAX_PRD * qc->tag; in nv_swncq_fill_sg()
2016 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
2042 struct ata_queued_cmd *qc) in nv_swncq_issue_atacmd() argument
2046 if (qc == NULL) in nv_swncq_issue_atacmd()
2051 writel((1 << qc->tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2052 pp->last_issue_tag = qc->tag; in nv_swncq_issue_atacmd()
2053 pp->dhfis_bits &= ~(1 << qc->tag); in nv_swncq_issue_atacmd()
2054 pp->dmafis_bits &= ~(1 << qc->tag); in nv_swncq_issue_atacmd()
2055 pp->qc_active |= (0x1 << qc->tag); in nv_swncq_issue_atacmd()
2057 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2058 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2060 DPRINTK("Issued tag %u\n", qc->tag); in nv_swncq_issue_atacmd()
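nv_swncq_issue_atacmd (file lines 2042-2060) wraps the taskfile load/exec with per-tag bookkeeping: the tag's bit is set in qc_active and in the controller's SActive block, and cleared in the dhfis/dmafis masks, which (assuming the usual SWNCQ state machine) the interrupt path sets again as the corresponding FISes arrive. A condensed user-space model of the mask handling:

    /*
     * Condensed user-space model (assumption: FIS-mask semantics inferred
     * from the issue path) of the bookkeeping at file lines 2051-2055.
     * The SActive write, sff_tf_load and sff_exec_command are hardware
     * side effects and are not modeled.
     */
    #include <stdio.h>

    struct swncq_state {
            unsigned int qc_active;         /* tags with commands in flight */
            unsigned int dhfis_bits;        /* D2H register FIS seen per tag */
            unsigned int dmafis_bits;       /* DMA setup FIS seen per tag */
            unsigned int last_issue_tag;
    };

    static void issue_tag(struct swncq_state *pp, unsigned int tag)
    {
            pp->last_issue_tag = tag;
            pp->dhfis_bits  &= ~(1u << tag);        /* no FIS seen yet */
            pp->dmafis_bits &= ~(1u << tag);
            pp->qc_active   |=   1u << tag;         /* command in flight */
    }

    int main(void)
    {
            struct swncq_state pp = { .dhfis_bits = 0xffu, .dmafis_bits = 0xffu };

            issue_tag(&pp, 3);
            printf("qc_active=%#x dhfis=%#x\n", pp.qc_active, pp.dhfis_bits);
            return 0;
    }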
2065 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) in nv_swncq_qc_issue() argument
2067 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2070 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2071 return ata_bmdma_qc_issue(qc); in nv_swncq_qc_issue()
2076 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_qc_issue()
2078 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ in nv_swncq_qc_issue()
2110 struct ata_queued_cmd *qc; in nv_swncq_sdbfis() local
2166 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2167 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2173 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_sdbfis()
2174 WARN_ON(qc == NULL); in nv_swncq_sdbfis()
2175 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2192 struct ata_queued_cmd *qc; in nv_swncq_dmafis() local
2202 qc = ata_qc_from_tag(ap, tag); in nv_swncq_dmafis()
2204 if (unlikely(!qc)) in nv_swncq_dmafis()
2207 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2210 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag, in nv_swncq_dmafis()
2225 struct ata_queued_cmd *qc; in nv_swncq_host_interrupt() local
2298 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_host_interrupt()
2299 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_host_interrupt()