Lines matching (full words): disable, -, cqe, -, dcmd

1 // SPDX-License-Identifier: GPL-2.0-only
10 #include <linux/dma-mapping.h>
37 return cq_host->desc_base + (tag * cq_host->slot_sz); in get_desc()
44 return desc + cq_host->task_desc_len; in get_link_desc()
49 return cq_host->trans_desc_dma_base + in get_trans_desc_dma()
50 (cq_host->mmc->max_segs * tag * in get_trans_desc_dma()
51 cq_host->trans_desc_len); in get_trans_desc_dma()
56 return cq_host->trans_desc_base + in get_trans_desc()
57 (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag); in get_trans_desc()
68 memset(link_temp, 0, cq_host->link_desc_len); in setup_trans_desc()
69 if (cq_host->link_desc_len > 8) in setup_trans_desc()
72 if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) { in setup_trans_desc()
79 if (cq_host->dma64) { in setup_trans_desc()
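
In the elided body of setup_trans_desc(), the DCMD slot's link descriptor is written as not-valid plus end-of-chain (a direct command moves no data), while every other slot gets a valid link descriptor with the "link" action code (0x6) whose address field, starting at byte offset 4, points at that slot's transfer descriptors. A hedged sketch of that shape (bit positions mirror the driver's CQHCI_VALID/CQHCI_ACT/CQHCI_END macros and are assumptions here):

    #include <stdint.h>
    #include <string.h>

    /* Assumed attribute bits, modeled on CQHCI_VALID/CQHCI_END/CQHCI_ACT. */
    #define LD_VALID(x) ((uint32_t)((x) & 1) << 0)
    #define LD_END(x)   ((uint32_t)((x) & 1) << 1)
    #define LD_ACT(x)   ((uint32_t)((x) & 0x7) << 3)    /* 0x6 = link */

    /* Sketch: write one link descriptor (attributes, then address at +4).
     * Little-endian host assumed for the memcpy of the address. */
    static void setup_link_desc(uint8_t *link, uint64_t trans_dma,
                                int is_dcmd_slot, int dma64)
    {
            uint32_t attr;

            if (is_dcmd_slot) {
                    /* A direct command moves no data: not valid, end of chain. */
                    attr = LD_VALID(0) | LD_ACT(0) | LD_END(1);
                    memcpy(link, &attr, sizeof(attr));
                    return;
            }

            /* Normal slot: valid link pointing at its transfer descriptors. */
            attr = LD_VALID(1) | LD_ACT(0x6) | LD_END(0);
            memcpy(link, &attr, sizeof(attr));
            memcpy(link + 4, &trans_dma, dma64 ? 8 : 4);
    }
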
103 struct mmc_host *mmc = cq_host->mmc; in cqhci_dumpregs()
131 CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n", in cqhci_dumpregs()
141 if (cq_host->ops->dumpregs) in cqhci_dumpregs()
142 cq_host->ops->dumpregs(mmc); in cqhci_dumpregs()
150 * |----------|
151 * |task desc |  |->|----------|
152 * |----------|  |  |trans desc|
153 * |link desc-|->|  |----------|
154 * |----------|          .
155 *      .                .
156 *  no. of slots      max-segs
157 *      .           |----------|
158 * |----------|     |----------|
167 if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) { in cqhci_host_alloc_tdl()
170 cq_host->task_desc_len = 16; in cqhci_host_alloc_tdl()
172 cq_host->task_desc_len = 8; in cqhci_host_alloc_tdl()
180 if (cq_host->dma64) { in cqhci_host_alloc_tdl()
181 if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ) in cqhci_host_alloc_tdl()
182 cq_host->trans_desc_len = 12; in cqhci_host_alloc_tdl()
184 cq_host->trans_desc_len = 16; in cqhci_host_alloc_tdl()
185 cq_host->link_desc_len = 16; in cqhci_host_alloc_tdl()
187 cq_host->trans_desc_len = 8; in cqhci_host_alloc_tdl()
188 cq_host->link_desc_len = 8; in cqhci_host_alloc_tdl()
192 cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len; in cqhci_host_alloc_tdl()
194 cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; in cqhci_host_alloc_tdl()
196 cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * in cqhci_host_alloc_tdl()
197 cq_host->mmc->cqe_qdepth; in cqhci_host_alloc_tdl()
199 pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", in cqhci_host_alloc_tdl()
200 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, in cqhci_host_alloc_tdl()
201 cq_host->slot_sz); in cqhci_host_alloc_tdl()
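
Plugging in typical values makes these sizes concrete: with 64-bit DMA and CQHCI_TASK_DESC_SZ_128 set, each slot costs 16 + 16 bytes, and the transfer pool scales with max_segs and queue depth. A quick check of the arithmetic (max_segs and cqe_qdepth are assumed values, not universal):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed: 64-bit DMA, CQHCI_TASK_DESC_SZ_128 set. */
            int task_desc_len = 16, link_desc_len = 16, trans_desc_len = 16;
            int num_slots = 32, max_segs = 128, cqe_qdepth = 31;

            int slot_sz = task_desc_len + link_desc_len;
            size_t desc_size = (size_t)slot_sz * num_slots;
            size_t data_size = (size_t)trans_desc_len * max_segs * cqe_qdepth;

            printf("slot_sz=%d desc_size=%zu data_size=%zu\n",
                   slot_sz, desc_size, data_size); /* 32, 1024, 63488 */
            return 0;
    }
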
204 * allocate a dma-mapped chunk of memory for the descriptors in cqhci_host_alloc_tdl()
205 * allocate a dma-mapped chunk of memory for link descriptors in cqhci_host_alloc_tdl()
206 * setup each link-desc memory offset per slot-number to in cqhci_host_alloc_tdl()
207 * the descriptor table. in cqhci_host_alloc_tdl()
209 cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), in cqhci_host_alloc_tdl()
210 cq_host->desc_size, in cqhci_host_alloc_tdl()
211 &cq_host->desc_dma_base, in cqhci_host_alloc_tdl()
213 if (!cq_host->desc_base) in cqhci_host_alloc_tdl()
214 return -ENOMEM; in cqhci_host_alloc_tdl()
216 cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), in cqhci_host_alloc_tdl()
217 cq_host->data_size, in cqhci_host_alloc_tdl()
218 &cq_host->trans_desc_dma_base, in cqhci_host_alloc_tdl()
220 if (!cq_host->trans_desc_base) { in cqhci_host_alloc_tdl()
221 dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, in cqhci_host_alloc_tdl()
222 cq_host->desc_base, in cqhci_host_alloc_tdl()
223 cq_host->desc_dma_base); in cqhci_host_alloc_tdl()
224 cq_host->desc_base = NULL; in cqhci_host_alloc_tdl()
225 cq_host->desc_dma_base = 0; in cqhci_host_alloc_tdl()
226 return -ENOMEM; in cqhci_host_alloc_tdl()
229 pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", in cqhci_host_alloc_tdl()
230 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, in cqhci_host_alloc_tdl()
231 (unsigned long long)cq_host->desc_dma_base, in cqhci_host_alloc_tdl()
232 (unsigned long long)cq_host->trans_desc_dma_base); in cqhci_host_alloc_tdl()
234 for (; i < (cq_host->num_slots); i++) in cqhci_host_alloc_tdl()
242 struct mmc_host *mmc = cq_host->mmc; in __cqhci_enable()
255 if (mmc->caps2 & MMC_CAP2_CQE_DCMD) in __cqhci_enable()
258 if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) in __cqhci_enable()
263 cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), in __cqhci_enable()
265 cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), in __cqhci_enable()
268 cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2); in __cqhci_enable()
279 mmc->cqe_on = true; in __cqhci_enable()
281 if (cq_host->ops->enable) in __cqhci_enable()
282 cq_host->ops->enable(mmc); in __cqhci_enable()
289 cq_host->activated = true; in __cqhci_enable()
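
The elided middle of __cqhci_enable() is a read-modify-write of the CQHCI_CFG register: drop ENABLE before touching configuration, select DCMD and 128-bit task descriptors to match the capability checks above, program the descriptor list base (split via lower_32_bits/upper_32_bits as shown), then set ENABLE. A userspace model of that sequence (bit positions are assumptions based on the driver's header):

    #include <stdint.h>

    /* Assumed CQHCI_CFG bit layout. */
    #define CFG_ENABLE        (1u << 0)
    #define CFG_TASK_DESC_128 (1u << 8)
    #define CFG_DCMD          (1u << 12)

    static uint32_t cfg_reg;    /* stand-in for the MMIO CQHCI_CFG register */

    static void cqe_configure_and_enable(int want_dcmd, int want_desc128)
    {
            uint32_t cqcfg = cfg_reg;

            /* Configuration must not change while the engine is enabled. */
            if (cqcfg & CFG_ENABLE) {
                    cqcfg &= ~CFG_ENABLE;
                    cfg_reg = cqcfg;
            }

            cqcfg &= ~(CFG_DCMD | CFG_TASK_DESC_128);
            if (want_dcmd)
                    cqcfg |= CFG_DCMD;
            if (want_desc128)
                    cqcfg |= CFG_TASK_DESC_128;
            cfg_reg = cqcfg;

            /* ... descriptor list base and SSC registers go here ... */

            cfg_reg = cqcfg | CFG_ENABLE;
    }
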
300 cq_host->mmc->cqe_on = false; in __cqhci_disable()
302 cq_host->activated = false; in __cqhci_disable()
307 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_deactivate()
309 if (cq_host->enabled && cq_host->activated) in cqhci_deactivate()
318 /* Re-enable is done upon first request */ in cqhci_resume()
325 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_enable()
328 if (!card->ext_csd.cmdq_en) in cqhci_enable()
329 return -EINVAL; in cqhci_enable()
331 if (cq_host->enabled) in cqhci_enable()
334 cq_host->rca = card->rca; in cqhci_enable()
338 pr_err("%s: Failed to enable CQE, error %d\n", in cqhci_enable()
345 cq_host->enabled = true; in cqhci_enable()
363 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_off()
367 if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) in cqhci_off()
370 if (cq_host->ops->disable) in cqhci_off()
371 cq_host->ops->disable(mmc, false); in cqhci_off()
378 pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); in cqhci_off()
380 pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); in cqhci_off()
382 if (cq_host->ops->post_disable) in cqhci_off()
383 cq_host->ops->post_disable(mmc); in cqhci_off()
385 mmc->cqe_on = false; in cqhci_off()
390 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_disable()
392 if (!cq_host->enabled) in cqhci_disable()
399 dmam_free_coherent(mmc_dev(mmc), cq_host->data_size, in cqhci_disable()
400 cq_host->trans_desc_base, in cqhci_disable()
401 cq_host->trans_desc_dma_base); in cqhci_disable()
403 dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size, in cqhci_disable()
404 cq_host->desc_base, in cqhci_disable()
405 cq_host->desc_dma_base); in cqhci_disable()
407 cq_host->trans_desc_base = NULL; in cqhci_disable()
408 cq_host->desc_base = NULL; in cqhci_disable()
410 cq_host->enabled = false; in cqhci_disable()
416 u32 req_flags = mrq->data->flags; in cqhci_prep_task_desc()
428 CQHCI_BLK_COUNT(mrq->data->blocks) | in cqhci_prep_task_desc()
429 CQHCI_BLK_ADDR((u64)mrq->data->blk_addr); in cqhci_prep_task_desc()
432 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data); in cqhci_prep_task_desc()
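
The initializer elided between lines 417 and 428 ORs all task attributes into one 64-bit word: valid/end/interrupt bits at the bottom, the "task" action code 0x5, per-request flags (direction, priority, QBR, reliable write), a 16-bit block count at bits 31:16, and the 32-bit block address in the upper half. A sketch of the packing with only the commonly used fields (shift positions mirror the driver's macros and are assumptions here):

    #include <stdint.h>

    /* Assumed attribute layout for a CQE task descriptor. */
    #define TD_VALID(x)     (((uint64_t)((x) & 1)) << 0)
    #define TD_END(x)       (((uint64_t)((x) & 1)) << 1)
    #define TD_INT(x)       (((uint64_t)((x) & 1)) << 2)
    #define TD_ACT(x)       (((uint64_t)((x) & 0x7)) << 3)  /* 0x5 = task */
    #define TD_DATA_DIR(x)  (((uint64_t)((x) & 1)) << 12)   /* 1 = read */
    #define TD_BLK_COUNT(x) (((uint64_t)((x) & 0xFFFF)) << 16)
    #define TD_BLK_ADDR(x)  (((uint64_t)((x) & 0xFFFFFFFF)) << 32)

    static uint64_t prep_task_desc(uint32_t blocks, uint32_t blk_addr,
                                   int is_read)
    {
            return TD_VALID(1) | TD_END(1) | TD_INT(1) | TD_ACT(0x5) |
                   TD_DATA_DIR(!!is_read) | TD_BLK_COUNT(blocks) |
                   TD_BLK_ADDR((uint64_t)blk_addr);
    }
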
438 struct mmc_data *data = mrq->data; in cqhci_dma_map()
441 return -EINVAL; in cqhci_dma_map()
443 sg_count = dma_map_sg(mmc_dev(host), data->sg, in cqhci_dma_map()
444 data->sg_len, in cqhci_dma_map()
445 (data->flags & MMC_DATA_WRITE) ? in cqhci_dma_map()
448 pr_err("%s: sg-len: %d\n", __func__, data->sg_len); in cqhci_dma_map()
449 return -ENOMEM; in cqhci_dma_map()
480 struct mmc_data *data = mrq->data; in cqhci_prep_tran_desc()
483 bool dma64 = cq_host->dma64; in cqhci_prep_tran_desc()
488 sg_count = cqhci_dma_map(mrq->host, mrq); in cqhci_prep_tran_desc()
491 mmc_hostname(mrq->host), __func__, sg_count); in cqhci_prep_tran_desc()
497 for_each_sg(data->sg, sg, sg_count, i) { in cqhci_prep_tran_desc()
504 desc += cq_host->trans_desc_len; in cqhci_prep_tran_desc()
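
The loop above walks the DMA-mapped scatterlist, emitting one transfer descriptor per segment and flagging only the final one as end-of-chain before advancing the write cursor by trans_desc_len. A simplified model over a plain (addr, len) array (the attribute layout, action code 0x4 with a 16-bit length field, is an assumption following the descriptor convention above; little-endian host assumed):

    #include <stdint.h>
    #include <string.h>

    struct seg { uint64_t addr; uint32_t len; };    /* one DMA segment */

    /* Sketch: emit one 64-bit-DMA transfer descriptor per segment. */
    static void prep_tran_descs(uint8_t *desc, const struct seg *sg,
                                int sg_count, unsigned int trans_desc_len)
    {
            for (int i = 0; i < sg_count; i++) {
                    uint32_t end = (i + 1 == sg_count);     /* last segment? */
                    uint32_t attr = (1u << 0) |             /* valid */
                                    (end << 1) |            /* end of chain */
                                    (0x4u << 3) |           /* action: transfer */
                                    ((sg[i].len & 0xFFFFu) << 16); /* length */

                    memcpy(desc, &attr, sizeof(attr));      /* attributes */
                    memcpy(desc + 4, &sg[i].addr, 8);       /* 64-bit address */
                    desc += trans_desc_len;                 /* next descriptor */
            }
    }
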
518 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_prep_dcmd_desc()
521 if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) { in cqhci_prep_dcmd_desc()
525 if (mrq->cmd->flags & MMC_RSP_R1B) { in cqhci_prep_dcmd_desc()
534 task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot); in cqhci_prep_dcmd_desc()
535 memset(task_desc, 0, cq_host->task_desc_len); in cqhci_prep_dcmd_desc()
541 CQHCI_CMD_INDEX(mrq->cmd->opcode) | in cqhci_prep_dcmd_desc()
543 if (cq_host->ops->update_dcmd_desc) in cqhci_prep_dcmd_desc()
544 cq_host->ops->update_dcmd_desc(mmc, mrq, &data); in cqhci_prep_dcmd_desc()
547 pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n", in cqhci_prep_dcmd_desc()
548 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type); in cqhci_prep_dcmd_desc()
550 dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg); in cqhci_prep_dcmd_desc()
556 struct mmc_data *data = mrq->data; in cqhci_post_req()
559 dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len, in cqhci_post_req()
560 (data->flags & MMC_DATA_READ) ? in cqhci_post_req()
567 return mrq->cmd ? DCMD_SLOT : mrq->tag; in cqhci_tag()
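
Tag selection is the whole trick for slot sharing: a DCMD (mrq->cmd set, no block-layer tag) always lands in the reserved last slot, while data requests reuse their block-layer tag directly. In the elided body of cqhci_request() the chosen tag's bit is then written to the task doorbell register. A sketch (one-bit-per-tag doorbell semantics follow the CQHCI design; DCMD_SLOT = 31 matches the qdepth adjustment in cqhci_init() below):

    #include <stdint.h>

    #define DCMD_SLOT 31    /* last of 32 slots; cqe_qdepth = 32 - 1 */

    struct req { int has_cmd; int tag; };   /* stand-in for struct mmc_request */

    static int pick_tag(const struct req *mrq)
    {
            /* DCMDs carry no block-layer tag; they always use the last slot. */
            return mrq->has_cmd ? DCMD_SLOT : mrq->tag;
    }

    /* Submission sets the chosen tag's bit in the task doorbell register. */
    static uint32_t ring_doorbell(uint32_t tdbr, int tag)
    {
            return tdbr | (1u << tag);
    }
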
576 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_request()
579 if (!cq_host->enabled) { in cqhci_request()
581 return -EINVAL; in cqhci_request()
584 /* First request after resume has to re-enable */ in cqhci_request()
585 if (!cq_host->activated) in cqhci_request()
588 if (!mmc->cqe_on) { in cqhci_request()
589 if (cq_host->ops->pre_enable) in cqhci_request()
590 cq_host->ops->pre_enable(mmc); in cqhci_request()
593 mmc->cqe_on = true; in cqhci_request()
594 pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); in cqhci_request()
596 pr_err("%s: cqhci: CQE failed to exit halt state\n", in cqhci_request()
599 if (cq_host->ops->enable) in cqhci_request()
600 cq_host->ops->enable(mmc); in cqhci_request()
603 if (mrq->data) { in cqhci_request()
617 spin_lock_irqsave(&cq_host->lock, flags); in cqhci_request()
619 if (cq_host->recovery_halt) { in cqhci_request()
620 err = -EBUSY; in cqhci_request()
624 cq_host->slot[tag].mrq = mrq; in cqhci_request()
625 cq_host->slot[tag].flags = 0; in cqhci_request()
627 cq_host->qcnt += 1; in cqhci_request()
635 spin_unlock_irqrestore(&cq_host->lock, flags); in cqhci_request()
646 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_recovery_needed()
648 if (!cq_host->recovery_halt) { in cqhci_recovery_needed()
649 cq_host->recovery_halt = true; in cqhci_recovery_needed()
651 wake_up(&cq_host->wait_queue); in cqhci_recovery_needed()
652 if (notify && mrq->recovery_notifier) in cqhci_recovery_needed()
653 mrq->recovery_notifier(mrq); in cqhci_recovery_needed()
662 case -EILSEQ: in cqhci_error_flags()
664 case -ETIMEDOUT: in cqhci_error_flags()
674 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_error_irq()
679 spin_lock(&cq_host->lock); in cqhci_error_irq()
687 if (cq_host->recovery_halt) in cqhci_error_irq()
690 if (!cq_host->qcnt) { in cqhci_error_irq()
699 slot = &cq_host->slot[tag]; in cqhci_error_irq()
700 if (slot->mrq) { in cqhci_error_irq()
701 slot->flags = cqhci_error_flags(cmd_error, data_error); in cqhci_error_irq()
702 cqhci_recovery_needed(mmc, slot->mrq, true); in cqhci_error_irq()
708 slot = &cq_host->slot[tag]; in cqhci_error_irq()
709 if (slot->mrq) { in cqhci_error_irq()
710 slot->flags = cqhci_error_flags(data_error, cmd_error); in cqhci_error_irq()
711 cqhci_recovery_needed(mmc, slot->mrq, true); in cqhci_error_irq()
715 if (!cq_host->recovery_halt) { in cqhci_error_irq()
721 slot = &cq_host->slot[tag]; in cqhci_error_irq()
722 if (!slot->mrq) in cqhci_error_irq()
724 slot->flags = cqhci_error_flags(data_error, cmd_error); in cqhci_error_irq()
725 cqhci_recovery_needed(mmc, slot->mrq, true); in cqhci_error_irq()
731 spin_unlock(&cq_host->lock); in cqhci_error_irq()
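
The tag blamed in the slot lookups above (lines 699 and 708) comes from the task-error-info register: its low half carries the failing command's task id with a valid bit, the high half the failing data transfer's. A hedged decoder sketch (field offsets are assumptions from memory; verify against the CQHCI_TERRI_* macros in the driver header):

    #include <stdint.h>

    /* Assumed CQHCI_TERRI layout: cmd task id in bits 12:8 (valid: bit 15),
     * data task id in bits 28:24 (valid: bit 31). Returns -1 if not valid. */
    static int terri_cmd_tag(uint32_t terri)
    {
            return (terri & (1u << 15)) ? (int)((terri >> 8) & 0x1f) : -1;
    }

    static int terri_data_tag(uint32_t terri)
    {
            return (terri & (1u << 31)) ? (int)((terri >> 24) & 0x1f) : -1;
    }
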
736 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_finish_mrq()
737 struct cqhci_slot *slot = &cq_host->slot[tag]; in cqhci_finish_mrq()
738 struct mmc_request *mrq = slot->mrq; in cqhci_finish_mrq()
748 if (cq_host->recovery_halt) { in cqhci_finish_mrq()
749 slot->flags |= CQHCI_COMPLETED; in cqhci_finish_mrq()
753 slot->mrq = NULL; in cqhci_finish_mrq()
755 cq_host->qcnt -= 1; in cqhci_finish_mrq()
757 data = mrq->data; in cqhci_finish_mrq()
759 if (data->error) in cqhci_finish_mrq()
760 data->bytes_xfered = 0; in cqhci_finish_mrq()
762 data->bytes_xfered = data->blksz * data->blocks; in cqhci_finish_mrq()
773 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_irq()
790 spin_lock(&cq_host->lock); in cqhci_irq()
792 for_each_set_bit(tag, &comp_status, cq_host->num_slots) { in cqhci_irq()
799 if (cq_host->waiting_for_idle && !cq_host->qcnt) { in cqhci_irq()
800 cq_host->waiting_for_idle = false; in cqhci_irq()
801 wake_up(&cq_host->wait_queue); in cqhci_irq()
804 spin_unlock(&cq_host->lock); in cqhci_irq()
808 wake_up(&cq_host->wait_queue); in cqhci_irq()
811 wake_up(&cq_host->wait_queue); in cqhci_irq()
822 spin_lock_irqsave(&cq_host->lock, flags); in cqhci_is_idle()
823 is_idle = !cq_host->qcnt || cq_host->recovery_halt; in cqhci_is_idle()
824 *ret = cq_host->recovery_halt ? -EBUSY : 0; in cqhci_is_idle()
825 cq_host->waiting_for_idle = !is_idle; in cqhci_is_idle()
826 spin_unlock_irqrestore(&cq_host->lock, flags); in cqhci_is_idle()
833 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_wait_for_idle()
836 wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret)); in cqhci_wait_for_idle()
844 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_timeout()
846 struct cqhci_slot *slot = &cq_host->slot[tag]; in cqhci_timeout()
850 spin_lock_irqsave(&cq_host->lock, flags); in cqhci_timeout()
851 timed_out = slot->mrq == mrq; in cqhci_timeout()
853 slot->flags |= CQHCI_EXTERNAL_TIMEOUT; in cqhci_timeout()
855 *recovery_needed = cq_host->recovery_halt; in cqhci_timeout()
857 spin_unlock_irqrestore(&cq_host->lock, flags); in cqhci_timeout()
875 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_clear_all_tasks()
885 wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host), in cqhci_clear_all_tasks()
906 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_halt()
919 wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host), in cqhci_halt()
942 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_recovery_start()
946 WARN_ON(!cq_host->recovery_halt); in cqhci_recovery_start()
950 if (cq_host->ops->disable) in cqhci_recovery_start()
951 cq_host->ops->disable(mmc, true); in cqhci_recovery_start()
953 mmc->cqe_on = false; in cqhci_recovery_start()
961 /* CRC errors might indicate re-tuning so prefer to report that */ in cqhci_error_from_flags()
963 return -EILSEQ; in cqhci_error_from_flags()
966 return -ETIMEDOUT; in cqhci_error_from_flags()
968 return -EIO; in cqhci_error_from_flags()
973 struct cqhci_slot *slot = &cq_host->slot[tag]; in cqhci_recover_mrq()
974 struct mmc_request *mrq = slot->mrq; in cqhci_recover_mrq()
980 slot->mrq = NULL; in cqhci_recover_mrq()
982 cq_host->qcnt -= 1; in cqhci_recover_mrq()
984 data = mrq->data; in cqhci_recover_mrq()
986 data->bytes_xfered = 0; in cqhci_recover_mrq()
987 data->error = cqhci_error_from_flags(slot->flags); in cqhci_recover_mrq()
989 mrq->cmd->error = cqhci_error_from_flags(slot->flags); in cqhci_recover_mrq()
992 mmc_cqe_request_done(cq_host->mmc, mrq); in cqhci_recover_mrq()
999 for (i = 0; i < cq_host->num_slots; i++) in cqhci_recover_mrqs()
1015 struct cqhci_host *cq_host = mmc->cqe_private; in cqhci_recovery_finish()
1022 WARN_ON(!cq_host->recovery_halt); in cqhci_recovery_finish()
1032 * be disabled/re-enabled, but not to disable before clearing tasks. in cqhci_recovery_finish()
1036 pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc)); in cqhci_recovery_finish()
1051 WARN_ON(cq_host->qcnt); in cqhci_recovery_finish()
1053 spin_lock_irqsave(&cq_host->lock, flags); in cqhci_recovery_finish()
1054 cq_host->qcnt = 0; in cqhci_recovery_finish()
1055 cq_host->recovery_halt = false; in cqhci_recovery_finish()
1056 mmc->cqe_on = false; in cqhci_recovery_finish()
1057 spin_unlock_irqrestore(&cq_host->lock, flags); in cqhci_recovery_finish()
1059 /* Ensure all writes are done before interrupts are re-enabled */ in cqhci_recovery_finish()
1090 dev_dbg(&pdev->dev, "CMDQ not supported\n"); in cqhci_pltfm_init()
1091 return ERR_PTR(-EINVAL); in cqhci_pltfm_init()
1094 cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); in cqhci_pltfm_init()
1096 return ERR_PTR(-ENOMEM); in cqhci_pltfm_init()
1097 cq_host->mmio = devm_ioremap(&pdev->dev, in cqhci_pltfm_init()
1098 cqhci_memres->start, in cqhci_pltfm_init()
1100 if (!cq_host->mmio) { in cqhci_pltfm_init()
1101 dev_err(&pdev->dev, "failed to remap cqhci regs\n"); in cqhci_pltfm_init()
1102 return ERR_PTR(-EBUSY); in cqhci_pltfm_init()
1104 dev_dbg(&pdev->dev, "CMDQ ioremap: done\n"); in cqhci_pltfm_init()
1127 cq_host->dma64 = dma64; in cqhci_init()
1128 cq_host->mmc = mmc; in cqhci_init()
1129 cq_host->mmc->cqe_private = cq_host; in cqhci_init()
1131 cq_host->num_slots = NUM_SLOTS; in cqhci_init()
1132 cq_host->dcmd_slot = DCMD_SLOT; in cqhci_init()
1134 mmc->cqe_ops = &cqhci_cqe_ops; in cqhci_init()
1136 mmc->cqe_qdepth = NUM_SLOTS; in cqhci_init()
1137 if (mmc->caps2 & MMC_CAP2_CQE_DCMD) in cqhci_init()
1138 mmc->cqe_qdepth -= 1; in cqhci_init()
1140 cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots, in cqhci_init()
1141 sizeof(*cq_host->slot), GFP_KERNEL); in cqhci_init()
1142 if (!cq_host->slot) { in cqhci_init()
1143 err = -ENOMEM; in cqhci_init()
1147 spin_lock_init(&cq_host->lock); in cqhci_init()
1149 init_completion(&cq_host->halt_comp); in cqhci_init()
1150 init_waitqueue_head(&cq_host->wait_queue); in cqhci_init()
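
One consequence of the qdepth adjustment at lines 1137-1138: with the default 32 slots and DCMD support, slot 31 is reserved for direct commands, leaving 31 usable data tags. Spelled out (values mirror the driver's NUM_SLOTS/DCMD_SLOT defaults):

    #include <stdio.h>

    #define NUM_SLOTS 32    /* driver default */

    int main(void)
    {
            int dcmd_supported = 1;         /* MMC_CAP2_CQE_DCMD present */
            int cqe_qdepth = NUM_SLOTS;

            if (dcmd_supported)
                    cqe_qdepth -= 1;        /* slot 31 reserved for DCMD */

            printf("usable data tags: %d\n", cqe_qdepth);   /* prints 31 */
            return 0;
    }
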