Lines Matching +full:cmdq +full:- +full:sync
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-2023 Intel Corporation
39 #define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
40 #define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
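
The two masks above implement SMMUv3-style queue indexing: prod and cons carry one extra wrap bit beyond the slot index, so IVPU_MMU_Q_IDX_MASK selects the slot while IVPU_MMU_Q_WRAP_MASK keeps the wrap bit when the counters are advanced. Below is a minimal user-space sketch of the technique, assuming a 16-entry depth and a wrap bit sitting one above the index bits (illustrative values, not taken from the driver):

/* Minimal model of wrap-bit circular-queue indexing. Macro values are
 * assumptions for illustration; only the technique mirrors the driver. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define Q_COUNT_LOG2  4                      /* hypothetical: 16-entry queue */
#define Q_COUNT       (1u << Q_COUNT_LOG2)
#define Q_WRAP_BIT    (Q_COUNT << 1)         /* one bit above the index bits */
#define Q_WRAP_MASK   (Q_WRAP_BIT - 1)       /* keeps index bits + wrap bit */
#define Q_IDX_MASK    (Q_COUNT - 1)          /* keeps index bits only */
#define Q_IDX(val)    ((val) & Q_IDX_MASK)

int main(void)
{
	uint32_t prod = 0, cons = 0;

	/* Fill the queue completely: prod wraps once, cons does not. */
	for (unsigned int i = 0; i < Q_COUNT; i++)
		prod = (prod + 1) & Q_WRAP_MASK;

	/* Same slot index, but the wrap bit differs: queue is full, not empty. */
	assert(Q_IDX(prod) == Q_IDX(cons));
	assert(prod != cons);
	printf("prod=0x%x cons=0x%x -> full\n", prod, cons);

	/* Drain it: once cons catches up, prod == cons means empty again. */
	for (unsigned int i = 0; i < Q_COUNT; i++)
		cons = (cons + 1) & Q_WRAP_MASK;
	assert(prod == cons);
	printf("prod=0x%x cons=0x%x -> empty\n", prod, cons);
	return 0;
}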
189 #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
211 return "Transaction marks non-substream disabled"; in ivpu_mmu_event_to_str()
239 return "Unknown CMDQ command"; in ivpu_mmu_event_to_str()
279 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_alloc()
280 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cdtab_alloc()
283 cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL); in ivpu_mmu_cdtab_alloc()
284 if (!cdtab->base) in ivpu_mmu_cdtab_alloc()
285 return -ENOMEM; in ivpu_mmu_cdtab_alloc()
287 ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size); in ivpu_mmu_cdtab_alloc()
294 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_alloc()
295 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_alloc()
298 strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL); in ivpu_mmu_strtab_alloc()
299 if (!strtab->base) in ivpu_mmu_strtab_alloc()
300 return -ENOMEM; in ivpu_mmu_strtab_alloc()
302 strtab->base_cfg = IVPU_MMU_STRTAB_CFG; in ivpu_mmu_strtab_alloc()
303 strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA; in ivpu_mmu_strtab_alloc()
304 strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK; in ivpu_mmu_strtab_alloc()
307 &strtab->dma, &strtab->dma_q, size); in ivpu_mmu_strtab_alloc()
314 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cmdq_alloc()
315 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc()
317 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL); in ivpu_mmu_cmdq_alloc()
318 if (!q->base) in ivpu_mmu_cmdq_alloc()
319 return -ENOMEM; in ivpu_mmu_cmdq_alloc()
321 q->dma_q = IVPU_MMU_Q_BASE_RWA; in ivpu_mmu_cmdq_alloc()
322 q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; in ivpu_mmu_cmdq_alloc()
323 q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; in ivpu_mmu_cmdq_alloc()
325 ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n", in ivpu_mmu_cmdq_alloc()
326 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_cmdq_alloc()
333 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_evtq_alloc()
334 struct ivpu_mmu_queue *q = &mmu->evtq; in ivpu_mmu_evtq_alloc()
336 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL); in ivpu_mmu_evtq_alloc()
337 if (!q->base) in ivpu_mmu_evtq_alloc()
338 return -ENOMEM; in ivpu_mmu_evtq_alloc()
340 q->dma_q = IVPU_MMU_Q_BASE_RWA; in ivpu_mmu_evtq_alloc()
341 q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; in ivpu_mmu_evtq_alloc()
342 q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; in ivpu_mmu_evtq_alloc()
345 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_evtq_alloc()
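
Each queue allocation above follows the same pattern: grab a DMA-coherent buffer with dmam_alloc_coherent(), then pack the value later written to the *_BASE register as read/write-allocate flags OR'ed with the masked DMA address OR'ed with log2 of the entry count. A sketch of that packing, with made-up field positions (the real VPU_37XX register layout is not shown in these lines):

/* Illustrative packing of a queue base register value (dma_q). Field
 * positions and widths below are assumptions for the example only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define Q_BASE_RWA        (1ull << 62)            /* hypothetical read/write-allocate hint */
#define Q_BASE_ADDR_MASK  0x0000ffffffffffe0ull   /* hypothetical address field */
#define Q_COUNT_LOG2      4u                      /* hypothetical: 16 entries */

static uint64_t pack_queue_base(uint64_t dma_addr)
{
	uint64_t dma_q = Q_BASE_RWA;

	dma_q |= dma_addr & Q_BASE_ADDR_MASK;  /* aligned base of the coherent buffer */
	dma_q |= Q_COUNT_LOG2;                 /* queue size encoded as log2(entries) */
	return dma_q;
}

int main(void)
{
	uint64_t dma = 0x12345000ull;          /* pretend the coherent allocation landed here */

	printf("dma_q = 0x%" PRIx64 "\n", pack_queue_base(dma));
	return 0;
}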
368 ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret); in ivpu_mmu_structs_alloc()
408 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons() local
410 return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons), in ivpu_mmu_cmdq_wait_for_cons()
416 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write()
417 u64 *queue_buffer = q->base; in ivpu_mmu_cmdq_cmd_write()
418 int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); in ivpu_mmu_cmdq_cmd_write()
420 if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) { in ivpu_mmu_cmdq_cmd_write()
422 return -EBUSY; in ivpu_mmu_cmdq_cmd_write()
427 q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_cmdq_cmd_write()
436 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_sync()
445 ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0); in ivpu_mmu_cmdq_sync()
449 clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_cmdq_sync()
450 REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod); in ivpu_mmu_cmdq_sync()
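
ivpu_mmu_cmdq_sync() is the producer side: append a SYNC command at the masked prod slot, advance prod under the wrap mask, flush the queue memory with clflush_cache_range(), publish prod through the CMDQ_PROD register and then poll CMDQ_CONS until the device has consumed everything (ivpu_mmu_cmdq_wait_for_cons() above). A rough user-space model of that ordering, with a fake device standing in for the MMIO registers (names and the SYNC opcode value are illustrative):

/* Producer-side model: write slot, advance prod, publish, wait for cons. */
#include <stdint.h>
#include <stdio.h>

#define Q_COUNT       16u
#define Q_WRAP_MASK   ((Q_COUNT << 1) - 1)
#define Q_IDX(v)      ((v) & (Q_COUNT - 1))
#define CMD_DWORDS    2u                       /* 16-byte commands, as two u64s */

struct fake_dev { uint32_t prod_reg, cons_reg; };  /* stands in for the MMIO registers */

static uint64_t cmdq_buf[Q_COUNT * CMD_DWORDS];
static uint32_t prod, cons;

static void cmdq_write(uint64_t data0, uint64_t data1)
{
	/* The driver also checks CIRC_SPACE() for a free slot before writing. */
	uint64_t *slot = &cmdq_buf[Q_IDX(prod) * CMD_DWORDS];

	slot[0] = data0;
	slot[1] = data1;
	prod = (prod + 1) & Q_WRAP_MASK;
}

static void cmdq_sync(struct fake_dev *dev)
{
	cmdq_write(0x46 /* pretend SYNC opcode */, 0);
	/* In the driver: clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); */
	dev->prod_reg = prod;                   /* REGV_WR32(...CMDQ_PROD, q->prod) */
	dev->cons_reg = dev->prod_reg;          /* fake hardware consumes instantly */
	while (dev->cons_reg != dev->prod_reg)  /* REGV_POLL on CMDQ_CONS in the driver */
		;
	cons = dev->cons_reg;
}

int main(void)
{
	struct fake_dev dev = {0};

	cmdq_sync(&dev);
	printf("prod=%u cons=%u\n", prod, cons);
	return 0;
}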
484 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_reset()
488 memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
489 clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
490 mmu->cmdq.prod = 0; in ivpu_mmu_reset()
491 mmu->cmdq.cons = 0; in ivpu_mmu_reset()
493 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_reset()
494 mmu->evtq.prod = 0; in ivpu_mmu_reset()
495 mmu->evtq.cons = 0; in ivpu_mmu_reset()
509 REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q); in ivpu_mmu_reset()
510 REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg); in ivpu_mmu_reset()
512 REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q); in ivpu_mmu_reset()
533 REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q); in ivpu_mmu_reset()
557 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_link_cd()
558 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_link_cd()
559 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_strtab_link_cd()
560 u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE); in ivpu_mmu_strtab_link_cd()
567 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK); in ivpu_mmu_strtab_link_cd()
598 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_invalidate_tlb()
601 mutex_lock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
602 if (!mmu->on) in ivpu_mmu_invalidate_tlb()
611 mutex_unlock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
617 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cd_add()
618 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cd_add()
624 return -EINVAL; in ivpu_mmu_cd_add()
626 entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE); in ivpu_mmu_cd_add()
662 mutex_lock(&mmu->lock); in ivpu_mmu_cd_add()
663 if (!mmu->on) in ivpu_mmu_cd_add()
672 mutex_unlock(&mmu->lock); in ivpu_mmu_cd_add()
680 ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma); in ivpu_mmu_cd_add_gbl()
693 return -EINVAL; in ivpu_mmu_cd_add_user()
705 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_init()
710 drmm_mutex_init(&vdev->drm, &mmu->lock); in ivpu_mmu_init()
742 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_enable()
745 mutex_lock(&mmu->lock); in ivpu_mmu_enable()
747 mmu->on = true; in ivpu_mmu_enable()
767 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
771 mmu->on = false; in ivpu_mmu_enable()
772 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
778 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_disable()
780 mutex_lock(&mmu->lock); in ivpu_mmu_disable()
781 mmu->on = false; in ivpu_mmu_disable()
782 mutex_unlock(&mmu->lock); in ivpu_mmu_disable()
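
ivpu_mmu_enable()/ivpu_mmu_disable() flip mmu->on under mmu->lock, and queue users such as ivpu_mmu_invalidate_tlb() and ivpu_mmu_cd_add() take the same lock and return early when the flag is clear, so nothing is written to the hardware queues while the MMU is down. A small pthread-based sketch of that pattern (names are illustrative, not the driver's API):

/* "on" flag guarded by a mutex: disabled state turns queue ops into no-ops. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mmu_state {
	pthread_mutex_t lock;
	bool on;
};

static int mmu_invalidate_tlb(struct mmu_state *mmu)
{
	int ret = 0;

	pthread_mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;             /* MMU disabled: skip the cmdq writes */

	/* ...write TLB-invalidate + SYNC commands to the command queue here... */
	printf("TLB invalidated\n");
unlock:
	pthread_mutex_unlock(&mmu->lock);
	return ret;
}

static void mmu_disable(struct mmu_state *mmu)
{
	pthread_mutex_lock(&mmu->lock);
	mmu->on = false;
	pthread_mutex_unlock(&mmu->lock);
}

int main(void)
{
	struct mmu_state mmu = { .lock = PTHREAD_MUTEX_INITIALIZER, .on = true };

	mmu_invalidate_tlb(&mmu);        /* runs: the MMU is on */
	mmu_disable(&mmu);
	mmu_invalidate_tlb(&mmu);        /* silently skipped: the MMU is off */
	return 0;
}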
799 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq; in ivpu_mmu_get_event()
800 u32 idx = IVPU_MMU_Q_IDX(evtq->cons); in ivpu_mmu_get_event()
801 u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); in ivpu_mmu_get_event()
803 evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC); in ivpu_mmu_get_event()
804 if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) in ivpu_mmu_get_event()
807 evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_get_event()
808 REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons); in ivpu_mmu_get_event()
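
ivpu_mmu_get_event() is the consumer side: read the PROD register, use CIRC_CNT() on the masked indices to see whether an event is pending, and if so hand back the slot at cons, then advance cons under the wrap mask and acknowledge it through the CONS register. A user-space model of that flow (fake registers; the 32-byte event size is an assumption):

/* Consumer-side model of the event queue. CIRC_CNT matches the kernel's
 * definition: entries available for reading in a power-of-two ring. */
#include <stdint.h>
#include <stdio.h>

#define Q_COUNT       16u
#define Q_WRAP_MASK   ((Q_COUNT << 1) - 1)
#define Q_IDX(v)      ((v) & (Q_COUNT - 1))
#define EVT_DWORDS    8u                       /* assumed 32-byte events, as eight u32s */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

static uint32_t evtq_buf[Q_COUNT * EVT_DWORDS];
static uint32_t prod_reg, cons_reg;            /* stand-ins for the MMIO registers */
static uint32_t cons;

static uint32_t *get_event(void)
{
	uint32_t *evt = &evtq_buf[Q_IDX(cons) * EVT_DWORDS];
	uint32_t prod = prod_reg;              /* REGV_RD32(...EVTQ_PROD_SEC) */

	if (!CIRC_CNT(Q_IDX(prod), Q_IDX(cons), Q_COUNT))
		return NULL;                   /* nothing pending */

	cons = (cons + 1) & Q_WRAP_MASK;       /* consume one entry... */
	cons_reg = cons;                       /* ...and acknowledge it to the hardware */
	return evt;
}

int main(void)
{
	prod_reg = 1;                          /* pretend the hardware produced one event */
	printf("event %s\n", get_event() ? "found" : "missing");
	printf("event %s\n", get_event() ? "found" : "missing");
	return 0;
}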
858 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
866 if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active)) in ivpu_mmu_irq_gerr_handler()
867 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); in ivpu_mmu_irq_gerr_handler()
874 return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma); in ivpu_mmu_set_pgtable()