/drivers/staging/media/ipu3/
ipu3-mmu.c
     78  static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
     80          writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
     83  static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
     84                                      void (*func)(struct imgu_mmu *mmu))
     86          if (!pm_runtime_get_if_in_use(mmu->dev))
     89          func(mmu);
     90          pm_runtime_put(mmu->dev);
    101  static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
    106          writel(halt, mmu->base + REG_GP_HALT);
    107          ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
    [all …]

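The call_if_imgu_is_powered() helper above is a common runtime-PM guard: touch the hardware only if it is already powered up, and drop the reference afterwards. A minimal generic sketch of the same pattern follows; struct my_dev and its dev member are illustrative, not part of the ipu3 driver.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct my_dev {
            struct device *dev;             /* illustrative device handle */
    };

    static void call_if_powered(struct my_dev *md, void (*func)(struct my_dev *))
    {
            /*
             * pm_runtime_get_if_in_use() takes a usage-count reference only
             * if the device is currently active: it returns 0 (no reference
             * taken) when the device is suspended, and a negative errno when
             * runtime PM is disabled. It never wakes the device, so a
             * sleeping block is simply skipped.
             */
            if (pm_runtime_get_if_in_use(md->dev) <= 0)
                    return;

            func(md);
            pm_runtime_put(md->dev);
    }
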
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
     42  nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
     51          list_add(&ptp->head, &mmu->ptp.list);
     56          nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
     65  nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
     74          ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
     82          ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
     93          list_add(&ptp->head, &mmu->ptp.list);
    120  nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
    124          list_for_each_entry(ptc, &mmu->ptc.list, head) {
    134          list_add(&ptc->head, &mmu->ptc.list);
    [all …]

Kbuild
      2  nvkm-y += nvkm/subdev/mmu/base.o
      3  nvkm-y += nvkm/subdev/mmu/nv04.o
      4  nvkm-y += nvkm/subdev/mmu/nv41.o
      5  nvkm-y += nvkm/subdev/mmu/nv44.o
      6  nvkm-y += nvkm/subdev/mmu/nv50.o
      7  nvkm-y += nvkm/subdev/mmu/g84.o
      8  nvkm-y += nvkm/subdev/mmu/mcp77.o
      9  nvkm-y += nvkm/subdev/mmu/gf100.o
     10  nvkm-y += nvkm/subdev/mmu/gk104.o
     11  nvkm-y += nvkm/subdev/mmu/gk20a.o
    [all …]

ummu.c
     35          struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
     37          if (mmu->func->mem.user.oclass) {
     39                  oclass->base = mmu->func->mem.user;
     45          if (mmu->func->vmm.user.oclass) {
     47                  oclass->base = mmu->func->vmm.user;
     59          struct nvkm_mmu *mmu = ummu->mmu;
     67          if ((index = args->v0.index) >= mmu->heap_nr)
     69          args->v0.size = mmu->heap[index].size;
     79          struct nvkm_mmu *mmu = ummu->mmu;
     87          if ((index = args->v0.index) >= mmu->type_nr)
    [all …]

umem.c
     72          struct nvkm_device *device = umem->mmu->subdev.device;
     90          struct nvkm_mmu *mmu = umem->mmu;
    109                  int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
    145          struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
    161          if (type >= mmu->type_nr)
    167          umem->mmu = mmu;
    168          umem->type = mmu->type[type].type;
    172          if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
    177          ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,

mem.c
     33          struct nvkm_mmu *mmu;
     88                  dma_unmap_page(mem->mmu->subdev.device->dev,
    144  nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
    147          struct device *dev = mmu->subdev.device->dev;
    157          if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
    158              !(mmu->type[type].type & NVKM_MEM_UNCACHED))
    169          mem->mmu = mmu;
    199          if (mmu->dma_bits > 32)
    209                  mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
    224  nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
    [all …]

nv44.c
     32  nv44_mmu_init(struct nvkm_mmu *mmu)
     34          struct nvkm_device *device = mmu->subdev.device;
     35          struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
     46          nvkm_wr32(device, 0x100818, mmu->vmm->null);
     59          .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},

memnv04.c
     31  nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
     37          struct nvkm_device *device = mmu->subdev.device;
     51  nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
     62          if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
     67          return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,

vmmtu102.c
     29          struct nvkm_device *device = vmm->mmu->subdev.device;
     36          mutex_lock(&vmm->mmu->mutex);
     48          mutex_unlock(&vmm->mmu->mutex);
     71  tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
     75          return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,

vmmgf100.c
    183          struct nvkm_device *device = vmm->mmu->subdev.device;
    190          struct nvkm_device *device = vmm->mmu->subdev.device;
    194          mutex_lock(&vmm->mmu->mutex);
    224          mutex_unlock(&vmm->mmu->mutex);
    247          struct nvkm_device *device = vmm->mmu->subdev.device;
    276                  kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
    402                 struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
    406          switch (mmu->subdev.device->fb->page) {
    407          case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
    409          case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
    [all …]

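The tu102 and gf100 excerpts both bracket their TLB invalidation in vmm->mmu->mutex: there is one flush interface per MMU, so requests must be serialized and each caller has to wait for its own flush to finish before releasing the lock. A generic sketch of that pattern, with placeholder register offsets rather than the real NVIDIA ones:

    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include <linux/mutex.h>

    #define REG_FLUSH_TRIGGER 0x0080        /* placeholder offsets */
    #define REG_FLUSH_PENDING 0x0084

    struct my_mmu {
            void __iomem *base;
            struct mutex mutex;             /* serializes TLB flushes */
    };

    static int my_tlb_flush(struct my_mmu *mmu, u32 pdb)
    {
            u32 busy;
            int ret;

            mutex_lock(&mmu->mutex);
            /* Kick off an invalidate for the given page-directory base... */
            writel(pdb, mmu->base + REG_FLUSH_TRIGGER);
            /* ...and wait for it to complete before the next caller may
             * issue one; otherwise completions could not be told apart. */
            ret = readl_poll_timeout(mmu->base + REG_FLUSH_PENDING, busy,
                                     !busy, 1, 1000);
            mutex_unlock(&mmu->mutex);
            return ret;
    }
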
/drivers/staging/media/atomisp/pci/mmu/
isp_mmu.c
     57  static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
     79  static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
     82          return mmu->driver->pte_to_phys(mmu, pte);
     85  static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
     88          unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
     90          return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
     97  static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
    119                  atomisp_set_pte(page, i, mmu->driver->null_pte);
    125  static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
    142  static void mmu_remap_error(struct isp_mmu *mmu,
    [all …]

sh_mmu_mrfld.c
     31  static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
     37  static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
     40          unsigned int mask = mmu->driver->pte_valid_mask;
     45  static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
     48          unsigned int pte = sh_phys_to_pte(mmu, phys);
     64  static void sh_tlb_flush(struct isp_mmu *mmu)

/drivers/gpu/drm/nouveau/nvif/
mmu.c
     28  nvif_mmu_dtor(struct nvif_mmu *mmu)
     30          kfree(mmu->kind);
     31          kfree(mmu->type);
     32          kfree(mmu->heap);
     33          nvif_object_dtor(&mmu->object);
     38                struct nvif_mmu *mmu)
     50          mmu->heap = NULL;
     51          mmu->type = NULL;
     52          mmu->kind = NULL;
     55                                 &args, sizeof(args), &mmu->object);
    [all …]

mem.c
     28  nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,
     31          int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type,
     48  nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
     72          ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass,
     75                  mem->type = mmu->type[type].type;
     88  nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
     95          for (i = 0; ret && i < mmu->type_nr; i++) {
     96                  if ((mmu->type[i].type & type) == type) {
     97                          ret = nvif_mem_ctor_type(mmu, name, oclass, i, page,

/drivers/iommu/
ipmmu-vmsa.c
     72          struct ipmmu_vmsa_device *mmu;
    150  static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
    152          return mmu->root == mmu;
    157          struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
    160          if (ipmmu_is_root(mmu))
    161                  *rootp = mmu;
    178  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
    180          return ioread32(mmu->base + offset);
    183  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
    186          iowrite32(data, mmu->base + offset);
    [all …]

/drivers/staging/media/atomisp/include/mmu/
isp_mmu.h
     88          unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
    100          void (*tlb_flush_range)(struct isp_mmu *mmu,
    102          void (*tlb_flush_all)(struct isp_mmu *mmu);
    103          unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
    105          phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
    120  #define ISP_PTE_VALID_MASK(mmu) \
    121          ((mmu)->driver->pte_valid_mask)
    123  #define ISP_PTE_VALID(mmu, pte) \
    124          ((pte) & ISP_PTE_VALID_MASK(mmu))
    132  int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
    [all …]

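The callbacks above form the contract between the generic isp_mmu layer and a hardware-specific client such as sh_mmu_mrfld.c earlier. A hedged sketch of how such a client might be wired up; the 12-bit page shift and the elided callback parameters are assumptions for illustration, and only callbacks visible in the excerpts are filled in:

    static unsigned int my_phys_to_pte(struct isp_mmu *mmu, phys_addr_t phys)
    {
            return phys >> 12;              /* assumed 4 KiB ISP pages */
    }

    static phys_addr_t my_pte_to_phys(struct isp_mmu *mmu, unsigned int pte)
    {
            /* Mask off the valid bit before shifting back (cf. line 40
             * of sh_mmu_mrfld.c above). */
            return (phys_addr_t)(pte & ~ISP_PTE_VALID_MASK(mmu)) << 12;
    }

    static void my_tlb_flush_all(struct isp_mmu *mmu)
    {
            /* poke the hardware's global TLB-invalidate register here */
    }

    static struct isp_mmu_client my_mmu_client = {
            .phys_to_pte    = my_phys_to_pte,
            .pte_to_phys    = my_pte_to_phys,
            .tlb_flush_all  = my_tlb_flush_all,
            /* get_pd_base, tlb_flush_range, pte_valid_mask, null_pte ... */
    };

A driver would then hand this table to isp_mmu_init(&mmu, &my_mmu_client), per the prototype at line 132.
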
/drivers/gpu/drm/panfrost/
panfrost_mmu.c
     97                                 struct panfrost_mmu *mmu,
    103          ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
    108  static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
    110          int as_nr = mmu->as;
    111          struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
    142  u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
    148          as = mmu->as;
    150                  int en = atomic_inc_return(&mmu->as_count);
    159                  list_move(&mmu->list, &pfdev->as_lru_list);
    169          panfrost_mmu_enable(pfdev, mmu);
    [all …]

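panfrost_mmu_as_get() above shows how Panfrost multiplexes many MMU contexts onto a small pool of hardware address-space (AS) slots: a context that already holds a slot just takes a reference and is marked most-recently-used; otherwise an idle slot is stolen from the least-recently-used context and reprogrammed. A heavily simplified sketch of that policy; names are illustrative, locking is elided, and every context is assumed to sit on the pool's LRU list from creation:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/list.h>

    struct as_pool {
            struct list_head lru_list;      /* MRU at the tail */
    };

    struct as_ctx {
            int as;                         /* assigned slot, or -1 */
            atomic_t as_count;              /* active users of the slot */
            struct list_head lru;           /* position in pool->lru_list */
    };

    static int as_get(struct as_pool *pool, struct as_ctx *ctx)
    {
            struct as_ctx *victim;

            if (ctx->as >= 0) {
                    /* Fast path: slot already assigned. */
                    atomic_inc(&ctx->as_count);
                    list_move_tail(&ctx->lru, &pool->lru_list);
                    return ctx->as;
            }

            /* Steal the slot of the least-recently-used idle context. */
            list_for_each_entry(victim, &pool->lru_list, lru) {
                    if (!atomic_read(&victim->as_count)) {
                            ctx->as = victim->as;
                            victim->as = -1;
                            atomic_set(&ctx->as_count, 1);
                            list_move_tail(&ctx->lru, &pool->lru_list);
                            /* hardware reprogramming of the slot elided */
                            return ctx->as;
                    }
            }

            return -EBUSY;  /* all slots busy; the real driver waits */
    }
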
panfrost_gem.c
     63                  if (iter->mmu == priv->mmu) {
     80          spin_lock(&mapping->mmu->mm_lock);
     83          spin_unlock(&mapping->mmu->mm_lock);
     94          panfrost_mmu_ctx_put(mapping->mmu);
    144          mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
    145          spin_lock(&mapping->mmu->mm_lock);
    146          ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
    148          spin_unlock(&mapping->mmu->mm_lock);
    177                  if (iter->mmu == priv->mmu) {

/drivers/gpu/drm/msm/
msm_mmu.h
     13          void (*detach)(struct msm_mmu *mmu);
     14          int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
     16          int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
     17          void (*destroy)(struct msm_mmu *mmu);
     18          void (*resume_translation)(struct msm_mmu *mmu);
     35  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
     38          mmu->dev = dev;
     39          mmu->funcs = funcs;
     40          mmu->type = type;
     46  static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
    [all …]

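msm_mmu.h defines the ops table every msm MMU backend fills in; msm_gpummu.c and msm_iommu.c below are the two in-tree implementations. A hedged sketch of a skeletal backend follows; the tail of the map() signature is elided in the excerpt and assumed here to be (size_t len, int prot), and MSM_MMU_GPUMMU is taken as the type constant:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include "msm_mmu.h"    /* assumed in-tree driver context */

    struct my_gpummu {
            struct msm_mmu base;
            /* hardware page-table state would live here */
    };

    #define to_my_gpummu(x) container_of(x, struct my_gpummu, base)

    static void my_detach(struct msm_mmu *mmu)
    {
    }

    static int my_map(struct msm_mmu *mmu, uint64_t iova,
                      struct sg_table *sgt, size_t len, int prot)
    {
            /* walk sgt and program PTEs covering [iova, iova + len) */
            return 0;
    }

    static int my_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
    {
            /* clear the PTEs and flush the TLB */
            return 0;
    }

    static void my_destroy(struct msm_mmu *mmu)
    {
            kfree(to_my_gpummu(mmu));
    }

    static const struct msm_mmu_funcs my_funcs = {
            .detach  = my_detach,
            .map     = my_map,
            .unmap   = my_unmap,
            .destroy = my_destroy,
    };

    struct msm_mmu *my_gpummu_new(struct device *dev)
    {
            struct my_gpummu *m = kzalloc(sizeof(*m), GFP_KERNEL);

            if (!m)
                    return ERR_PTR(-ENOMEM);
            msm_mmu_init(&m->base, dev, &my_funcs, MSM_MMU_GPUMMU);
            return &m->base;
    }
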
msm_iommu.c
     27  static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
     29          return container_of(mmu, struct msm_iommu_pagetable, base);
     32  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
     35          struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
     51  static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
     54          struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
     68                  msm_iommu_pagetable_unmap(mmu, iova, mapped);
     82  static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
     84          struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
    100  int msm_iommu_pagetable_params(struct msm_mmu *mmu,
    [all …]

msm_gem_vma.c
     18          if (aspace->mmu)
     19                  aspace->mmu->funcs->destroy(aspace->mmu);
     54          if (aspace->mmu)
     55                  aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
     87          if (aspace && aspace->mmu)
     88                  ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
    143  msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
    148          if (IS_ERR(mmu))
    149                  return ERR_CAST(mmu);
    157          aspace->mmu = mmu;

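msm_gem_vma.c is the consumer side: the address space stores the msm_mmu handle and dispatches map/unmap through its funcs table. A hedged usage sketch; the trailing parameters of msm_gem_address_space_create() are elided in the excerpt and assumed here to be a VA start and size, and the range values are illustrative:

    #include <linux/sizes.h>

    static struct msm_gem_address_space *my_aspace_new(struct device *dev)
    {
            /* Backend from the sketch after msm_mmu.h above. */
            struct msm_mmu *mmu = my_gpummu_new(dev);

            /* A bad mmu pointer is handled by the callee itself: the
             * excerpt shows it ERR_CASTed at lines 148-149. */
            return msm_gem_address_space_create(mmu, "gpu", SZ_16M, SZ_1G);
    }
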
msm_gpummu.c
     24  static void msm_gpummu_detach(struct msm_mmu *mmu)
     28  static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
     31          struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
     56  static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
     58          struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
     71  static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
     75  static void msm_gpummu_destroy(struct msm_mmu *mmu)
     77          struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
     79          dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
    114  void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
    [all …]

/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
     39  nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
     42          if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)
     49  nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
     52          for (i = 0; i < mmu->type_nr; i++) {
     53                  if ((mmu->type[i].type & mask) == mask)

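These two inline helpers are how nvif clients query the MMU's capability tables: nvif_mmu_type() returns the index of the first memory type whose flags contain the requested mask, and nvif_mmu_kind_valid() rejects kinds the hardware marks invalid. A hedged usage sketch; the NVIF_MEM_* flags appear in the mem.c excerpts above, and the negative return convention on failure is assumed:

    static int pick_host_type(struct nvif_mmu *mmu)
    {
            /* First type that is both CPU-coherent and mappable. */
            int type = nvif_mmu_type(mmu, NVIF_MEM_COHERENT | NVIF_MEM_MAPPABLE);

            if (type < 0)
                    return -ENOSYS; /* no suitable type advertised */
            return type;            /* index into mmu->type[] */
    }
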
/drivers/gpu/drm/nouveau/
nouveau_mem.c
     93          struct nvif_mmu *mmu = &cli->mmu;
    103          if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
    105          if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
    106                  if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
    107                          mem->kind = mmu->kind[mem->kind];
    117          ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
    130          struct nvif_mmu *mmu = &cli->mmu;
    137          ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
    145          ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
    148                                   .bankswz = mmu->kind[mem->kind] == 2,

/drivers/gpu/drm/nouveau/nvkm/engine/device/
base.c
     88          .mmu = { 0x00000001, nv04_mmu_new },
    109          .mmu = { 0x00000001, nv04_mmu_new },
    131          .mmu = { 0x00000001, nv04_mmu_new },
    151          .mmu = { 0x00000001, nv04_mmu_new },
    173          .mmu = { 0x00000001, nv04_mmu_new },
    195          .mmu = { 0x00000001, nv04_mmu_new },
    217          .mmu = { 0x00000001, nv04_mmu_new },
    239          .mmu = { 0x00000001, nv04_mmu_new },
    261          .mmu = { 0x00000001, nv04_mmu_new },
    283          .mmu = { 0x00000001, nv04_mmu_new },
    [all …]