
Searched refs:mmu (Results 1 – 25 of 52) sorted by relevance


/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
96 static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu, in etnaviv_iommu_remove_mapping() argument
101 etnaviv_iommu_unmap(mmu, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
106 static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, in etnaviv_iommu_find_iova() argument
112 lockdep_assert_held(&mmu->lock); in etnaviv_iommu_find_iova()
124 ret = drm_mm_insert_node_in_range(&mmu->mm, node, in etnaviv_iommu_find_iova()
125 size, 0, mmu->last_iova, ~0UL, in etnaviv_iommu_find_iova()
126 mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); in etnaviv_iommu_find_iova()
135 if (mmu->last_iova) { in etnaviv_iommu_find_iova()
136 mmu->last_iova = 0; in etnaviv_iommu_find_iova()
137 mmu->need_flush = true; in etnaviv_iommu_find_iova()
[all …]
etnaviv_gem.c
250 struct etnaviv_iommu *mmu) in etnaviv_gem_get_vram_mapping() argument
255 if (mapping->mmu == mmu) in etnaviv_gem_get_vram_mapping()
295 mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); in etnaviv_gem_mapping_get()
304 mutex_lock(&gpu->mmu->lock); in etnaviv_gem_mapping_get()
305 if (mapping->mmu == gpu->mmu) in etnaviv_gem_mapping_get()
309 mutex_unlock(&gpu->mmu->lock); in etnaviv_gem_mapping_get()
342 mapping->mmu = gpu->mmu; in etnaviv_gem_mapping_get()
345 ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base, in etnaviv_gem_mapping_get()
564 struct etnaviv_iommu *mmu = mapping->mmu; in etnaviv_gem_free_object() local
568 if (mmu) in etnaviv_gem_free_object()
[all …]
etnaviv_buffer.c
272 if (gpu->mmu->need_flush || gpu->switch_context) { in etnaviv_buffer_queue()
279 if (gpu->mmu->need_flush) { in etnaviv_buffer_queue()
280 if (gpu->mmu->version == ETNAVIV_IOMMU_V1) in etnaviv_buffer_queue()
292 if (gpu->mmu->need_flush) { in etnaviv_buffer_queue()
294 if (gpu->mmu->version == ETNAVIV_IOMMU_V1) { in etnaviv_buffer_queue()
312 gpu->mmu->need_flush = false; in etnaviv_buffer_queue()
etnaviv_dump.c
102 etnaviv_iommu_dump(gpu->mmu, iter->data); in etnaviv_core_dump_mmu()
127 mmu_size = etnaviv_iommu_dump_size(gpu->mmu); in etnaviv_core_dump()
143 list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { in etnaviv_core_dump()
197 list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { in etnaviv_core_dump()
etnaviv_mmu.h
58 int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
61 void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
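
The etnaviv hits above trace one pattern end to end: etnaviv_iommu_find_iova() must run with mmu->lock held (lockdep_assert_held, line 112), first tries to place a node above mmu->last_iova, and when that fails it resets last_iova to 0 and sets need_flush (lines 135-137); etnaviv_buffer_queue() later checks need_flush, emits a version-specific MMU flush, and clears the flag (lines 272-312). The following is a minimal user-space sketch of that lazy "flush on address-space wrap" idea only; the bump allocator and the toy_* names are invented stand-ins for the driver's drm_mm-based code, and locking is omitted.

/* Hypothetical stand-in for the lazy "flush on IOVA wrap" idea seen in
 * etnaviv_iommu_find_iova()/etnaviv_buffer_queue(). Not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_mmu {
	uint64_t last_iova;   /* next free address in a simple bump allocator */
	uint64_t size;        /* total size of the IOVA space */
	bool     need_flush;  /* set when stale translations may still be cached */
};

/* Allocate 'len' bytes of IOVA. On exhaustion, wrap to the start and
 * remember that the hardware TLB must be flushed before reuse. */
static uint64_t toy_find_iova(struct toy_mmu *mmu, uint64_t len)
{
	if (mmu->last_iova + len > mmu->size) {
		mmu->last_iova = 0;        /* wrap around */
		mmu->need_flush = true;    /* old mappings may be cached */
	}
	uint64_t iova = mmu->last_iova;
	mmu->last_iova += len;
	return iova;
}

/* Command submission checks the flag once and emits a single flush. */
static void toy_queue(struct toy_mmu *mmu)
{
	if (mmu->need_flush) {
		printf("emit MMU flush command\n");
		mmu->need_flush = false;
	}
	printf("emit draw commands\n");
}

int main(void)
{
	struct toy_mmu mmu = { .size = 1 << 20 };

	toy_find_iova(&mmu, 4096);
	toy_queue(&mmu);                 /* no flush needed yet */
	toy_find_iova(&mmu, 1 << 20);    /* forces a wrap */
	toy_queue(&mmu);                 /* flush emitted exactly once */
	return 0;
}

Deferring the flush to submission time, as the driver does, means many mappings can be set up under the lock while the GPU pays for at most one TLB flush per command stream.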
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
33 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_at() local
35 int big = vma->node->type != mmu->func->spg_shift; in nvkm_vm_map_at()
38 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_at()
39 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits; in nvkm_vm_map_at()
40 u32 max = 1 << (mmu->func->pgt_bits - bits); in nvkm_vm_map_at()
56 mmu->func->map(vma, pgt, node, pte, len, phys, delta); in nvkm_vm_map_at()
70 mmu->func->flush(vm); in nvkm_vm_map_at()
78 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg_table() local
79 int big = vma->node->type != mmu->func->spg_shift; in nvkm_vm_map_sg_table()
83 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_sg_table()
[all …]
nv04.c
78 struct nv04_mmu *mmu = nv04_mmu(base); in nv04_mmu_oneinit() local
79 struct nvkm_device *device = mmu->base.subdev.device; in nv04_mmu_oneinit()
83 ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL, in nv04_mmu_oneinit()
84 &mmu->vm); in nv04_mmu_oneinit()
91 mmu->vm->pgt[0].mem[0] = dma; in nv04_mmu_oneinit()
92 mmu->vm->pgt[0].refcount[0] = 1; in nv04_mmu_oneinit()
106 struct nv04_mmu *mmu = nv04_mmu(base); in nv04_mmu_dtor() local
107 struct nvkm_device *device = mmu->base.subdev.device; in nv04_mmu_dtor()
108 if (mmu->vm) { in nv04_mmu_dtor()
109 nvkm_memory_del(&mmu->vm->pgt[0].mem[0]); in nv04_mmu_dtor()
[all …]
nv44.c
87 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu); in nv44_vm_map_sg() local
95 nv44_vm_fill(pgt, mmu->null, list, pte, part); in nv44_vm_map_sg()
112 nv44_vm_fill(pgt, mmu->null, list, pte, cnt); in nv44_vm_map_sg()
119 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu); in nv44_vm_unmap() local
125 nv44_vm_fill(pgt, mmu->null, NULL, pte, part); in nv44_vm_unmap()
139 nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt); in nv44_vm_unmap()
146 struct nv04_mmu *mmu = nv04_mmu(vm->mmu); in nv44_vm_flush() local
147 struct nvkm_device *device = mmu->base.subdev.device; in nv44_vm_flush()
148 nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE); in nv44_vm_flush()
164 struct nv04_mmu *mmu = nv04_mmu(base); in nv44_mmu_oneinit() local
[all …]
nv41.c
71 struct nv04_mmu *mmu = nv04_mmu(vm->mmu); in nv41_vm_flush() local
72 struct nvkm_device *device = mmu->base.subdev.device; in nv41_vm_flush()
74 mutex_lock(&mmu->base.subdev.mutex); in nv41_vm_flush()
81 mutex_unlock(&mmu->base.subdev.mutex); in nv41_vm_flush()
91 struct nv04_mmu *mmu = nv04_mmu(base); in nv41_mmu_oneinit() local
92 struct nvkm_device *device = mmu->base.subdev.device; in nv41_mmu_oneinit()
95 ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL, in nv41_mmu_oneinit()
96 &mmu->vm); in nv41_mmu_oneinit()
102 &mmu->vm->pgt[0].mem[0]); in nv41_mmu_oneinit()
103 mmu->vm->pgt[0].refcount[0] = 1; in nv41_mmu_oneinit()
[all …]
Kbuild
1 nvkm-y += nvkm/subdev/mmu/base.o
2 nvkm-y += nvkm/subdev/mmu/nv04.o
3 nvkm-y += nvkm/subdev/mmu/nv41.o
4 nvkm-y += nvkm/subdev/mmu/nv44.o
5 nvkm-y += nvkm/subdev/mmu/nv50.o
6 nvkm-y += nvkm/subdev/mmu/gf100.o
gf100.c
112 struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc; in gf100_vm_map()
164 struct nvkm_mmu *mmu = vm->mmu; in gf100_vm_flush() local
165 struct nvkm_device *device = mmu->subdev.device; in gf100_vm_flush()
173 mutex_lock(&mmu->subdev.mutex); in gf100_vm_flush()
192 mutex_unlock(&mmu->subdev.mutex); in gf100_vm_flush()
196 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, in gf100_vm_create() argument
199 return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm); in gf100_vm_create()
nv50.c
80 struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram; in nv50_vm_map()
158 struct nvkm_mmu *mmu = vm->mmu; in nv50_vm_flush() local
159 struct nvkm_subdev *subdev = &mmu->subdev; in nv50_vm_flush()
202 nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, in nv50_vm_create() argument
205 u32 block = (1 << (mmu->func->pgt_bits + 12)); in nv50_vm_create()
209 return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm); in nv50_vm_create()
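
In nvkm/subdev/mmu/base.c above, nvkm_vm_map_at() derives the page-directory index, the page-table index, and the per-table entry count from an offset using the per-chipset mmu->func->pgt_bits and a per-mapping shift (lines 38-40), then hands ranges to mmu->func->map() and finishes with mmu->func->flush() (lines 56, 70). The arithmetic is compact enough to check standalone; the shift values and the byte-addressed offset below are simplified for illustration and are not taken from any particular chipset.

/* Illustration of the pde/pte split used in nvkm_vm_map_at():
 *   pde = (offset >> pgt_bits) - fpde;
 *   pte = (offset & ((1 << pgt_bits) - 1)) >> page_shift;
 *   max = 1 << (pgt_bits - page_shift);
 * The concrete values below are invented for the example. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned pgt_bits   = 27; /* hypothetical: each PDE spans 2^27 bytes */
	const unsigned page_shift = 12; /* hypothetical small-page shift (4 KiB)   */
	const uint32_t fpde       = 0;  /* first PDE backed by this VM             */

	uint64_t offset = (3ULL << pgt_bits) + (5ULL << page_shift);

	uint32_t pde = (uint32_t)(offset >> pgt_bits) - fpde;
	uint32_t pte = (uint32_t)((offset & ((1ULL << pgt_bits) - 1)) >> page_shift);
	uint32_t max = 1u << (pgt_bits - page_shift); /* PTEs per page table */

	assert(pde == 3 && pte == 5 && max == (1u << 15));
	printf("pde=%u pte=%u (of %u entries per table)\n", pde, pte, max);
	return 0;
}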
/drivers/iommu/
ipmmu-vmsa.c
40 struct ipmmu_vmsa_device *mmu; member
51 struct ipmmu_vmsa_device *mmu; member
188 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument
190 return ioread32(mmu->base + offset); in ipmmu_read()
193 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument
196 iowrite32(data, mmu->base + offset); in ipmmu_write()
201 return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); in ipmmu_ctx_read()
207 ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write()
222 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
247 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable() local
[all …]
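
The ipmmu-vmsa.c hits show a two-level register accessor: ipmmu_read()/ipmmu_write() do raw ioread32()/iowrite32() relative to mmu->base (lines 188-196), and ipmmu_ctx_read()/ipmmu_ctx_write() add domain->context_id * IM_CTX_SIZE so every context gets its own bank of registers (lines 201-207). Below is a hedged user-space sketch of that offset scheme; the array-backed "MMIO" window and the 0x40 bank size are stand-ins, not the driver's values.

/* Sketch of per-context register banking as in ipmmu_ctx_read/write():
 *   context register = base + context_id * IM_CTX_SIZE + reg */
#include <stdint.h>
#include <stdio.h>

#define IM_CTX_SIZE 0x40 /* hypothetical size of one context's register bank */

static uint32_t fake_mmio[1024]; /* stands in for the ioremap()ed window */

static uint32_t mmu_read(unsigned offset)
{
	return fake_mmio[offset / 4]; /* the kernel driver uses ioread32() */
}

static void mmu_write(unsigned offset, uint32_t data)
{
	fake_mmio[offset / 4] = data; /* the kernel driver uses iowrite32() */
}

static uint32_t ctx_read(unsigned context_id, unsigned reg)
{
	return mmu_read(context_id * IM_CTX_SIZE + reg);
}

static void ctx_write(unsigned context_id, unsigned reg, uint32_t data)
{
	mmu_write(context_id * IM_CTX_SIZE + reg, data);
}

int main(void)
{
	ctx_write(0, 0x10, 0xaaaa);   /* context 0, register 0x10 */
	ctx_write(1, 0x10, 0xbbbb);   /* context 1, same register, different bank */
	printf("%x %x\n", ctx_read(0, 0x10), ctx_read(1, 0x10));
	return 0;
}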
/drivers/gpu/drm/msm/
msm_iommu.c
34 static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, in msm_iommu_attach() argument
37 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_attach()
38 return iommu_attach_device(iommu->domain, mmu->dev); in msm_iommu_attach()
41 static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, in msm_iommu_detach() argument
44 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_detach()
45 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
48 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_map() argument
51 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_map()
87 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_unmap() argument
90 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_unmap()
[all …]
msm_mmu.h
24 int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
25 void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
30 void (*destroy)(struct msm_mmu *mmu);
38 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument
41 mmu->dev = dev; in msm_mmu_init()
42 mmu->funcs = funcs; in msm_mmu_init()
msm_gpu.c
660 gpu->mmu = msm_iommu_new(&pdev->dev, iommu); in msm_gpu_init()
661 if (IS_ERR(gpu->mmu)) { in msm_gpu_init()
662 ret = PTR_ERR(gpu->mmu); in msm_gpu_init()
664 gpu->mmu = NULL; in msm_gpu_init()
672 gpu->id = msm_register_mmu(drm, gpu->mmu); in msm_gpu_init()
708 if (gpu->mmu) in msm_gpu_cleanup()
709 gpu->mmu->funcs->destroy(gpu->mmu); in msm_gpu_cleanup()
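
msm_mmu.h above defines a small ops table (attach, detach, map, unmap, destroy) that msm_mmu_init() stores on each struct msm_mmu together with the device, msm_iommu.c fills it by wrapping iommu_attach_device() and friends, and msm_gpu.c only ever calls through mmu->funcs (lines 660-709). The sketch below models just the attach/detach/destroy hooks of that vtable arrangement with a dummy backend; the toy_* names, the dummy behaviour, and the port string are invented for illustration.

/* Sketch of the msm_mmu ops-table pattern with an invented backend. */
#include <stdio.h>

struct toy_mmu;

struct toy_mmu_funcs {
	int  (*attach)(struct toy_mmu *mmu, const char *const *names, int cnt);
	void (*detach)(struct toy_mmu *mmu, const char *const *names, int cnt);
	void (*destroy)(struct toy_mmu *mmu);
};

struct toy_mmu {
	const struct toy_mmu_funcs *funcs;
	const char *dev_name;
};

/* Mirrors the role of msm_mmu_init(): stash the device and the backend's ops. */
static void toy_mmu_init(struct toy_mmu *mmu, const char *dev_name,
			 const struct toy_mmu_funcs *funcs)
{
	mmu->dev_name = dev_name;
	mmu->funcs = funcs;
}

/* A trivial backend standing in for the IOMMU-backed one in msm_iommu.c. */
static int dummy_attach(struct toy_mmu *mmu, const char *const *names, int cnt)
{
	for (int i = 0; i < cnt; i++)
		printf("attach %s to %s\n", names[i], mmu->dev_name);
	return 0;
}

static void dummy_detach(struct toy_mmu *mmu, const char *const *names, int cnt)
{
	for (int i = 0; i < cnt; i++)
		printf("detach %s from %s\n", names[i], mmu->dev_name);
}

static void dummy_destroy(struct toy_mmu *mmu)
{
	printf("destroy mmu for %s\n", mmu->dev_name);
}

static const struct toy_mmu_funcs dummy_funcs = {
	.attach  = dummy_attach,
	.detach  = dummy_detach,
	.destroy = dummy_destroy,
};

int main(void)
{
	static const char *const ports[] = { "example_port" }; /* invented name */
	struct toy_mmu mmu;

	toy_mmu_init(&mmu, "gpu", &dummy_funcs);
	mmu.funcs->attach(&mmu, ports, 1);
	mmu.funcs->detach(&mmu, ports, 1);
	mmu.funcs->destroy(&mmu);   /* callers never see the backend type */
	return 0;
}

Keeping callers behind the funcs table is what lets the same GPU and display code run on top of different MMU backends.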
/drivers/gpu/drm/nouveau/nvkm/engine/device/
base.c
87 .mmu = nv04_mmu_new,
108 .mmu = nv04_mmu_new,
130 .mmu = nv04_mmu_new,
150 .mmu = nv04_mmu_new,
172 .mmu = nv04_mmu_new,
194 .mmu = nv04_mmu_new,
216 .mmu = nv04_mmu_new,
238 .mmu = nv04_mmu_new,
260 .mmu = nv04_mmu_new,
282 .mmu = nv04_mmu_new,
[all …]
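
The nvkm/engine/device/base.c hits are all entries in per-chipset description tables: each early chipset entry points .mmu at the nv04_mmu_new constructor, and later chipsets substitute the nv41/nv44/nv50/gf100 variants listed in the subdev/mmu directory above. A tiny sketch of that table-of-constructors idea follows; the chipset IDs, the constructor signature, and the toy types are invented and do not match the real nvkm interfaces.

/* Sketch of a per-chipset constructor table; everything here is invented. */
#include <stdio.h>

struct toy_mmu { const char *name; };

typedef struct toy_mmu (*mmu_ctor_t)(void);

static struct toy_mmu nv04_mmu_ctor(void) { return (struct toy_mmu){ "nv04" }; }
static struct toy_mmu nv50_mmu_ctor(void) { return (struct toy_mmu){ "nv50" }; }

struct toy_chip {
	unsigned   chipset;
	mmu_ctor_t mmu;   /* plays the role of the ".mmu = nv04_mmu_new" entries */
};

static const struct toy_chip chips[] = {
	{ .chipset = 0x04, .mmu = nv04_mmu_ctor },
	{ .chipset = 0x50, .mmu = nv50_mmu_ctor },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(chips) / sizeof(chips[0]); i++) {
		struct toy_mmu mmu = chips[i].mmu();
		printf("chipset %02x uses the %s mmu\n", chips[i].chipset, mmu.name);
	}
	return 0;
}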
/drivers/gpu/drm/msm/mdp/mdp4/
mdp4_kms.c
162 struct msm_mmu *mmu = mdp4_kms->mmu; in mdp4_destroy() local
164 if (mmu) { in mdp4_destroy()
165 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); in mdp4_destroy()
166 mmu->funcs->destroy(mmu); in mdp4_destroy()
443 struct msm_mmu *mmu; in mdp4_kms_init() local
534 mmu = msm_iommu_new(&pdev->dev, config->iommu); in mdp4_kms_init()
535 if (IS_ERR(mmu)) { in mdp4_kms_init()
536 ret = PTR_ERR(mmu); in mdp4_kms_init()
539 ret = mmu->funcs->attach(mmu, iommu_ports, in mdp4_kms_init()
544 mdp4_kms->mmu = mmu; in mdp4_kms_init()
[all …]
/drivers/gpu/drm/msm/mdp/mdp5/
mdp5_kms.c
120 struct msm_mmu *mmu = mdp5_kms->mmu; in mdp5_kms_destroy() local
122 if (mmu) { in mdp5_kms_destroy()
123 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); in mdp5_kms_destroy()
124 mmu->funcs->destroy(mmu); in mdp5_kms_destroy()
567 struct msm_mmu *mmu; in mdp5_kms_init() local
609 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); in mdp5_kms_init()
610 if (IS_ERR(mmu)) { in mdp5_kms_init()
611 ret = PTR_ERR(mmu); in mdp5_kms_init()
617 ret = mmu->funcs->attach(mmu, iommu_ports, in mdp5_kms_init()
622 mmu->funcs->destroy(mmu); in mdp5_kms_init()
[all …]
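
Both mdp4_kms.c and mdp5_kms.c above follow the same lifecycle: msm_iommu_new() creates the MMU, funcs->attach() wires it to the display IOMMU ports, the pointer is stored in the kms struct, and teardown detaches and destroys it; the mdp5 hits additionally show the MMU being destroyed when attach fails (lines 617-622). Here is a compact sketch of just that error handling, with invented toy_* helpers standing in for msm_iommu_new() and the mmu->funcs callbacks.

/* Sketch of the create/attach/teardown error handling in the mdp kms init
 * paths; the helpers are invented stand-ins. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_mmu { int attached; };

static struct toy_mmu *toy_mmu_new(void) { return calloc(1, sizeof(struct toy_mmu)); }
static void toy_mmu_destroy(struct toy_mmu *m) { free(m); }
static void toy_mmu_detach(struct toy_mmu *m) { m->attached = 0; }
static int toy_mmu_attach(struct toy_mmu *m, int fail)
{
	if (fail)
		return -ENODEV;
	m->attached = 1;
	return 0;
}

static struct toy_mmu *kms_init(int make_attach_fail)
{
	struct toy_mmu *mmu = toy_mmu_new();
	if (!mmu)
		return NULL;

	if (toy_mmu_attach(mmu, make_attach_fail)) {
		toy_mmu_destroy(mmu);   /* as in mdp5_kms_init() on attach failure */
		return NULL;
	}
	return mmu;
}

static void kms_destroy(struct toy_mmu *mmu)
{
	if (mmu) {                      /* as in mdp4_destroy()/mdp5_kms_destroy() */
		toy_mmu_detach(mmu);
		toy_mmu_destroy(mmu);
	}
}

int main(void)
{
	struct toy_mmu *mmu = kms_init(0);
	printf("init %s\n", mmu ? "ok" : "failed");
	kms_destroy(mmu);

	mmu = kms_init(1);
	printf("init with failing attach: %s\n", mmu ? "ok" : "cleaned up");
	return 0;
}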
/drivers/gpu/drm/gma500/
Dpsb_drv.c182 if (dev_priv->mmu) { in psb_driver_unload()
188 (dev_priv->mmu), in psb_driver_unload()
192 psb_mmu_driver_takedown(dev_priv->mmu); in psb_driver_unload()
193 dev_priv->mmu = NULL; in psb_driver_unload()
326 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); in psb_driver_load()
327 if (!dev_priv->mmu) in psb_driver_load()
330 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); in psb_driver_load()
340 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu), in psb_driver_load()
346 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); in psb_driver_load()
/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c
52 struct nv04_mmu *mmu = nv04_mmu(device->mmu); in nv04_dmaobj_bind() local
53 struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0]; in nv04_dmaobj_bind()
98 if (device->mmu->func == &nv04_mmu) in nv04_dmaobj_new()
/drivers/infiniband/hw/hfi1/
user_exp_rcv.c
62 struct mmu_rb_node mmu; member
855 node->mmu.addr = vaddr; in set_rcvarray_entry()
856 node->mmu.len = npages * PAGE_SIZE; in set_rcvarray_entry()
866 ret = tid_rb_insert(fd, &node->mmu); in set_rcvarray_entry()
868 ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu); in set_rcvarray_entry()
872 node->rcventry, node->mmu.addr, node->phys, ret); in set_rcvarray_entry()
880 node->mmu.addr, node->phys, phys); in set_rcvarray_entry()
915 hfi1_mmu_rb_remove(fd->handler, &node->mmu); in unprogram_rcvarray()
926 node->npages, node->mmu.addr, node->phys, in clear_tid_node()
936 pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len, in clear_tid_node()
[all …]
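
In hfi1's user_exp_rcv.c the per-buffer bookkeeping embeds a struct mmu_rb_node named mmu inside the driver's own node: the code fills node->mmu.addr and node->mmu.len and then passes &node->mmu to the insert/remove helpers, so the RB-tree code only ever sees the embedded member. Recovering the outer structure from such an embedded node is the usual container_of trick; the illustration below uses simplified, hypothetical types rather than the real hfi1 ones.

/* Embedding a generic node inside a driver struct and recovering the outer
 * struct with container_of(). Types and fields are simplified stand-ins. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mmu_rb_node {            /* the generic part the tree code knows about */
	uintptr_t addr;
	size_t    len;
};

struct toy_tid_node {           /* driver-private part with invented fields */
	unsigned           rcventry;
	struct mmu_rb_node mmu;     /* embedded, like "struct mmu_rb_node mmu;" */
};

/* A callback that only receives the embedded node, as tree code would. */
static void dump_node(struct mmu_rb_node *rb)
{
	struct toy_tid_node *node = container_of(rb, struct toy_tid_node, mmu);

	printf("rcventry %u covers %#lx + %zu bytes\n",
	       node->rcventry, (unsigned long)rb->addr, rb->len);
}

int main(void)
{
	struct toy_tid_node node = {
		.rcventry = 7,
		.mmu = { .addr = 0x10000000ul, .len = 8 * 4096 },
	};

	dump_node(&node.mmu);   /* the driver passes &node->mmu to insert/remove */
	return 0;
}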
/drivers/gpu/drm/nouveau/
nouveau_chan.c
98 struct nvkm_mmu *mmu = nvxx_mmu(device); in nouveau_channel_prep() local
146 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_prep()
176 args.limit = mmu->limit - 1; in nouveau_channel_prep()
304 struct nvkm_mmu *mmu = nvxx_mmu(device); in nouveau_channel_init() local
316 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_init()
333 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_init()
345 args.limit = mmu->limit - 1; in nouveau_channel_init()
/drivers/memory/
mtk-smi.c
62 u32 *mmu; member
154 larb->mmu = &smi_iommu->larb_imu[i].mmu; in mtk_smi_larb_bind()
165 writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN); in mtk_smi_larb_config_port()
182 if (*larb->mmu & BIT(i)) { in mtk_smi_larb_config_port_gen1()
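
mtk-smi.c above keeps a pointer into a shared bitmask (larb->mmu points at the IOMMU's larb_imu[i].mmu word, line 154): the newer path writes the whole mask to SMI_LARB_MMU_EN in one go (line 165), while the gen1 path appears to walk the mask bit by bit with BIT(i) (line 182). A small sketch of that per-port enable mask follows; the fake register, the port count, and the helper names are invented.

/* Sketch of a per-port IOMMU-enable bitmask for a local arbiter. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)     (1u << (n))
#define LARB_PORTS 6 /* hypothetical number of ports on one local arbiter */

static uint32_t fake_mmu_en_reg; /* stands in for the SMI_LARB_MMU_EN register */

/* Newer style: program all ports with one register write. */
static void larb_config_all(const uint32_t *mmu_mask)
{
	fake_mmu_en_reg = *mmu_mask;
	printf("MMU_EN <- %#x\n", fake_mmu_en_reg);
}

/* gen1 style: walk the mask and handle each enabled port individually. */
static void larb_config_per_port(const uint32_t *mmu_mask)
{
	for (unsigned i = 0; i < LARB_PORTS; i++)
		if (*mmu_mask & BIT(i))
			printf("enable translation for port %u\n", i);
}

int main(void)
{
	uint32_t mmu_mask = 0;        /* shared with the IOMMU side in the kernel */

	mmu_mask |= BIT(1) | BIT(4);  /* mark ports 1 and 4 as translated */
	larb_config_all(&mmu_mask);
	larb_config_per_port(&mmu_mask);
	return 0;
}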
/drivers/gpu/drm/msm/adreno/
adreno_gpu.c
352 struct msm_mmu *mmu; in adreno_gpu_init() local
391 mmu = gpu->mmu; in adreno_gpu_init()
392 if (mmu) { in adreno_gpu_init()
393 ret = mmu->funcs->attach(mmu, iommu_ports, in adreno_gpu_init()
