Lines matching refs: iommu
Each hit below shows the source line number, the matching line, and the enclosing function; "local" marks lines where iommu is declared as a local variable.
85 struct msm_iommu *iommu = to_msm_iommu(pagetable->parent); in msm_iommu_pagetable_destroy() local
93 if (atomic_dec_return(&iommu->pagetables) == 0) in msm_iommu_pagetable_destroy()
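The two hits above (85 and 93) are the teardown half of the per-process pagetable refcount in the DRM/MSM IOMMU glue (msm_iommu.c): each pagetable keeps a pointer to its parent msm_iommu, and when the last pagetable is destroyed the SMMU backend is told to drop its TTBR0 configuration. A minimal sketch of how these lines likely fit together; the to_pagetable() helper, the adreno_smmu set_ttbr0_cfg() callback, and the free_io_pgtable_ops()/kfree() cleanup are assumptions from the surrounding driver, not part of the hits themselves.

    static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
    {
            struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
            struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);      /* hit at 85 */
            struct adreno_smmu_priv *adreno_smmu =
                    dev_get_drvdata(pagetable->parent->dev);

            /* Last per-process pagetable gone: ask the SMMU driver to clear TTBR0 */
            if (atomic_dec_return(&iommu->pagetables) == 0)                 /* hit at 93 */
                    adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

            free_io_pgtable_ops(pagetable->pgtbl_ops);
            kfree(pagetable);
    }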
151 struct msm_iommu *iommu = to_msm_iommu(parent); in msm_iommu_pagetable_create() local
173 iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu); in msm_iommu_pagetable_create()
190 &ttbr0_cfg, iommu->domain); in msm_iommu_pagetable_create()
201 if (atomic_inc_return(&iommu->pagetables) == 1) { in msm_iommu_pagetable_create()
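Hits 151-201 are the matching setup path, msm_iommu_pagetable_create(): the parent msm_iommu supplies the iommu_domain both for the fault handler and for alloc_io_pgtable_ops(), and the first pagetable created flips the SMMU into per-process (TTBR0) mode. A simplified sketch under the assumption that the TTBR0 config is cloned from the SMMU's TTBR1 config via adreno_smmu get_ttbr1_cfg()/set_ttbr0_cfg() callbacks; those callbacks and the ARM_64_LPAE_S1 format are not visible in the hits, and quirk and error handling are largely omitted.

    struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
    {
            struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
            struct msm_iommu *iommu = to_msm_iommu(parent);                 /* hit at 151 */
            struct msm_iommu_pagetable *pagetable;
            const struct io_pgtable_cfg *ttbr1_cfg;
            struct io_pgtable_cfg ttbr0_cfg;

            /* Route context faults for this domain through msm_fault_handler() */
            iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu); /* hit at 173 */

            pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
            if (!pagetable)
                    return ERR_PTR(-ENOMEM);

            /* Clone the SMMU's TTBR1 config as the starting point for TTBR0 */
            ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
            ttbr0_cfg = *ttbr1_cfg;

            pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
                    &ttbr0_cfg, iommu->domain);                             /* hit at 190 */
            if (!pagetable->pgtbl_ops) {
                    kfree(pagetable);
                    return ERR_PTR(-ENOMEM);
            }

            /* First pagetable for this parent: have the SMMU driver set up TTBR0 */
            if (atomic_inc_return(&iommu->pagetables) == 1)                 /* hit at 201 */
                    adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, ttbr1_cfg);

            return &pagetable->base;
    }

The same counter is what msm_iommu_pagetable_destroy() above decrements, so TTBR0 stays configured for exactly as long as at least one per-process pagetable exists.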
231 struct msm_iommu *iommu = arg; in msm_fault_handler() local
232 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev); in msm_fault_handler()
240 if (iommu->base.handler) in msm_fault_handler()
241 return iommu->base.handler(iommu->base.arg, iova, flags, ptr); in msm_fault_handler()
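Hits 231-241 show how the fault path gets back to the driver object: the msm_iommu pointer registered as the opaque arg at line 173 is recovered in msm_fault_handler(), and the fault is forwarded to whatever handler the GPU code installed in iommu->base. A sketch of that delegation; the adreno_smmu_fault_info lookup via get_fault_info() and the fallback warning are assumptions beyond what the hits show.

    static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
                    unsigned long iova, int flags, void *arg)
    {
            struct msm_iommu *iommu = arg;                                  /* hit at 231 */
            struct adreno_smmu_priv *adreno_smmu =
                    dev_get_drvdata(iommu->base.dev);                       /* hit at 232 */
            struct adreno_smmu_fault_info info, *ptr = NULL;

            /* Pull extra fault state out of the SMMU if the backend provides it */
            if (adreno_smmu->get_fault_info) {
                    adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
                    ptr = &info;
            }

            /* Forward to the handler installed by the GPU code, if any */
            if (iommu->base.handler)                                        /* hit at 240 */
                    return iommu->base.handler(iommu->base.arg, iova, flags, ptr); /* hit at 241 */

            pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
            return 0;
    }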
256 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_detach() local
258 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
264 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_map() local
271 ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot); in msm_iommu_map()
279 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_unmap() local
284 iommu_unmap(iommu->domain, iova, len); in msm_iommu_unmap()
291 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_destroy() local
292 iommu_domain_free(iommu->domain); in msm_iommu_destroy()
293 kfree(iommu); in msm_iommu_destroy()
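The hits at 256-293 are the plain msm_mmu ops, which just wrap the core IOMMU API around iommu->domain: detach, map via iommu_map_sgtable(), unmap, and a destroy that frees the domain together with the wrapper. A condensed sketch; the msm_mmu callback signatures are assumptions, and any extra address handling or WARNs are omitted.

    static void msm_iommu_detach(struct msm_mmu *mmu)
    {
            struct msm_iommu *iommu = to_msm_iommu(mmu);                    /* hit at 256 */

            iommu_detach_device(iommu->domain, mmu->dev);                   /* hit at 258 */
    }

    static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
                    struct sg_table *sgt, size_t len, int prot)
    {
            struct msm_iommu *iommu = to_msm_iommu(mmu);                    /* hit at 264 */
            size_t ret;

            ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);        /* hit at 271 */

            return (ret == len) ? 0 : -EINVAL;
    }

    static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
    {
            struct msm_iommu *iommu = to_msm_iommu(mmu);                    /* hit at 279 */

            iommu_unmap(iommu->domain, iova, len);                          /* hit at 284 */
    }

    static void msm_iommu_destroy(struct msm_mmu *mmu)
    {
            struct msm_iommu *iommu = to_msm_iommu(mmu);                    /* hit at 291 */

            iommu_domain_free(iommu->domain);                               /* hit at 292 */
            kfree(iommu);                                                   /* hit at 293 */
    }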
306 struct msm_iommu *iommu; in msm_iommu_new() local
312 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); in msm_iommu_new()
313 if (!iommu) in msm_iommu_new()
316 iommu->domain = domain; in msm_iommu_new()
317 msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU); in msm_iommu_new()
319 atomic_set(&iommu->pagetables, 0); in msm_iommu_new()
321 ret = iommu_attach_device(iommu->domain, dev); in msm_iommu_new()
323 kfree(iommu); in msm_iommu_new()
327 return &iommu->base; in msm_iommu_new()
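Finally, hits 306-327 are the constructor, msm_iommu_new(): allocate the wrapper, record the domain, initialize the embedded msm_mmu base with the ops table, zero the pagetable count, and attach the domain to the device, freeing the wrapper again if the attach fails. A sketch assuming the iommu_domain is allocated by the caller and passed in, and that failure is reported with ERR_PTR(); the `funcs` ops table is assumed to list the callbacks sketched above.

    struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
    {
            struct msm_iommu *iommu;                                        /* hit at 306 */
            int ret;

            iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);                    /* hit at 312 */
            if (!iommu)                                                     /* hit at 313 */
                    return ERR_PTR(-ENOMEM);

            iommu->domain = domain;                                         /* hit at 316 */
            msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);         /* hit at 317 */

            /* No per-process pagetables attached yet */
            atomic_set(&iommu->pagetables, 0);                              /* hit at 319 */

            ret = iommu_attach_device(iommu->domain, dev);                  /* hit at 321 */
            if (ret) {
                    kfree(iommu);                                           /* hit at 323 */
                    return ERR_PTR(-ENODEV);
            }

            return &iommu->base;                                            /* hit at 327 */
    }

Note from the destroy hits (292-293) that a successfully created wrapper ends up owning the domain: msm_iommu_destroy() frees the domain along with the msm_iommu, while the attach-failure path here frees only the wrapper.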