/kernel/linux/linux-5.10/drivers/staging/media/tegra-vde/ |
D | iommu.c |
    21  struct iova **iovap,  in tegra_vde_iommu_map()
    24  struct iova *iova;  in tegra_vde_iommu_map() local
    30  size = iova_align(&vde->iova, size);  in tegra_vde_iommu_map()
    31  shift = iova_shift(&vde->iova);  in tegra_vde_iommu_map()
    33  iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);  in tegra_vde_iommu_map()
    34  if (!iova)  in tegra_vde_iommu_map()
    37  addr = iova_dma_addr(&vde->iova, iova);  in tegra_vde_iommu_map()
    42  __free_iova(&vde->iova, iova);  in tegra_vde_iommu_map()
    46  *iovap = iova;  in tegra_vde_iommu_map()
    51  void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)  in tegra_vde_iommu_unmap() argument
    [all …]
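The matches above show the usual carve-out pattern for a driver-owned iova_domain: round the size, allocate in page frames, convert the result back to a bus address, and release the range if the mapping fails. A minimal sketch of that flow, with placeholder names and error codes rather than the driver's exact code:

#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int example_iommu_map(struct iova_domain *iovad,
                             struct iommu_domain *domain,
                             struct sg_table *sgt, size_t size,
                             unsigned long limit_pfn, struct iova **iovap)
{
        unsigned long shift = iova_shift(iovad);
        struct iova *iova;
        dma_addr_t addr;

        size = iova_align(iovad, size);         /* round up to the IOVA granule */

        /* size and limit are expressed in page frames, hence the shift */
        iova = alloc_iova(iovad, size >> shift, limit_pfn, true);
        if (!iova)
                return -ENOMEM;

        addr = iova_dma_addr(iovad, iova);      /* pfn_lo back to a bus address */

        if (iommu_map_sg(domain, addr, sgt->sgl, sgt->nents,
                         IOMMU_READ | IOMMU_WRITE) < size) {
                __free_iova(iovad, iova);       /* nothing usable was mapped */
                return -ENXIO;
        }

        *iovap = iova;
        return 0;
}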
|
D | dmabuf-cache.c |
    25  struct iova *iova;  member
    36  tegra_vde_iommu_unmap(entry->vde, entry->iova);  in tegra_vde_release_entry()
    70  struct iova *iova;  in tegra_vde_dmabuf_cache_map() local
    88  *addrp = iova_dma_addr(&vde->iova, entry->iova);  in tegra_vde_dmabuf_cache_map()
    122  err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);  in tegra_vde_dmabuf_cache_map()
    126  *addrp = iova_dma_addr(&vde->iova, iova);  in tegra_vde_dmabuf_cache_map()
    129  iova = NULL;  in tegra_vde_dmabuf_cache_map()
    136  entry->iova = iova;  in tegra_vde_dmabuf_cache_map()
|
D | vde.h |
    48  struct iova_domain iova;  member
    49  struct iova *iova_resv_static_addresses;
    50  struct iova *iova_resv_last_page;
    59  struct iova **iovap,
    61  void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
|
/kernel/linux/linux-5.10/include/linux/ |
D | iova.h |
    19  struct iova {  struct
    85  struct iova anchor; /* rbtree lookup anchor */
    100  static inline unsigned long iova_size(struct iova *iova)  in iova_size() argument
    102  return iova->pfn_hi - iova->pfn_lo + 1;  in iova_size()
    115  static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)  in iova_offset() argument
    117  return iova & iova_mask(iovad);  in iova_offset()
    125  static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)  in iova_dma_addr() argument
    127  return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);  in iova_dma_addr()
    130  static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)  in iova_pfn() argument
    132  return iova >> iova_shift(iovad);  in iova_pfn()
    [all …]
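These inline helpers are pure page-frame arithmetic over struct iova and the domain granule. A hedged illustration of how they compose (function and variable names are made up for the example):

#include <linux/kernel.h>
#include <linux/iova.h>

static void example_iova_math(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long shift = iova_shift(iovad);        /* log2 of the granule, e.g. 12 */
        dma_addr_t addr = iova_dma_addr(iovad, iova);   /* pfn_lo << shift */
        unsigned long granules = iova_size(iova);       /* pfn_hi - pfn_lo + 1 */
        size_t off = iova_offset(iovad, addr + 5);      /* sub-granule offset, here 5 */
        unsigned long pfn = iova_pfn(iovad, addr);      /* back to pfn_lo */

        pr_info("iova %pad: %lu granules, pfn %lu, offset %zu\n",
                &addr, granules, pfn, off);
}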
|
D | io-pgtable.h |
    41  void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
    43  void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
    46  unsigned long iova, size_t granule, void *cookie);
    157  int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
    159  size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
    162  unsigned long iova);
    217  io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,  in io_pgtable_tlb_flush_walk() argument
    220  iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);  in io_pgtable_tlb_flush_walk()
    224  io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,  in io_pgtable_tlb_flush_leaf() argument
    227  iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);  in io_pgtable_tlb_flush_leaf()
    [all …]
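These are the callbacks an io-pgtable consumer drives directly. A short sketch of a round trip through one ops instance; the ops pointer would come from alloc_io_pgtable_ops(), and the addresses and prot flags are arbitrary examples:

#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/sizes.h>
#include <linux/gfp.h>

static int example_pgtable_roundtrip(struct io_pgtable_ops *ops,
                                     unsigned long iova, phys_addr_t paddr)
{
        int ret;

        ret = ops->map(ops, iova, paddr, SZ_4K,
                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret)
                return ret;

        /* iova_to_phys() walks the tables map() just populated */
        WARN_ON(ops->iova_to_phys(ops, iova) != paddr);

        /* a NULL gather skips batched TLB invalidation, as several in-tree callers do */
        if (ops->unmap(ops, iova, SZ_4K, NULL) != SZ_4K)
                return -EINVAL;

        return 0;
}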
|
/kernel/linux/linux-5.10/drivers/iommu/ |
D | iova.c |
    125  __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)  in __cached_rbnode_insert_update()
    134  __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)  in __cached_rbnode_delete_update()
    136  struct iova *cached_iova;  in __cached_rbnode_delete_update()
    138  cached_iova = rb_entry(iovad->cached32_node, struct iova, node);  in __cached_rbnode_delete_update()
    147  cached_iova = rb_entry(iovad->cached_node, struct iova, node);  in __cached_rbnode_delete_update()
    154  iova_insert_rbtree(struct rb_root *root, struct iova *iova,  in iova_insert_rbtree() argument
    162  struct iova *this = rb_entry(*new, struct iova, node);  in iova_insert_rbtree()
    166  if (iova->pfn_lo < this->pfn_lo)  in iova_insert_rbtree()
    168  else if (iova->pfn_lo > this->pfn_lo)  in iova_insert_rbtree()
    176  rb_link_node(&iova->node, parent, new);  in iova_insert_rbtree()
    [all …]
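The iova_insert_rbtree() fragments show a plain ordered insert keyed on pfn_lo. A simplified sketch of that walk; the real function also takes a start hint and warns when it finds an overlapping entry:

#include <linux/rbtree.h>
#include <linux/iova.h>

static void example_insert_by_pfn_lo(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &root->rb_node, *parent = NULL;

        while (*new) {
                struct iova *this = rb_entry(*new, struct iova, node);

                parent = *new;
                if (iova->pfn_lo < this->pfn_lo)
                        new = &(*new)->rb_left;
                else
                        new = &(*new)->rb_right;
        }

        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}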
|
D | tegra-gart.c |
    60  #define for_each_gart_pte(gart, iova) \  argument
    61  for (iova = gart->iovmm_base; \
    62  iova < gart->iovmm_end; \
    63  iova += GART_PAGE_SIZE)
    66  unsigned long iova, unsigned long pte)  in gart_set_pte() argument
    68  writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);  in gart_set_pte()
    73  unsigned long iova)  in gart_read_pte() argument
    77  writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);  in gart_read_pte()
    85  unsigned long iova;  in do_gart_setup() local
    87  for_each_gart_pte(gart, iova)  in do_gart_setup()
    [all …]
|
D | io-pgtable-arm-v7s.c |
    415  unsigned long iova, phys_addr_t paddr, int prot,  in arm_v7s_init_pte() argument
    431  tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);  in arm_v7s_init_pte()
    432  if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,  in arm_v7s_init_pte()
    475  static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,  in __arm_v7s_map() argument
    484  ptep += ARM_V7S_LVL_IDX(iova, lvl);  in __arm_v7s_map()
    488  return arm_v7s_init_pte(data, iova, paddr, prot,  in __arm_v7s_map()
    519  return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);  in __arm_v7s_map()
    522  static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,  in arm_v7s_map() argument
    533  if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||  in arm_v7s_map()
    537  ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);  in arm_v7s_map()
    [all …]
|
D | exynos-iommu.c |
    99  #define section_offs(iova) (iova & (SECT_SIZE - 1))  argument
    101  #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))  argument
    103  #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))  argument
    108  static u32 lv1ent_offset(sysmmu_iova_t iova)  in lv1ent_offset() argument
    110  return iova >> SECT_ORDER;  in lv1ent_offset()
    113  static u32 lv2ent_offset(sysmmu_iova_t iova)  in lv2ent_offset() argument
    115  return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);  in lv2ent_offset()
    183  static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)  in section_entry() argument
    185  return pgtable + lv1ent_offset(iova);  in section_entry()
    188  static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)  in page_entry() argument
    [all …]
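The helpers above split an IOVA into a level-1 section index, a level-2 page index, and a page offset. A worked sketch of that split with a common SysMMU geometry (1 MiB sections, 4 KiB small pages, 256 level-2 entries per section); treat the constants as illustrative assumptions rather than values quoted from the driver:

#include <linux/types.h>

#define EX_SECT_ORDER   20      /* 1 MiB sections */
#define EX_SPAGE_ORDER  12      /* 4 KiB small pages */
#define EX_NUM_LV2      256     /* level-2 entries per section */

static void example_split_iova(u32 iova, u32 *l1_idx, u32 *l2_idx, u32 *offset)
{
        *l1_idx = iova >> EX_SECT_ORDER;                        /* lv1ent_offset() */
        *l2_idx = (iova >> EX_SPAGE_ORDER) & (EX_NUM_LV2 - 1);  /* lv2ent_offset() */
        *offset = iova & ((1U << EX_SPAGE_ORDER) - 1);          /* spage_offs() */
}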
|
D | io-pgtable-arm.c |
    253  unsigned long iova, size_t size, int lvl,
    273  unsigned long iova, phys_addr_t paddr,  in arm_lpae_init_pte() argument
    291  tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);  in arm_lpae_init_pte()
    292  if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {  in arm_lpae_init_pte()
    334  static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,  in __arm_lpae_map() argument
    344  ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);  in __arm_lpae_map()
    348  return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);  in __arm_lpae_map()
    377  return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);  in __arm_lpae_map()
    444  static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,  in arm_lpae_map() argument
    452  long iaext = (s64)iova >> cfg->ias;  in arm_lpae_map()
    [all …]
|
D | rockchip-iommu.c |
    260  static u32 rk_iova_dte_index(dma_addr_t iova)  in rk_iova_dte_index() argument
    262  return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;  in rk_iova_dte_index()
    265  static u32 rk_iova_pte_index(dma_addr_t iova)  in rk_iova_pte_index() argument
    267  return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;  in rk_iova_pte_index()
    270  static u32 rk_iova_page_offset(dma_addr_t iova)  in rk_iova_page_offset() argument
    272  return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;  in rk_iova_page_offset()
    307  dma_addr_t iova;  in rk_iommu_zap_lines() local
    309  for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)  in rk_iommu_zap_lines()
    310  rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);  in rk_iommu_zap_lines()
    473  static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)  in log_iova() argument
    [all …]
|
D | tegra-smmu.c |
    155  static unsigned int iova_pd_index(unsigned long iova)  in iova_pd_index() argument
    157  return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);  in iova_pd_index()
    160  static unsigned int iova_pt_index(unsigned long iova)  in iova_pt_index() argument
    162  return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);  in iova_pt_index()
    222  unsigned long iova)  in smmu_flush_tlb_section() argument
    231  value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);  in smmu_flush_tlb_section()
    237  unsigned long iova)  in smmu_flush_tlb_group() argument
    246  value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);  in smmu_flush_tlb_group()
    536  static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,  in tegra_smmu_set_pde() argument
    539  unsigned int pd_index = iova_pd_index(iova);  in tegra_smmu_set_pde()
    [all …]
|
D | sun50i-iommu.c |
    164  static u32 sun50i_iova_get_dte_index(dma_addr_t iova)  in sun50i_iova_get_dte_index() argument
    166  return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);  in sun50i_iova_get_dte_index()
    169  static u32 sun50i_iova_get_pte_index(dma_addr_t iova)  in sun50i_iova_get_pte_index() argument
    171  return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);  in sun50i_iova_get_pte_index()
    174  static u32 sun50i_iova_get_page_offset(dma_addr_t iova)  in sun50i_iova_get_page_offset() argument
    176  return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);  in sun50i_iova_get_page_offset()
    484  dma_addr_t iova, gfp_t gfp)  in sun50i_dte_get_page_table() argument
    492  dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];  in sun50i_dte_get_page_table()
    522  static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,  in sun50i_iommu_map() argument
    531  page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);  in sun50i_iommu_map()
    [all …]
|
D | virtio-iommu.c |
    59  struct interval_tree_node iova;  member
    313  static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,  in viommu_add_mapping() argument
    324  mapping->iova.start = iova;  in viommu_add_mapping()
    325  mapping->iova.last = iova + size - 1;  in viommu_add_mapping()
    329  interval_tree_insert(&mapping->iova, &vdomain->mappings);  in viommu_add_mapping()
    346  unsigned long iova, size_t size)  in viommu_del_mappings() argument
    350  unsigned long last = iova + size - 1;  in viommu_del_mappings()
    355  next = interval_tree_iter_first(&vdomain->mappings, iova, last);  in viommu_del_mappings()
    358  mapping = container_of(node, struct viommu_mapping, iova);  in viommu_del_mappings()
    359  next = interval_tree_iter_next(node, iova, last);  in viommu_del_mappings()
    [all …]
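Here each mapping embeds an interval_tree_node whose start/last fields carry the inclusive IOVA range, so lookups and deletions can walk the tree by interval. A simplified sketch of the insert side; the structure name and missing locking/flags are stand-ins for the driver's own:

#include <linux/types.h>
#include <linux/interval_tree.h>
#include <linux/slab.h>

struct example_mapping {
        struct interval_tree_node iova;  /* start/last hold the IOVA range */
        phys_addr_t paddr;
};

static int example_add_mapping(struct rb_root_cached *mappings,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size)
{
        struct example_mapping *mapping;

        mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping)
                return -ENOMEM;

        mapping->paddr = paddr;
        mapping->iova.start = iova;
        mapping->iova.last = iova + size - 1;    /* inclusive upper bound */
        interval_tree_insert(&mapping->iova, mappings);

        return 0;
}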
|
D | dma-iommu.c |
    29  dma_addr_t iova;  member
    185  msi_page->iova = start;  in cookie_init_hw_msi_region()
    409  unsigned long shift, iova_len, iova = 0;  in iommu_dma_alloc_iova() local
    434  iova = alloc_iova_fast(iovad, iova_len,  in iommu_dma_alloc_iova()
    437  if (!iova)  in iommu_dma_alloc_iova()
    438  iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,  in iommu_dma_alloc_iova()
    441  return (dma_addr_t)iova << shift;  in iommu_dma_alloc_iova()
    445  dma_addr_t iova, size_t size)  in iommu_dma_free_iova() argument
    453  queue_iova(iovad, iova_pfn(iovad, iova),  in iommu_dma_free_iova()
    456  free_iova_fast(iovad, iova_pfn(iovad, iova),  in iommu_dma_free_iova()
    [all …]
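iommu_dma_alloc_iova() prefers to allocate below 4 GiB first and only retries against the full DMA limit, flushing the per-CPU rcache, when that fails. A hedged, simplified sketch of that policy; the real code also respects the domain geometry and a per-device ceiling:

#include <linux/iova.h>
#include <linux/dma-mapping.h>

static dma_addr_t example_alloc_iova(struct iova_domain *iovad, size_t size,
                                     u64 dma_limit)
{
        unsigned long shift = iova_shift(iovad);
        unsigned long iova_len = iova_align(iovad, size) >> shift;
        unsigned long pfn = 0;

        /* first try: keep allocations below 4 GiB when the mask allows more */
        if (dma_limit > DMA_BIT_MASK(32))
                pfn = alloc_iova_fast(iovad, iova_len,
                                      DMA_BIT_MASK(32) >> shift, false);

        /* retry against the full limit, flushing the rcache this time */
        if (!pfn)
                pfn = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);

        return (dma_addr_t)pfn << shift;
}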
|
/kernel/linux/linux-5.10/drivers/fpga/ |
D | dfl-afu-dma-region.c |
    125  u64 iova, u64 size)  in dma_region_check_iova() argument
    127  if (!size && region->iova != iova)  in dma_region_check_iova()
    130  return (region->iova <= iova) &&  in dma_region_check_iova()
    131  (region->length + region->iova >= iova + size);  in dma_region_check_iova()
    150  (unsigned long long)region->iova);  in afu_dma_region_add()
    161  if (dma_region_check_iova(this, region->iova, region->length))  in afu_dma_region_add()
    164  if (region->iova < this->iova)  in afu_dma_region_add()
    166  else if (region->iova > this->iova)  in afu_dma_region_add()
    191  (unsigned long long)region->iova);  in afu_dma_region_remove()
    213  (unsigned long long)region->iova);  in afu_dma_region_destroy()
    [all …]
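dma_region_check_iova() is a containment test: a region matches when the queried window lies entirely inside it, and a zero-size query degenerates into an exact start-address lookup. A sketch of that predicate with a pared-down region structure:

#include <linux/types.h>

struct example_region {
        u64 iova;       /* device address of the region */
        u64 length;     /* bytes covered */
};

static bool example_check_iova(const struct example_region *region,
                               u64 iova, u64 size)
{
        /* a zero-size query must name the region's exact start address */
        if (!size && region->iova != iova)
                return false;

        /* otherwise [iova, iova + size) must fit inside the region */
        return region->iova <= iova &&
               region->iova + region->length >= iova + size;
}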
|
/kernel/linux/linux-5.10/include/trace/events/ |
D | iommu.h |
    88  TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
    90  TP_ARGS(iova, paddr, size),
    93  __field(u64, iova)
    99  __entry->iova = iova;
    105  __entry->iova, __entry->paddr, __entry->size
    111  TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
    113  TP_ARGS(iova, size, unmapped_size),
    116  __field(u64, iova)
    122  __entry->iova = iova;
    128  __entry->iova, __entry->size, __entry->unmapped_size
    [all …]
|
/kernel/linux/linux-5.10/drivers/vfio/ |
D | vfio_iommu_type1.c |
    90  dma_addr_t iova; /* Device address */  member
    125  dma_addr_t iova; /* Device address */  member
    132  dma_addr_t iova;  member
    172  if (start + size <= dma->iova)  in vfio_find_dma()
    174  else if (start >= dma->iova + dma->size)  in vfio_find_dma()
    192  if (new->iova + new->size <= dma->iova)  in vfio_link_dma()
    242  bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);  in vfio_dma_populate_bitmap()
    297  static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)  in vfio_find_vpfn() argument
    305  if (iova < vpfn->iova)  in vfio_find_vpfn()
    307  else if (iova > vpfn->iova)  in vfio_find_vpfn()
    [all …]
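vfio_find_dma() walks an rbtree of vfio_dma ranges and returns the first entry whose window overlaps the query, which is what the two comparisons shown above implement. A sketch of that search with a trimmed-down node type; the field names follow the fragments above:

#include <linux/types.h>
#include <linux/rbtree.h>

struct example_dma {
        struct rb_node node;
        dma_addr_t iova;        /* device address */
        size_t size;            /* bytes mapped */
};

static struct example_dma *example_find_dma(struct rb_root *root,
                                            dma_addr_t start, size_t size)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct example_dma *dma =
                        rb_entry(node, struct example_dma, node);

                if (start + size <= dma->iova)
                        node = node->rb_left;           /* query ends before entry */
                else if (start >= dma->iova + dma->size)
                        node = node->rb_right;          /* query starts after entry */
                else
                        return dma;                     /* ranges overlap */
        }

        return NULL;
}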
|
/kernel/linux/linux-5.10/drivers/staging/media/ipu3/ |
D | ipu3-dmamap.c |
    102  struct iova *iova;  in imgu_dmamap_alloc() local
    107  iova = alloc_iova(&imgu->iova_domain, size >> shift,  in imgu_dmamap_alloc()
    109  if (!iova)  in imgu_dmamap_alloc()
    117  iovaddr = iova_dma_addr(&imgu->iova_domain, iova);  in imgu_dmamap_alloc()
    133  map->daddr = iova_dma_addr(&imgu->iova_domain, iova);  in imgu_dmamap_alloc()
    142  imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),  in imgu_dmamap_alloc()
    146  __free_iova(&imgu->iova_domain, iova);  in imgu_dmamap_alloc()
    153  struct iova *iova;  in imgu_dmamap_unmap() local
    155  iova = find_iova(&imgu->iova_domain,  in imgu_dmamap_unmap()
    157  if (WARN_ON(!iova))  in imgu_dmamap_unmap()
    [all …]
|
D | ipu3-mmu.c |
    154  static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,  in address_to_pte_idx() argument
    157  iova >>= IPU3_PAGE_SHIFT;  in address_to_pte_idx()
    160  *l2pt_idx = iova & IPU3_L2PT_MASK;  in address_to_pte_idx()
    162  iova >>= IPU3_L2PT_SHIFT;  in address_to_pte_idx()
    165  *l1pt_idx = iova & IPU3_L1PT_MASK;  in address_to_pte_idx()
    210  static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,  in __imgu_mmu_map() argument
    220  address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);  in __imgu_mmu_map()
    251  int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,  in imgu_mmu_map() argument
    262  if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {  in imgu_mmu_map()
    264  iova, &paddr, size);  in imgu_mmu_map()
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/ |
D | msm_iommu.c |
    32  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,  in msm_iommu_pagetable_unmap() argument
    41  unmapped += ops->unmap(ops, iova, 4096, NULL);  in msm_iommu_pagetable_unmap()
    42  iova += 4096;  in msm_iommu_pagetable_unmap()
    51  static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,  in msm_iommu_pagetable_map() argument
    58  u64 addr = iova;  in msm_iommu_pagetable_map()
    68  msm_iommu_pagetable_unmap(mmu, iova, mapped);  in msm_iommu_pagetable_map()
    129  static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,  in msm_iommu_tlb_flush_walk() argument
    135  unsigned long iova, size_t granule, void *cookie)  in msm_iommu_tlb_add_page() argument
    212  unsigned long iova, int flags, void *arg)  in msm_fault_handler() argument
    216  return iommu->base.handler(iommu->base.arg, iova, flags);  in msm_fault_handler()
    [all …]
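msm_iommu_pagetable_unmap() tears a range down one 4 KiB page at a time through the io-pgtable unmap hook. A simplified sketch of that loop, assuming the size is already page aligned; error handling is elided:

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static size_t example_pagetable_unmap(struct io_pgtable_ops *ops, u64 iova,
                                      size_t size)
{
        size_t unmapped = 0;

        while (size) {
                /* a NULL gather means no batched TLB invalidation here */
                unmapped += ops->unmap(ops, iova, SZ_4K, NULL);
                iova += SZ_4K;
                size -= SZ_4K;
        }

        return unmapped;
}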
|
D | msm_gem_vma.c |
    55  aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);  in msm_gem_purge_vma()
    64  if (!WARN_ON(!vma->iova))  in msm_gem_unmap_vma()
    76  if (WARN_ON(!vma->iova))  in msm_gem_map_vma()
    88  ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,  in msm_gem_map_vma()
    107  if (vma->iova)  in msm_gem_close_vma()
    111  vma->iova = 0;  in msm_gem_close_vma()
    123  if (WARN_ON(vma->iova))  in msm_gem_init_vma()
    134  vma->iova = vma->node.start << PAGE_SHIFT;  in msm_gem_init_vma()
|
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/ |
D | etnaviv_mmu.c |
    17  unsigned long iova, size_t size)  in etnaviv_context_unmap() argument
    22  if (!IS_ALIGNED(iova | size, pgsize)) {  in etnaviv_context_unmap()
    24  iova, size, pgsize);  in etnaviv_context_unmap()
    29  unmapped_page = context->global->ops->unmap(context, iova,  in etnaviv_context_unmap()
    34  iova += unmapped_page;  in etnaviv_context_unmap()
    40  unsigned long iova, phys_addr_t paddr,  in etnaviv_context_map() argument
    43  unsigned long orig_iova = iova;  in etnaviv_context_map()
    48  if (!IS_ALIGNED(iova | paddr | size, pgsize)) {  in etnaviv_context_map()
    50  iova, &paddr, size, pgsize);  in etnaviv_context_map()
    55  ret = context->global->ops->map(context, iova, paddr, pgsize,  in etnaviv_context_map()
    [all …]
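etnaviv_context_map() rejects requests that are not granule aligned, maps one page-size chunk at a time, and rolls the partial mapping back on failure. A sketch of that shape, written against io_pgtable_ops for self-containment rather than the driver's own context ops, with an assumed 4 KiB granule:

#include <linux/kernel.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>
#include <linux/gfp.h>

static int example_context_map(struct io_pgtable_ops *ops, unsigned long iova,
                               phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;          /* assumed granule */
        int ret = 0;

        /* every argument must be granule aligned or the loop below misbehaves */
        if (!IS_ALIGNED(iova | paddr | size, pgsize))
                return -EINVAL;

        while (size) {
                ret = ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* undo whatever was mapped before the failure */
        if (ret) {
                while (iova > orig_iova) {
                        iova -= pgsize;
                        ops->unmap(ops, iova, pgsize, NULL);
                }
        }

        return ret;
}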
|
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/ |
D | rxe_mr.c |
    27  int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)  in mem_check_range() argument
    35  if (iova < mem->iova ||  in mem_check_range()
    37  iova > mem->iova + mem->length - length)  in mem_check_range()
    127  u64 length, u64 iova, int access, struct ib_udata *udata,  in rxe_mem_init_user() argument
    193  mem->iova = iova;  in rxe_mem_init_user()
    232  u64 iova,  in lookup_iova() argument
    237  size_t offset = iova - mem->iova + mem->offset;  in lookup_iova()
    270  void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)  in iova_to_vaddr() argument
    283  addr = (void *)(uintptr_t)iova;  in iova_to_vaddr()
    287  if (mem_check_range(mem, iova, length)) {  in iova_to_vaddr()
    [all …]
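mem_check_range() is the memory-region bounds check: the requested window must fall inside the registered region or the access is rejected. A sketch of that test with a pared-down structure; the field names follow the fragments above and the error code is assumed:

#include <linux/types.h>
#include <linux/errno.h>

struct example_mem {
        u64 iova;       /* start of the registered region */
        size_t length;  /* bytes registered */
};

static int example_check_range(const struct example_mem *mem, u64 iova,
                               size_t length)
{
        if (iova < mem->iova ||
            length > mem->length ||
            iova > mem->iova + mem->length - length)
                return -EFAULT;

        return 0;
}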
|
/kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/ |
D | panfrost_mmu.c |
    58  u64 iova, u64 size)  in lock_region() argument
    61  u64 region = iova & PAGE_MASK;  in lock_region()
    78  u64 iova, u64 size, u32 op)  in mmu_hw_do_operation_locked() argument
    84  lock_region(pfdev, as_nr, iova, size);  in mmu_hw_do_operation_locked()
    95  u64 iova, u64 size, u32 op)  in mmu_hw_do_operation() argument
    100  ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);  in mmu_hw_do_operation()
    228  u64 iova, u64 size)  in panfrost_mmu_flush_range() argument
    237  mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);  in panfrost_mmu_flush_range()
    243  u64 iova, int prot, struct sg_table *sgt)  in mmu_map_sg() argument
    248  u64 start_iova = iova;  in mmu_map_sg()
    [all …]
|