
Searched refs:iova (Results 1 – 25 of 105) sorted by relevance


/drivers/iommu/
iova.c
128 struct iova *curr_iova = in __get_cached_rbnode()
129 rb_entry(iovad->cached32_node, struct iova, node); in __get_cached_rbnode()
137 unsigned long limit_pfn, struct iova *new) in __cached_rbnode_insert_update()
145 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
147 struct iova *cached_iova; in __cached_rbnode_delete_update()
153 cached_iova = rb_entry(curr, struct iova, node); in __cached_rbnode_delete_update()
157 struct iova *iova = rb_entry(node, struct iova, node); in __cached_rbnode_delete_update() local
160 if (node && iova->pfn_lo < iovad->dma_32bit_pfn) in __cached_rbnode_delete_update()
169 iova_insert_rbtree(struct rb_root *root, struct iova *iova, in iova_insert_rbtree() argument
177 struct iova *this = rb_entry(*new, struct iova, node); in iova_insert_rbtree()
[all …]
io-pgtable-arm-v7s.c
370 unsigned long iova, phys_addr_t paddr, int prot, in arm_v7s_init_pte() argument
386 tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl); in arm_v7s_init_pte()
387 if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz, in arm_v7s_init_pte()
430 static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, in __arm_v7s_map() argument
439 ptep += ARM_V7S_LVL_IDX(iova, lvl); in __arm_v7s_map()
443 return arm_v7s_init_pte(data, iova, paddr, prot, in __arm_v7s_map()
474 return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep); in __arm_v7s_map()
477 static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, in arm_v7s_map() argument
488 if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) in arm_v7s_map()
491 ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); in arm_v7s_map()
[all …]
tegra-gart.c
90 #define for_each_gart_pte(gart, iova) \ argument
91 for (iova = gart->iovmm_base; \
92 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
93 iova += GART_PAGE_SIZE)
118 unsigned long iova; in do_gart_setup() local
120 for_each_gart_pte(gart, iova) in do_gart_setup()
121 gart_set_pte(gart, iova, data ? *(data++) : 0); in do_gart_setup()
130 unsigned long iova; in gart_dump_table() local
134 for_each_gart_pte(gart, iova) { in gart_dump_table()
137 pte = gart_read_pte(gart, iova); in gart_dump_table()
[all …]
exynos-iommu.c
101 #define section_offs(iova) (iova & (SECT_SIZE - 1)) argument
103 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1)) argument
105 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1)) argument
110 static u32 lv1ent_offset(sysmmu_iova_t iova) in lv1ent_offset() argument
112 return iova >> SECT_ORDER; in lv1ent_offset()
115 static u32 lv2ent_offset(sysmmu_iova_t iova) in lv2ent_offset() argument
117 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
185 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) in section_entry() argument
187 return pgtable + lv1ent_offset(iova); in section_entry()
190 static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) in page_entry() argument
[all …]
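
The exynos-iommu.c hits above are the two-level SysMMU address split: lv1ent_offset() picks the section entry and lv2ent_offset() the small-page entry within it. Below is a minimal standalone sketch of that arithmetic, assuming the usual 1 MiB-section / 4 KiB-small-page layout (SECT_ORDER 20, SPAGE_ORDER 12, 256 level-2 entries); these constants are assumptions for illustration, not quoted from the driver.

/* Standalone sketch of the two-level lookup arithmetic shown above.
 * Constants mirror the usual Exynos SysMMU layout (assumed here). */
#include <stdio.h>
#include <stdint.h>

#define SECT_ORDER      20                       /* 1 MiB section */
#define SPAGE_ORDER     12                       /* 4 KiB small page */
#define SECT_SIZE       (1UL << SECT_ORDER)
#define SPAGE_SIZE      (1UL << SPAGE_ORDER)
#define NUM_LV2ENTRIES  (SECT_SIZE / SPAGE_SIZE) /* 256 */

static uint32_t lv1ent_offset(uint32_t iova) { return iova >> SECT_ORDER; }

static uint32_t lv2ent_offset(uint32_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

int main(void)
{
        uint32_t iova = 0x12345678;

        /* lv1 index selects the section entry, lv2 the page within it. */
        printf("lv1 index %u, lv2 index %u, page offset 0x%x\n",
               lv1ent_offset(iova), lv2ent_offset(iova),
               (unsigned int)(iova & (SPAGE_SIZE - 1)));
        return 0;
}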
io-pgtable-arm.c
272 unsigned long iova, size_t size, int lvl,
296 unsigned long iova, phys_addr_t paddr, in arm_lpae_init_pte() argument
314 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); in arm_lpae_init_pte()
315 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) in arm_lpae_init_pte()
355 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, in __arm_lpae_map() argument
365 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); in __arm_lpae_map()
369 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); in __arm_lpae_map()
399 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); in __arm_lpae_map()
443 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, in arm_lpae_map() argument
455 if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || in arm_lpae_map()
[all …]
rockchip-iommu.c
262 static u32 rk_iova_dte_index(dma_addr_t iova) in rk_iova_dte_index() argument
264 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; in rk_iova_dte_index()
267 static u32 rk_iova_pte_index(dma_addr_t iova) in rk_iova_pte_index() argument
269 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; in rk_iova_pte_index()
272 static u32 rk_iova_page_offset(dma_addr_t iova) in rk_iova_page_offset() argument
274 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; in rk_iova_page_offset()
299 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, in rk_iommu_zap_lines() argument
304 dma_addr_t iova_end = iova + size; in rk_iommu_zap_lines()
310 for (; iova < iova_end; iova += SPAGE_SIZE) in rk_iommu_zap_lines()
311 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
[all …]
dma-iommu.c
38 dma_addr_t iova; member
217 msi_page->iova = start; in cookie_init_hw_msi_region()
366 unsigned long shift, iova_len, iova = 0; in iommu_dma_alloc_iova() local
389 iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift); in iommu_dma_alloc_iova()
391 if (!iova) in iommu_dma_alloc_iova()
392 iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift); in iommu_dma_alloc_iova()
394 return (dma_addr_t)iova << shift; in iommu_dma_alloc_iova()
398 dma_addr_t iova, size_t size) in iommu_dma_free_iova() argument
406 free_iova_fast(iovad, iova_pfn(iovad, iova), in iommu_dma_free_iova()
535 dma_addr_t iova; in iommu_dma_alloc() local
[all …]
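
The iommu_dma_alloc_iova() lines above show the allocator's fallback order: try to place the IOVA below 4 GiB first, and only retry with the device's full DMA limit if that fails, working in units of IOVA pages (the shift). A standalone sketch of that pattern, with try_alloc_pfn() as a hypothetical stand-in for alloc_iova_fast():

/* Sketch of the "try below 4 GiB first, then the full DMA limit" fallback
 * seen in iommu_dma_alloc_iova() above.  try_alloc_pfn() is a toy stand-in
 * for alloc_iova_fast(); everything works in IOVA pages (pfn). */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)

/* Pretend allocator: fails if the limit is too low, otherwise hands back
 * a pfn just under the limit.  Purely for illustration. */
static unsigned long try_alloc_pfn(unsigned long nr_pages, unsigned long limit_pfn)
{
        if (limit_pfn < 0x200000UL)     /* pretend the low space is exhausted */
                return 0;
        return limit_pfn - nr_pages;
}

static uint64_t alloc_iova_sketch(unsigned long nr_pages, uint64_t dma_limit,
                                  unsigned int shift)
{
        unsigned long pfn;

        /* First try to keep the mapping below 4 GiB ... */
        pfn = try_alloc_pfn(nr_pages, DMA_BIT_MASK(32) >> shift);
        /* ... and only fall back to the device's full DMA mask on failure. */
        if (!pfn)
                pfn = try_alloc_pfn(nr_pages, dma_limit >> shift);

        return (uint64_t)pfn << shift;
}

int main(void)
{
        printf("iova = 0x%llx\n",
               (unsigned long long)alloc_iova_sketch(16, DMA_BIT_MASK(40), 12));
        return 0;
}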
tegra-smmu.c
143 static unsigned int iova_pd_index(unsigned long iova) in iova_pd_index() argument
145 return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1); in iova_pd_index()
148 static unsigned int iova_pt_index(unsigned long iova) in iova_pt_index() argument
150 return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); in iova_pt_index()
210 unsigned long iova) in smmu_flush_tlb_section() argument
219 value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova); in smmu_flush_tlb_section()
225 unsigned long iova) in smmu_flush_tlb_group() argument
234 value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova); in smmu_flush_tlb_group()
514 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pde() argument
517 unsigned int pd_index = iova_pd_index(iova); in tegra_smmu_set_pde()
[all …]
io-pgtable.h
32 void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
120 int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
122 int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
125 unsigned long iova);
180 unsigned long iova, size_t size, size_t granule, bool leaf) in io_pgtable_tlb_add_flush() argument
182 iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); in io_pgtable_tlb_add_flush()
s390-iommu.c
267 static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, in s390_iommu_map() argument
279 rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova, in s390_iommu_map()
286 dma_addr_t iova) in s390_iommu_iova_to_phys() argument
293 if (iova < domain->geometry.aperture_start || in s390_iommu_iova_to_phys()
294 iova > domain->geometry.aperture_end) in s390_iommu_iova_to_phys()
297 rtx = calc_rtx(iova); in s390_iommu_iova_to_phys()
298 sx = calc_sx(iova); in s390_iommu_iova_to_phys()
299 px = calc_px(iova); in s390_iommu_iova_to_phys()
317 unsigned long iova, size_t size) in s390_iommu_unmap() argument
324 paddr = s390_iommu_iova_to_phys(domain, iova); in s390_iommu_unmap()
[all …]
iommu.c
1465 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
1470 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
1506 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
1509 unsigned long orig_iova = iova; in iommu_map()
1530 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { in iommu_map()
1532 iova, &paddr, size, min_pagesz); in iommu_map()
1536 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); in iommu_map()
1539 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); in iommu_map()
1542 iova, &paddr, pgsize); in iommu_map()
1544 ret = domain->ops->map(domain, iova, paddr, pgsize, prot); in iommu_map()
[all …]
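
The iommu_map() lines above check that iova, paddr and size are all aligned to the smallest supported page, then map the range chunk by chunk, each chunk using the largest page size the hardware allows at the current alignment. A self-contained sketch of that loop follows; pick_pgsize(), do_map() and the page-size bitmap are illustrative stand-ins, not the kernel's exact helpers.

/* Sketch of the chunking loop in iommu_map() above. */
#include <stdio.h>
#include <stdint.h>

static int do_map(uint64_t iova, uint64_t paddr, uint64_t pgsize)
{
        /* Stand-in for ops->map(). */
        printf("map iova 0x%llx -> pa 0x%llx (0x%llx)\n",
               (unsigned long long)iova, (unsigned long long)paddr,
               (unsigned long long)pgsize);
        return 0;
}

static uint64_t pick_pgsize(uint64_t pgsize_bitmap, uint64_t addr_merge, uint64_t size)
{
        /* Largest supported page that fits the remaining size and the
         * current iova|paddr alignment. */
        for (int order = 63; order >= 0; order--) {
                uint64_t pgsize = 1ULL << order;

                if (!(pgsize_bitmap & pgsize) || pgsize > size ||
                    (addr_merge & (pgsize - 1)))
                        continue;
                return pgsize;
        }
        return 0;
}

static int map_range(uint64_t iova, uint64_t paddr, uint64_t size, uint64_t pgsize_bitmap)
{
        uint64_t min_pagesz = pgsize_bitmap & -pgsize_bitmap; /* smallest supported page */

        /* Same idea as the IS_ALIGNED(iova | paddr | size, min_pagesz) check. */
        if ((iova | paddr | size) & (min_pagesz - 1))
                return -1;

        while (size) {
                uint64_t pgsize = pick_pgsize(pgsize_bitmap, iova | paddr, size);
                int ret = do_map(iova, paddr, pgsize);

                if (ret)
                        return ret;
                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }
        return 0;
}

int main(void)
{
        /* 4K | 2M | 1G page sizes: roughly an LPAE-style bitmap (assumption). */
        return map_range(0x1000000, 0x40201000, 0x202000,
                         (1ULL << 12) | (1ULL << 21) | (1ULL << 30));
}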
ipmmu-vmsa.c
292 static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, in ipmmu_tlb_add_flush() argument
450 u32 iova; in ipmmu_domain_irq() local
456 iova = ipmmu_ctx_read(domain, IMEAR); in ipmmu_domain_irq()
469 iova); in ipmmu_domain_irq()
472 iova); in ipmmu_domain_irq()
483 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
488 status, iova); in ipmmu_domain_irq()
605 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, in ipmmu_map() argument
613 return domain->iop->map(domain->iop, iova, paddr, size, prot); in ipmmu_map()
616 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, in ipmmu_unmap() argument
[all …]
qcom_iommu.c
156 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, in qcom_iommu_tlb_inv_range_nosync() argument
168 iova &= ~12UL; in qcom_iommu_tlb_inv_range_nosync()
169 iova |= ctx->asid; in qcom_iommu_tlb_inv_range_nosync()
171 iommu_writel(ctx, reg, iova); in qcom_iommu_tlb_inv_range_nosync()
172 iova += granule; in qcom_iommu_tlb_inv_range_nosync()
187 u64 iova; in qcom_iommu_fault() local
195 iova = iommu_readq(ctx, ARM_SMMU_CB_FAR); in qcom_iommu_fault()
200 fsr, iova, fsynr, ctx->asid); in qcom_iommu_fault()
400 static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, in qcom_iommu_map() argument
412 ret = ops->map(ops, iova, paddr, size, prot); in qcom_iommu_map()
[all …]
/drivers/vfio/
vfio_iommu_type1.c
87 dma_addr_t iova; /* Device address */ member
107 dma_addr_t iova; /* Device address */ member
130 if (start + size <= dma->iova) in vfio_find_dma()
132 else if (start >= dma->iova + dma->size) in vfio_find_dma()
150 if (new->iova + new->size <= dma->iova) in vfio_link_dma()
168 static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova) in vfio_find_vpfn() argument
176 if (iova < vpfn->iova) in vfio_find_vpfn()
178 else if (iova > vpfn->iova) in vfio_find_vpfn()
197 if (new->iova < vpfn->iova) in vfio_link_pfn()
212 static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova, in vfio_add_to_pfn_list() argument
[all …]
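
vfio_find_dma() above walks an rb-tree of non-overlapping [iova, iova + size) ranges: descend left if the search range ends before the node starts, right if it starts after the node ends, otherwise the ranges overlap. A sketch of the same comparison over a sorted array standing in for the tree:

/* Sketch of the interval test used by vfio_find_dma() above: two half-open
 * ranges overlap unless one ends before the other begins.  A sorted array
 * stands in for the kernel's rb-tree of vfio_dma ranges. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct dma_range {
        uint64_t iova;
        uint64_t size;
};

static const struct dma_range *find_dma(const struct dma_range *tbl, size_t n,
                                        uint64_t start, uint64_t size)
{
        size_t lo = 0, hi = n;

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;
                const struct dma_range *dma = &tbl[mid];

                if (start + size <= dma->iova)            /* wholly below: go left */
                        hi = mid;
                else if (start >= dma->iova + dma->size)  /* wholly above: go right */
                        lo = mid + 1;
                else
                        return dma;                       /* ranges overlap */
        }
        return NULL;
}

int main(void)
{
        static const struct dma_range table[] = {
                { 0x10000, 0x4000 }, { 0x40000, 0x1000 }, { 0x80000, 0x20000 },
        };
        const struct dma_range *hit = find_dma(table, 3, 0x40800, 0x1000);

        if (hit)
                printf("overlaps mapping at 0x%llx\n", (unsigned long long)hit->iova);
        else
                printf("no overlap\n");
        return 0;
}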
/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
26 unsigned long iova, int flags, void *arg) in etnaviv_fault_handler() argument
28 DBG("*** fault: iova=%08lx, flags=%d", iova, flags); in etnaviv_fault_handler()
32 int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, in etnaviv_iommu_map() argument
37 unsigned int da = iova; in etnaviv_iommu_map()
48 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); in etnaviv_iommu_map()
60 da = iova; in etnaviv_iommu_map()
71 int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova, in etnaviv_iommu_unmap() argument
76 unsigned int da = iova; in etnaviv_iommu_unmap()
87 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); in etnaviv_iommu_unmap()
218 u32 iova; in etnaviv_iommu_map_gem() local
[all …]
etnaviv_iommu.c
70 unsigned long iova) in pgtable_read() argument
73 unsigned int index = (iova - GPU_MEM_START) / SZ_4K; in pgtable_read()
82 unsigned long iova, phys_addr_t paddr) in pgtable_write() argument
85 unsigned int index = (iova - GPU_MEM_START) / SZ_4K; in pgtable_write()
136 static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova, in etnaviv_iommuv1_map() argument
145 pgtable_write(&etnaviv_domain->pgtable, iova, paddr); in etnaviv_iommuv1_map()
152 unsigned long iova, size_t size) in etnaviv_iommuv1_unmap() argument
160 pgtable_write(&etnaviv_domain->pgtable, iova, in etnaviv_iommuv1_unmap()
168 dma_addr_t iova) in etnaviv_iommu_iova_to_phys() argument
172 return pgtable_read(&etnaviv_domain->pgtable, iova); in etnaviv_iommu_iova_to_phys()
etnaviv_iommu_v2.c
60 static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova, in etnaviv_iommuv2_map() argument
74 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT; in etnaviv_iommuv2_map()
75 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT; in etnaviv_iommuv2_map()
83 unsigned long iova, size_t size) in etnaviv_iommuv2_unmap() argument
92 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT; in etnaviv_iommuv2_unmap()
93 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT; in etnaviv_iommuv2_unmap()
101 dma_addr_t iova) in etnaviv_iommuv2_iova_to_phys() argument
107 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT; in etnaviv_iommuv2_iova_to_phys()
108 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT; in etnaviv_iommuv2_iova_to_phys()
etnaviv_mmu.h
54 int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
56 int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
67 u32 *iova);
70 u32 iova);
/drivers/infiniband/sw/rxe/
rxe_mr.c
54 int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) in mem_check_range() argument
62 if (iova < mem->iova || in mem_check_range()
64 iova > mem->iova + mem->length - length) in mem_check_range()
162 u64 length, u64 iova, int access, struct ib_udata *udata, in rxe_mem_init_user() argument
227 mem->iova = iova; in rxe_mem_init_user()
266 u64 iova, in lookup_iova() argument
271 size_t offset = iova - mem->iova + mem->offset; in lookup_iova()
304 void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length) in iova_to_vaddr() argument
317 addr = (void *)(uintptr_t)iova; in iova_to_vaddr()
321 if (mem_check_range(mem, iova, length)) { in iova_to_vaddr()
[all …]
/drivers/gpu/drm/msm/
msm_gem_submit.c
121 submit->bos[i].iova = submit_bo.presumed; in submit_lookup_objects()
170 submit->bos[i].iova = 0; in submit_unlock_unpin_bo()
262 uint64_t iova; in submit_pin_objects() local
266 submit->gpu->aspace, &iova); in submit_pin_objects()
273 if (iova == submit->bos[i].iova) { in submit_pin_objects()
276 submit->bos[i].iova = iova; in submit_pin_objects()
287 struct msm_gem_object **obj, uint64_t *iova, bool *valid) in submit_bo() argument
297 if (iova) in submit_bo()
298 *iova = submit->bos[idx].iova; in submit_bo()
334 uint64_t iova; in submit_reloc() local
[all …]
msm_rd.c
279 uint64_t iova, uint32_t size) in snapshot_buf() argument
288 if (iova) { in snapshot_buf()
289 buf += iova - submit->bos[idx].iova; in snapshot_buf()
291 iova = submit->bos[idx].iova; in snapshot_buf()
296 (uint32_t[3]){ iova, size, iova >> 32 }, 12); in snapshot_buf()
338 uint64_t iova = submit->cmd[i].iova; in msm_rd_dump_submit() local
344 submit->cmd[i].iova, szd * 4); in msm_rd_dump_submit()
357 (uint32_t[3]){ iova, szd, iova >> 32 }, 12); in msm_rd_dump_submit()
msm_iommu.c
28 unsigned long iova, int flags, void *arg) in msm_fault_handler() argument
32 return iommu->base.handler(iommu->base.arg, iova, flags); in msm_fault_handler()
33 pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); in msm_fault_handler()
60 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, in msm_iommu_map() argument
67 ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); in msm_iommu_map()
74 static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, in msm_iommu_unmap() argument
80 iommu_unmap(iommu->domain, iova, len); in msm_iommu_unmap()
msm_gem_vma.c
45 if (!aspace || !vma->iova) in msm_gem_unmap_vma()
50 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size); in msm_gem_unmap_vma()
57 vma->iova = 0; in msm_gem_unmap_vma()
80 vma->iova = vma->node.start << PAGE_SHIFT; in msm_gem_map_vma()
84 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, in msm_gem_map_vma()
msm_mmu.h
26 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
28 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
36 int (*handler)(void *arg, unsigned long iova, int flags);
51 int (*handler)(void *arg, unsigned long iova, int flags)) in msm_mmu_set_fault_handler() argument
/drivers/s390/cio/
vfio_ccw_cp.c
105 u64 iova, unsigned int len) in pfn_array_alloc_pin() argument
112 pa->pa_iova = iova; in pfn_array_alloc_pin()
114 pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; in pfn_array_alloc_pin()
169 unsigned long iova) in pfn_array_table_iova_pinned() argument
172 unsigned long iova_pfn = iova >> PAGE_SHIFT; in pfn_array_table_iova_pinned()
215 void *to, u64 iova, in copy_from_iova() argument
223 ret = pfn_array_alloc_pin(&pa, mdev, iova, n); in copy_from_iova()
232 from += iova & (PAGE_SIZE - 1); in copy_from_iova()
233 m -= iova & (PAGE_SIZE - 1); in copy_from_iova()
250 struct ccw1 *to, u64 iova, in copy_ccw_from_iova() argument
[all …]
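
The pfn_array_alloc_pin() hit above computes how many pages the guest range [iova, iova + len) touches: the offset into the first page plus the length, rounded up to whole pages. A tiny standalone sketch of that arithmetic (4 KiB pages assumed):

/* Sketch of the page-count arithmetic in pfn_array_alloc_pin() above. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long nr_pages(uint64_t iova, unsigned long len)
{
        /* In-page offset plus length, rounded up to whole pages. */
        return ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
        /* 6 KiB starting 0xF00 bytes into a page spills into a third page. */
        printf("%lu pages\n", nr_pages(0x1000F00, 6 * 1024));
        return 0;
}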