
Searched full:iova (Results 1 – 25 of 538) sorted by relevance


/kernel/linux/linux-5.10/drivers/iommu/
iova.c
8 #include <linux/iova.h>
34 * IOVA granularity will normally be equal to the smallest in init_iova_domain()
125 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new) in __cached_rbnode_insert_update()
134 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
136 struct iova *cached_iova; in __cached_rbnode_delete_update()
138 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); in __cached_rbnode_delete_update()
147 cached_iova = rb_entry(iovad->cached_node, struct iova, node); in __cached_rbnode_delete_update()
152 /* Insert the iova into domain rbtree by holding writer lock */
154 iova_insert_rbtree(struct rb_root *root, struct iova *iova, in iova_insert_rbtree() argument
162 struct iova *this = rb_entry(*new, struct iova, node); in iova_insert_rbtree()
[all …]
tegra-gart.c
60 #define for_each_gart_pte(gart, iova) \ argument
61 for (iova = gart->iovmm_base; \
62 iova < gart->iovmm_end; \
63 iova += GART_PAGE_SIZE)
66 unsigned long iova, unsigned long pte) in gart_set_pte() argument
68 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); in gart_set_pte()
73 unsigned long iova) in gart_read_pte() argument
77 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); in gart_read_pte()
85 unsigned long iova; in do_gart_setup() local
87 for_each_gart_pte(gart, iova) in do_gart_setup()
[all …]
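
The for_each_gart_pte() macro above is just a for-loop that steps an iova cursor across the GART aperture in GART_PAGE_SIZE strides; gart_set_pte() and gart_read_pte() then drive an indirect register pair (write the address register, then access the data register). A minimal sketch of the iteration pattern, with illustrative aperture bounds rather than the driver's hardware-derived ones:

    #include <stdio.h>

    /* Illustrative aperture; the driver reads these from the device. */
    #define IOVMM_BASE     0x58000000UL
    #define IOVMM_END      0x58004000UL
    #define GART_PAGE_SIZE 0x1000UL

    /* Same shape as the driver's macro: the caller supplies the cursor. */
    #define for_each_gart_pte(iova) \
        for (iova = IOVMM_BASE; iova < IOVMM_END; iova += GART_PAGE_SIZE)

    int main(void)
    {
        unsigned long iova;
        int n = 0;

        for_each_gart_pte(iova)
            n++;                 /* one PTE slot per aperture page */
        printf("%d pages\n", n); /* 4 pages */
        return 0;
    }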
/kernel/linux/linux-6.6/drivers/media/platform/nvidia/tegra-vde/
iommu.c
9 #include <linux/iova.h>
21 struct iova **iovap, in tegra_vde_iommu_map()
24 struct iova *iova; in tegra_vde_iommu_map() local
30 size = iova_align(&vde->iova, size); in tegra_vde_iommu_map()
31 shift = iova_shift(&vde->iova); in tegra_vde_iommu_map()
33 iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true); in tegra_vde_iommu_map()
34 if (!iova) in tegra_vde_iommu_map()
37 addr = iova_dma_addr(&vde->iova, iova); in tegra_vde_iommu_map()
42 __free_iova(&vde->iova, iova); in tegra_vde_iommu_map()
46 *iovap = iova; in tegra_vde_iommu_map()
[all …]
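
This snippet shows the usual byte-to-page conversion around alloc_iova(): the byte size is first rounded up with iova_align(), then both the size and the end limit are shifted right by iova_shift(), because alloc_iova() counts page frames, not bytes. A standalone sketch of that arithmetic, assuming a 4 KiB granule (the real shift is derived from the iova_domain):

    #include <stdio.h>

    /* Assumed 4 KiB granule; stand-ins for iova_shift()/iova_align(). */
    #define IOVA_SHIFT   12UL
    #define IOVA_GRANULE (1UL << IOVA_SHIFT)

    static unsigned long iova_align_size(unsigned long size)
    {
        /* Round up to a whole number of granules. */
        return (size + IOVA_GRANULE - 1) & ~(IOVA_GRANULE - 1);
    }

    int main(void)
    {
        unsigned long size = 0x2345;                    /* 9029 bytes */
        unsigned long aligned = iova_align_size(size);  /* 0x3000 */
        unsigned long nr_pages = aligned >> IOVA_SHIFT; /* 3 pages */

        printf("size=%#lx aligned=%#lx pages=%lu\n", size, aligned, nr_pages);
        return 0;
    }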
/kernel/linux/linux-5.10/drivers/staging/media/tegra-vde/
iommu.c
9 #include <linux/iova.h>
21 struct iova **iovap, in tegra_vde_iommu_map()
24 struct iova *iova; in tegra_vde_iommu_map() local
30 size = iova_align(&vde->iova, size); in tegra_vde_iommu_map()
31 shift = iova_shift(&vde->iova); in tegra_vde_iommu_map()
33 iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true); in tegra_vde_iommu_map()
34 if (!iova) in tegra_vde_iommu_map()
37 addr = iova_dma_addr(&vde->iova, iova); in tegra_vde_iommu_map()
42 __free_iova(&vde->iova, iova); in tegra_vde_iommu_map()
46 *iovap = iova; in tegra_vde_iommu_map()
[all …]
/kernel/linux/linux-6.6/drivers/iommu/
iova.c
8 #include <linux/iova.h>
18 #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
46 static struct iova *to_iova(struct rb_node *node) in to_iova()
48 return rb_entry(node, struct iova, node); in to_iova()
56 * IOVA granularity will normally be equal to the smallest in init_iova_domain()
86 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new) in __cached_rbnode_insert_update()
95 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
97 struct iova *cached_iova; in __cached_rbnode_delete_update()
118 * enough to the highest-allocated IOVA that starting the allocation in iova_find_limit()
150 /* Insert the iova into domain rbtree by holding writer lock */
[all …]
tegra-gart.c
60 #define for_each_gart_pte(gart, iova) \ argument
61 for (iova = gart->iovmm_base; \
62 iova < gart->iovmm_end; \
63 iova += GART_PAGE_SIZE)
66 unsigned long iova, unsigned long pte) in gart_set_pte() argument
68 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); in gart_set_pte()
73 unsigned long iova) in gart_read_pte() argument
77 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); in gart_read_pte()
85 unsigned long iova; in do_gart_setup() local
87 for_each_gart_pte(gart, iova) in do_gart_setup()
[all …]
/kernel/linux/linux-6.6/drivers/vfio/
iova_bitmap.c
14 * struct iova_bitmap_map - A bitmap representing an IOVA range
20 * total IOVA range. The struct iova_bitmap_map, though, represents the
21 * subset of said IOVA space that is pinned by its parent structure (struct
26 * records the IOVA *range* in the bitmap by setting the corresponding
29 * The bitmap is an array of u64 whereas each bit represents an IOVA of
32 * data[(iova / page_size) / 64] & (1ULL << (iova % 64))
35 /* base IOVA representing bit 0 of the first page */
36 unsigned long iova; member
52 * struct iova_bitmap - The IOVA bitmap object
56 * Abstracts the pinning work and iterates in IOVA ranges.
[all …]
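
The comment above gives the bit-addressing formula for the dirty bitmap. Worked through with 4 KiB pages and a base IOVA of 0: iova 0x42000 is page 66, which lands in data[66 / 64] = data[1] at bit position 66 % 64 = 2. A hedged sketch of that addressing (the helper name and layout are ours, simplified from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative page-granular dirty-bitmap addressing, in the spirit
     * of the iova_bitmap comment above. base_iova and page_size are
     * assumptions supplied by the caller. */
    static void mark_dirty(uint64_t *data, uint64_t base_iova,
                           uint64_t iova, uint64_t page_size)
    {
        uint64_t page = (iova - base_iova) / page_size; /* bit number */

        data[page / 64] |= 1ULL << (page % 64);         /* word + bit */
    }

    int main(void)
    {
        uint64_t data[4] = { 0 };

        mark_dirty(data, 0, 0x42000, 0x1000);  /* page 66: word 1, bit 2 */
        printf("data[1] = %#llx\n", (unsigned long long)data[1]);
        return 0;
    }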
vfio_iommu_type1.c
17 * the IOVA range that can be mapped. The Type1 IOMMU is currently
90 dma_addr_t iova; /* Device address */ member
129 dma_addr_t iova; /* Device address */ member
136 dma_addr_t iova; member
173 if (start + size <= dma->iova) in vfio_find_dma()
175 else if (start >= dma->iova + dma->size) in vfio_find_dma()
194 if (start < dma->iova + dma->size) { in vfio_find_dma_first_node()
197 if (start >= dma->iova) in vfio_find_dma_first_node()
204 if (res && size && dma_res->iova >= start + size) in vfio_find_dma_first_node()
218 if (new->iova + new->size <= dma->iova) in vfio_link_dma()
[all …]
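
vfio_find_dma() is a textbook interval lookup in a red-black tree: descend left when the query range ends at or before the node, descend right when it starts at or after the node's end, and report an overlap otherwise. A compilable sketch over a plain (unbalanced) binary tree, with the struct trimmed to the fields the comparison needs:

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for struct vfio_dma; balancing is omitted. */
    struct dma_node {
        uint64_t iova;                 /* device address */
        uint64_t size;                 /* length of the mapping */
        struct dma_node *left, *right;
    };

    /* Find any node overlapping [start, start + size); mirrors the
     * comparisons in vfio_find_dma() above. */
    static struct dma_node *find_dma(struct dma_node *node,
                                     uint64_t start, uint64_t size)
    {
        while (node) {
            if (start + size <= node->iova)
                node = node->left;            /* wholly below */
            else if (start >= node->iova + node->size)
                node = node->right;           /* wholly above */
            else
                return node;                  /* overlap found */
        }
        return NULL;
    }

    int main(void)
    {
        struct dma_node a = { 0x1000, 0x2000, NULL, NULL };

        /* [0x2800, 0x2900) overlaps [0x1000, 0x3000): exit code 0. */
        return find_dma(&a, 0x2800, 0x100) ? 0 : 1;
    }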
/kernel/linux/linux-5.10/include/linux/
iova.h
18 /* iova structure */
19 struct iova { struct
28 #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
40 /* Call-Back from IOVA code into IOMMU drivers */
67 /* holds all the iova translations for a domain */
70 struct rb_root rbroot; /* iova domain rbtree root */
85 struct iova anchor; /* rbtree lookup anchor */
86 struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
92 iova entry */
100 static inline unsigned long iova_size(struct iova *iova) in iova_size() argument
[all …]
/kernel/linux/linux-5.10/drivers/fpga/
dfl-afu-dma-region.c
118 * @iova: address of the dma memory area
121 * Compare the dma memory area defined by @iova and @size with given dma region.
125 u64 iova, u64 size) in dma_region_check_iova() argument
127 if (!size && region->iova != iova) in dma_region_check_iova()
130 return (region->iova <= iova) && in dma_region_check_iova()
131 (region->length + region->iova >= iova + size); in dma_region_check_iova()
149 dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n", in afu_dma_region_add()
150 (unsigned long long)region->iova); in afu_dma_region_add()
161 if (dma_region_check_iova(this, region->iova, region->length)) in afu_dma_region_add()
164 if (region->iova < this->iova) in afu_dma_region_add()
[all …]
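
dma_region_check_iova() is a containment test: the query [iova, iova + size) must lie entirely within [region->iova, region->iova + region->length), and a zero size degenerates into an exact start-address match. A simplified restatement of the predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of the DFL AFU region check above, trimmed to the two
     * fields the predicate uses. */
    struct dma_region {
        uint64_t iova;
        uint64_t length;
    };

    static bool region_check_iova(const struct dma_region *r,
                                  uint64_t iova, uint64_t size)
    {
        if (!size)                  /* size 0: exact-start lookup */
            return r->iova == iova;

        return r->iova <= iova &&                  /* starts inside */
               r->iova + r->length >= iova + size; /* ends inside */
    }

    int main(void)
    {
        struct dma_region r = { .iova = 0x1000, .length = 0x3000 };

        /* [0x2000, 0x2800) is inside [0x1000, 0x4000): exit code 0. */
        return region_check_iova(&r, 0x2000, 0x800) ? 0 : 1;
    }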
/kernel/linux/linux-6.6/drivers/fpga/
dfl-afu-dma-region.c
118 * @iova: address of the dma memory area
121 * Compare the dma memory area defined by @iova and @size with given dma region.
125 u64 iova, u64 size) in dma_region_check_iova() argument
127 if (!size && region->iova != iova) in dma_region_check_iova()
130 return (region->iova <= iova) && in dma_region_check_iova()
131 (region->length + region->iova >= iova + size); in dma_region_check_iova()
149 dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n", in afu_dma_region_add()
150 (unsigned long long)region->iova); in afu_dma_region_add()
161 if (dma_region_check_iova(this, region->iova, region->length)) in afu_dma_region_add()
164 if (region->iova < this->iova) in afu_dma_region_add()
[all …]
/kernel/linux/linux-6.6/include/linux/
iova.h
17 /* iova structure */
18 struct iova { struct
27 /* holds all the iova translations for a domain */ argument
30 struct rb_root rbroot; /* iova domain rbtree root */
37 struct iova anchor; /* rbtree lookup anchor */
43 static inline unsigned long iova_size(struct iova *iova) in iova_size() argument
45 return iova->pfn_hi - iova->pfn_lo + 1; in iova_size()
58 static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) in iova_offset() argument
60 return iova & iova_mask(iovad); in iova_offset()
68 static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) in iova_dma_addr() argument
[all …]
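
Because struct iova stores an inclusive range of page-frame numbers, iova_size() is pfn_hi - pfn_lo + 1 pages, iova_offset() masks off the in-page bits, and iova_dma_addr() (truncated above) rebuilds a bus address from pfn_lo and the domain's granule shift. A worked example assuming a 4 KiB granule:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed 4 KiB granule; the kernel derives this from the domain. */
    #define SHIFT 12
    #define MASK  ((1UL << SHIFT) - 1)

    int main(void)
    {
        unsigned long pfn_lo = 0x100, pfn_hi = 0x102;  /* 3-page range */
        uint64_t iova = 0x100a34;

        printf("size   = %lu pages\n", pfn_hi - pfn_lo + 1);   /* 3 */
        printf("offset = %#llx\n",
               (unsigned long long)(iova & MASK));             /* 0xa34 */
        printf("addr   = %#lx\n", pfn_lo << SHIFT);            /* 0x100000 */
        return 0;
    }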
/kernel/linux/linux-6.6/include/trace/events/
iommu.h
81 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
83 TP_ARGS(iova, paddr, size),
86 __field(u64, iova)
92 __entry->iova = iova;
97 TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
98 __entry->iova, __entry->iova + __entry->size, __entry->paddr,
105 TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
107 TP_ARGS(iova, size, unmapped_size),
110 __field(u64, iova)
116 __entry->iova = iova;
[all …]
/kernel/linux/linux-5.10/include/trace/events/
iommu.h
88 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
90 TP_ARGS(iova, paddr, size),
93 __field(u64, iova)
99 __entry->iova = iova;
104 TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
105 __entry->iova, __entry->paddr, __entry->size
111 TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
113 TP_ARGS(iova, size, unmapped_size),
116 __field(u64, iova)
122 __entry->iova = iova;
[all …]
/kernel/linux/linux-5.10/drivers/staging/media/ipu3/
ipu3-dmamap.c
102 struct iova *iova; in imgu_dmamap_alloc() local
107 iova = alloc_iova(&imgu->iova_domain, size >> shift, in imgu_dmamap_alloc()
109 if (!iova) in imgu_dmamap_alloc()
117 iovaddr = iova_dma_addr(&imgu->iova_domain, iova); in imgu_dmamap_alloc()
133 map->daddr = iova_dma_addr(&imgu->iova_domain, iova); in imgu_dmamap_alloc()
135 dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__, in imgu_dmamap_alloc()
142 imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova), in imgu_dmamap_alloc()
146 __free_iova(&imgu->iova_domain, iova); in imgu_dmamap_alloc()
153 struct iova *iova; in imgu_dmamap_unmap() local
155 iova = find_iova(&imgu->iova_domain, in imgu_dmamap_unmap()
[all …]
ipu3-mmu.c
149 * address_to_pte_idx - split IOVA into L1 and L2 page table indices
150 * @iova: IOVA to split.
154 static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx, in address_to_pte_idx() argument
157 iova >>= IPU3_PAGE_SHIFT; in address_to_pte_idx()
160 *l2pt_idx = iova & IPU3_L2PT_MASK; in address_to_pte_idx()
162 iova >>= IPU3_L2PT_SHIFT; in address_to_pte_idx()
165 *l1pt_idx = iova & IPU3_L1PT_MASK; in address_to_pte_idx()
210 static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova, in __imgu_mmu_map() argument
220 address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx); in __imgu_mmu_map()
244 * @iova: the virtual address
[all …]
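
address_to_pte_idx() decodes a two-level page-table position: shift away the in-page bits, mask the low bits to get the L2 index, shift again, and mask to get the L1 index. A sketch with assumed geometry (4 KiB pages, 1024-entry tables; the constants below are illustrative stand-ins for the driver's IPU3_* values):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define L2PT_SHIFT 10
    #define L2PT_MASK  ((1U << L2PT_SHIFT) - 1)
    #define L1PT_MASK  ((1U << 10) - 1)

    static void address_to_pte_idx(unsigned long iova,
                                   unsigned *l1, unsigned *l2)
    {
        iova >>= PAGE_SHIFT;      /* page number */
        *l2 = iova & L2PT_MASK;   /* low 10 bits: L2 index */
        iova >>= L2PT_SHIFT;
        *l1 = iova & L1PT_MASK;   /* next 10 bits: L1 index */
    }

    int main(void)
    {
        unsigned l1, l2;

        address_to_pte_idx(0x12345678, &l1, &l2);
        printf("l1=%u l2=%u\n", l1, l2);  /* page 0x12345: l1=72, l2=837 */
        return 0;
    }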
/kernel/linux/linux-6.6/drivers/staging/media/ipu3/
ipu3-dmamap.c
102 struct iova *iova; in imgu_dmamap_alloc() local
107 iova = alloc_iova(&imgu->iova_domain, size >> shift, in imgu_dmamap_alloc()
109 if (!iova) in imgu_dmamap_alloc()
117 iovaddr = iova_dma_addr(&imgu->iova_domain, iova); in imgu_dmamap_alloc()
133 map->daddr = iova_dma_addr(&imgu->iova_domain, iova); in imgu_dmamap_alloc()
135 dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__, in imgu_dmamap_alloc()
142 imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova), in imgu_dmamap_alloc()
146 __free_iova(&imgu->iova_domain, iova); in imgu_dmamap_alloc()
153 struct iova *iova; in imgu_dmamap_unmap() local
155 iova = find_iova(&imgu->iova_domain, in imgu_dmamap_unmap()
[all …]
ipu3-mmu.c
149 * address_to_pte_idx - split IOVA into L1 and L2 page table indices
150 * @iova: IOVA to split.
154 static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx, in address_to_pte_idx() argument
157 iova >>= IPU3_PAGE_SHIFT; in address_to_pte_idx()
160 *l2pt_idx = iova & IPU3_L2PT_MASK; in address_to_pte_idx()
162 iova >>= IPU3_L2PT_SHIFT; in address_to_pte_idx()
165 *l1pt_idx = iova & IPU3_L1PT_MASK; in address_to_pte_idx()
210 static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova, in __imgu_mmu_map() argument
220 address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx); in __imgu_mmu_map()
244 * @iova: the virtual address
[all …]
/kernel/linux/linux-5.10/drivers/vfio/
vfio_iommu_type1.c
17 * the IOVA range that can be mapped. The Type1 IOMMU is currently
90 dma_addr_t iova; /* Device address */ member
126 dma_addr_t iova; /* Device address */ member
133 dma_addr_t iova; member
173 if (start + size <= dma->iova) in vfio_find_dma()
175 else if (start >= dma->iova + dma->size) in vfio_find_dma()
193 if (new->iova + new->size <= dma->iova) in vfio_link_dma()
243 bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1); in vfio_dma_populate_bitmap()
296 * Helper Functions for host iova-pfn list
298 static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova) in vfio_find_vpfn() argument
[all …]
/kernel/linux/linux-6.6/include/uapi/linux/
vduse.h
61 * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
63 * @start: start of the IOVA region
64 * @last: last of the IOVA region
65 * @perm: access permission of the IOVA region
67 * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region.
80 * Find the first IOVA region that overlaps with the range [start, last]
82 * IOVA region doesn't exist. Caller should set start and last fields.
214 * struct vduse_iova_umem - userspace memory configuration for one IOVA region
216 * @iova: start of the IOVA region
217 * @size: size of the IOVA region
[all …]
iommufd.h
31 * - ENOENT: An ID or IOVA provided does not exist.
71 * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA)
83 * @start: First IOVA
84 * @last: Inclusive last IOVA
86 * An interval in IOVA space.
100 * @out_iova_alignment: Minimum alignment required for mapping IOVA
102 * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
118 * out_iova_alignment returns the minimum IOVA alignment that can be given
119 * to IOMMU_IOAS_MAP/COPY. IOVA's must satisfy::
124 * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot
[all …]
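
The iommufd.h excerpt cuts off at the alignment rule, but the gist is that both ends of a mapping passed to IOMMU_IOAS_MAP/COPY must fall on out_iova_alignment boundaries. A paraphrased validity check (our reading of the uAPI comment, not a copy of it):

    #include <stdbool.h>
    #include <stdint.h>

    /* Paraphrase of the iommufd alignment requirement: start and end of
     * the mapping must be multiples of out_iova_alignment. An alignment
     * of 1 means any IOVA is allowed. */
    static bool iova_aligned(uint64_t iova, uint64_t length, uint64_t alignment)
    {
        return (iova % alignment) == 0 &&
               ((iova + length) % alignment) == 0;
    }

    int main(void)
    {
        /* 2 MiB alignment: aligned start and length pass (exit code 0). */
        return iova_aligned(0x40000000, 0x400000, 0x200000) ? 0 : 1;
    }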
/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/
rxe_mr.c
27 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) in mr_check_range() argument
35 if (iova < mr->ibmr.iova || in mr_check_range()
36 iova + length > mr->ibmr.iova + mr->ibmr.length) { in mr_check_range()
37 rxe_dbg_mr(mr, "iova/length out of range"); in mr_check_range()
75 static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova) in rxe_mr_iova_to_index() argument
77 return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift); in rxe_mr_iova_to_index()
80 static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova) in rxe_mr_iova_to_page_offset() argument
82 return iova & (mr_page_size(mr) - 1); in rxe_mr_iova_to_page_offset()
129 int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, in rxe_mr_init_user() argument
240 mr->page_offset = mr->ibmr.iova & (page_size - 1); in rxe_map_mr_sg()
[all …]
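
rxe_mr_iova_to_index() and rxe_mr_iova_to_page_offset() split an MR-relative IOVA into a page index (relative to ibmr.iova) and an in-page offset. A worked example with a 4 KiB page shift and made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned page_shift = 12;              /* 4 KiB pages */
        uint64_t mr_base = 0x7f0000012000ULL;  /* stands in for ibmr.iova */
        uint64_t iova    = 0x7f0000015abcULL;

        /* Same arithmetic as the two helpers above. */
        uint64_t index  = (iova >> page_shift) - (mr_base >> page_shift);
        uint64_t offset = iova & ((1ULL << page_shift) - 1);

        printf("index=%llu offset=%#llx\n",    /* index=3 offset=0xabc */
               (unsigned long long)index, (unsigned long long)offset);
        return 0;
    }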
/kernel/linux/linux-6.6/drivers/vdpa/vdpa_user/
iova_domain.c
104 u64 iova, u64 size, u64 paddr) in vduse_domain_map_bounce_page() argument
107 u64 last = iova + size - 1; in vduse_domain_map_bounce_page()
109 while (iova <= last) { in vduse_domain_map_bounce_page()
110 map = &domain->bounce_maps[iova >> PAGE_SHIFT]; in vduse_domain_map_bounce_page()
118 iova += PAGE_SIZE; in vduse_domain_map_bounce_page()
124 u64 iova, u64 size) in vduse_domain_unmap_bounce_page() argument
127 u64 last = iova + size - 1; in vduse_domain_unmap_bounce_page()
129 while (iova <= last) { in vduse_domain_unmap_bounce_page()
130 map = &domain->bounce_maps[iova >> PAGE_SHIFT]; in vduse_domain_unmap_bounce_page()
132 iova += PAGE_SIZE; in vduse_domain_unmap_bounce_page()
[all …]
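
vduse_domain_map_bounce_page() walks its range one page at a time, indexing the bounce-map array by page number; note the inclusive last = iova + size - 1 bound, which avoids overflow when a range ends at the top of the address space. A minimal model of the walk (page-aligned iova assumed, bounds checking left to the caller):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Touch one per-page slot for every page in [iova, iova + size). */
    static void walk_pages(uint8_t *map, uint64_t iova, uint64_t size)
    {
        uint64_t last = iova + size - 1;  /* inclusive end */

        while (iova <= last) {
            map[iova >> PAGE_SHIFT] = 1;
            iova += PAGE_SIZE;
        }
    }

    int main(void)
    {
        uint8_t map[8] = { 0 };
        int i;

        walk_pages(map, 0x1000, 0x3000);  /* spans pages 1..3 */
        for (i = 0; i < 8; i++)
            printf("%d", map[i]);         /* prints 01110000 */
        printf("\n");
        return 0;
    }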
/kernel/linux/linux-6.6/drivers/iommu/iommufd/
selftest.c
42 * Syzkaller has trouble randomizing the correct iova to use since it is linked
44 * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
48 u64 *iova) in iommufd_test_syz_conv_iova() argument
54 struct syz_layout *syz = (void *)iova; in iommufd_test_syz_conv_iova()
73 unsigned int ioas_id, u64 *iova, u32 *flags) in iommufd_test_syz_conv_iova_id() argument
84 *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova); in iommufd_test_syz_conv_iova_id()
179 unsigned long iova, phys_addr_t paddr, in mock_domain_map_pages() argument
186 unsigned long start_iova = iova; in mock_domain_map_pages()
195 WARN_ON(iova % MOCK_IO_PAGE_SIZE); in mock_domain_map_pages()
205 old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE, in mock_domain_map_pages()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
17 unsigned long iova, size_t size) in etnaviv_context_unmap() argument
22 if (!IS_ALIGNED(iova | size, pgsize)) { in etnaviv_context_unmap()
23 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n", in etnaviv_context_unmap()
24 iova, size, pgsize); in etnaviv_context_unmap()
29 unmapped_page = context->global->ops->unmap(context, iova, in etnaviv_context_unmap()
34 iova += unmapped_page; in etnaviv_context_unmap()
40 unsigned long iova, phys_addr_t paddr, in etnaviv_context_map() argument
43 unsigned long orig_iova = iova; in etnaviv_context_map()
48 if (!IS_ALIGNED(iova | paddr | size, pgsize)) { in etnaviv_context_map()
49 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n", in etnaviv_context_map()
[all …]
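
etnaviv_context_map() and etnaviv_context_unmap() validate alignment with IS_ALIGNED(iova | size, pgsize): OR-ing the operands merges their low bits, so the combined value is aligned only if every operand is individually aligned. A small illustration:

    #include <stdio.h>

    /* Same idea as the kernel's IS_ALIGNED(): x is aligned to the
     * power-of-two a when its low bits are all zero. */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        unsigned long iova = 0x10000, size = 0x3000, pgsize = 0x1000;

        /* Any stray low bit in either value survives the OR. */
        printf("%d\n", IS_ALIGNED(iova | size, pgsize));       /* 1 */
        printf("%d\n", IS_ALIGNED(iova | (size + 4), pgsize)); /* 0 */
        return 0;
    }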
