Lines matching "iova" (whole-word search) in drivers/iommu/iova.c
8 #include <linux/iova.h>
34 * IOVA granularity will normally be equal to the smallest supported IOMMU page size; both *must* be powers of two. in init_iova_domain()
125 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new) in __cached_rbnode_insert_update()
134 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
136 struct iova *cached_iova; in __cached_rbnode_delete_update()
138 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); in __cached_rbnode_delete_update()
147 cached_iova = rb_entry(iovad->cached_node, struct iova, node); in __cached_rbnode_delete_update()
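Only the lines matching "iova" survive in the fragment above. As a reconstruction sketch of __cached_rbnode_delete_update() (the exact conditions are my recollection of this kernel era, not guaranteed by the listing): when a range is freed at or above a cached search-start node, the cache is repointed at the freed node's in-order successor, so the next top-down search can reuse the hole.

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	/* Freed range is 32-bit addressable and sits at/above the cached
	 * 32-bit start point: advance the cache past the new hole. */
	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free->pfn_hi < iovad->dma_32bit_pfn &&
	    free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);

	/* Same update for the unrestricted search-start cache. */
	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}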
152 /* Insert the iova into domain rbtree by holding writer lock */
154 iova_insert_rbtree(struct rb_root *root, struct iova *iova, in iova_insert_rbtree() argument
162 struct iova *this = rb_entry(*new, struct iova, node); in iova_insert_rbtree()
166 if (iova->pfn_lo < this->pfn_lo) in iova_insert_rbtree()
168 else if (iova->pfn_lo > this->pfn_lo) in iova_insert_rbtree()
176 rb_link_node(&iova->node, parent, new); in iova_insert_rbtree()
177 rb_insert_color(&iova->node, root); in iova_insert_rbtree()
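The listing skips the glue between those fragments; the full descent reconstructs to the usual rbtree insertion walk, keyed on pfn_lo (a sketch from this kernel era; the duplicate-key WARN_ON is part of that recollection, not of the listing):

/* Insert iova into the rbtree keyed by pfn_lo; 'start' lets callers resume
 * the walk from a known subtree instead of the root. */
static void iova_insert_rbtree(struct rb_root *root, struct iova *iova,
			       struct rb_node *start)
{
	struct rb_node **new = (start) ? &start : &(root->rb_node);
	struct rb_node *parent = NULL;

	/* Figure out where to put the new node. */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;
		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1);	/* duplicate pfn_lo: should never happen */
			return;
		}
	}
	/* Add the new node and rebalance the tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}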
182 struct iova *new, bool size_aligned) in __alloc_and_insert_iova_range()
185 struct iova *curr_iova; in __alloc_and_insert_iova_range()
200 curr_iova = rb_entry(curr, struct iova, node); in __alloc_and_insert_iova_range()
206 curr_iova = rb_entry(curr, struct iova, node); in __alloc_and_insert_iova_range()
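Only four lines of __alloc_and_insert_iova_range() matched, so here is a condensed sketch of its core (variable names come from the fragments; the control flow is an assumption based on this kernel era): the allocator walks the rbtree right-to-left from a cached starting node, sliding a size-aligned candidate window downward until it falls into a gap.

	/* Walk the tree backwards from the cached starting point. */
	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	do {
		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;	/* candidate just below curr */
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi);	/* still colliding: move left */

	if (limit_pfn < size || new_pfn < iovad->start_pfn)
		return -ENOMEM;	/* no gap of this size below limit_pfn */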
234 struct iova *alloc_iova_mem(void) in alloc_iova_mem()
240 void free_iova_mem(struct iova *iova) in free_iova_mem() argument
242 if (iova->pfn_lo != IOVA_ANCHOR) in free_iova_mem()
243 kmem_cache_free(iova_cache, iova); in free_iova_mem()
252 "iommu_iova", sizeof(struct iova), 0, in iova_cache_get()
256 pr_err("Couldn't create iova cache\n"); in iova_cache_get()
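iova_cache_get() refcounts one global kmem_cache shared by every IOVA user. A minimal usage sketch (the example_* functions are hypothetical):

#include <linux/iova.h>

static int example_init(void)		/* hypothetical driver init */
{
	int ret;

	ret = iova_cache_get();		/* first user creates "iommu_iova" */
	if (ret)
		return ret;
	/* ... safe to allocate struct iova from here on ... */
	return 0;
}

static void example_exit(void)
{
	iova_cache_put();		/* last user destroys the cache */
}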
283 * alloc_iova - allocates an iova
284 * @iovad: - iova domain in question
288 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
289 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
290 * flag is set then the allocated address iova->pfn_lo will be naturally
291 * aligned on roundup_power_of_two(size).
293 struct iova *
298 struct iova *new_iova; in alloc_iova()
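A usage sketch for a single top-down allocation, assuming an already initialised domain; iova_shift() and iova_dma_addr() are the helpers from <linux/iova.h>, and example_alloc() is hypothetical:

/* Allocate nr_pages IOVA pages below the device's DMA mask and return the
 * bus address, or 0 on failure. */
static dma_addr_t example_alloc(struct iova_domain *iovad,
				unsigned long nr_pages, u64 dma_mask)
{
	struct iova *iova;

	/* size_aligned=true: pfn_lo comes back aligned to the next
	 * power of two >= nr_pages. */
	iova = alloc_iova(iovad, nr_pages, dma_mask >> iova_shift(iovad), true);
	if (!iova)
		return 0;

	return iova_dma_addr(iovad, iova);	/* pfn_lo << iova_shift */
}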
317 static struct iova *
325 struct iova *iova = rb_entry(node, struct iova, node); in private_find_iova() local
327 if (pfn < iova->pfn_lo) in private_find_iova()
329 else if (pfn > iova->pfn_hi) in private_find_iova()
332 return iova; /* pfn falls within iova's range */ in private_find_iova()
338 static void private_free_iova(struct iova_domain *iovad, struct iova *iova) in private_free_iova() argument
341 __cached_rbnode_delete_update(iovad, iova); in private_free_iova()
342 rb_erase(&iova->node, &iovad->rbroot); in private_free_iova()
343 free_iova_mem(iova); in private_free_iova()
347 * find_iova - finds an iova for a given pfn
348 * @iovad: - iova domain in question.
350 * This function finds and returns an iova belonging to the given domain which matches the given pfn.
353 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova()
356 struct iova *iova; in find_iova() local
360 iova = private_find_iova(iovad, pfn); in find_iova()
362 return iova; in find_iova()
367 * __free_iova - frees the given iova
368 * @iovad: iova domain in question.
369 * @iova: iova in question.
370 * Frees the given iova belonging to the given domain
373 __free_iova(struct iova_domain *iovad, struct iova *iova) in __free_iova() argument
378 private_free_iova(iovad, iova); in __free_iova()
384 * free_iova - finds and frees the iova for a given pfn
385 * @iovad: - iova domain in question.
387 * This function finds an iova for a given pfn and then in free_iova()
388 * frees the iova from that domain.
393 struct iova *iova = find_iova(iovad, pfn); in free_iova() local
395 if (iova) in free_iova()
396 __free_iova(iovad, iova); in free_iova()
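Because the tree is keyed by pfn, a caller only has to remember the starting pfn of an allocation; a sketch of the round trip (iovad, nr_pages and limit_pfn assumed from context):

	struct iova *iova = alloc_iova(iovad, nr_pages, limit_pfn, true);

	if (iova) {
		unsigned long pfn = iova->pfn_lo;

		/* ... map, run the DMA, unmap ... */
		free_iova(iovad, pfn);	/* re-finds the iova by pfn and frees it */
	}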
402 * alloc_iova_fast - allocates an iova from rcache
403 * @iovad: - iova domain in question
407 * This function tries to satisfy an iova allocation from the rcache, and falls back to regular iova allocation on failure.
416 struct iova *new_iova; in alloc_iova_fast()
442 * free_iova_fast - free iova pfn range into rcache
443 * @iovad: - iova domain in question.
446 * This function frees an iova range by trying to put it into the rcache, in free_iova_fast()
447 * falling back to regular iova deallocation via free_iova() if this fails.
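The _fast pair trades the struct iova for a bare pfn so that the hot path never touches the rbtree on an rcache hit. A map/unmap sketch (iovad, nr_pages and limit_pfn assumed as in the alloc_iova example above):

	unsigned long pfn;

	/* flush_rcache=true: on failure, flush cached ranges back into
	 * the tree and retry before giving up (behaviour assumed from
	 * this kernel era). */
	pfn = alloc_iova_fast(iovad, nr_pages, limit_pfn, true);
	if (!pfn)
		return 0;	/* address space exhausted */

	/* ... program the IOMMU, run the DMA, tear down the mapping ... */

	free_iova_fast(iovad, pfn, nr_pages);	/* usually just refills a magazine */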
589 * put_iova_domain - destroys the iova domain
590 * @iovad: - iova domain in question.
591 * All the iovas in that domain are destroyed.
595 struct iova *iova, *tmp; in put_iova_domain() local
599 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) in put_iova_domain()
600 free_iova_mem(iova); in put_iova_domain()
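Domain lifecycle in one sketch; the init_iova_domain() signature (granule plus starting pfn) is assumed from the same kernel era as this file:

	struct iova_domain iovad;

	init_iova_domain(&iovad, SZ_4K, 1);	/* 4 KiB granule; pfn 0 stays invalid */
	/* ... alloc_iova()/free_iova() traffic ... */
	put_iova_domain(&iovad);	/* post-order walk frees every remaining iova */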
608 struct iova *iova = rb_entry(node, struct iova, node); in __is_range_overlap() local
610 if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) in __is_range_overlap()
615 static inline struct iova *
618 struct iova *iova; in alloc_and_init_iova() local
620 iova = alloc_iova_mem(); in alloc_and_init_iova()
621 if (iova) { in alloc_and_init_iova()
622 iova->pfn_lo = pfn_lo; in alloc_and_init_iova()
623 iova->pfn_hi = pfn_hi; in alloc_and_init_iova()
626 return iova; in alloc_and_init_iova()
629 static struct iova *
633 struct iova *iova; in __insert_new_range() local
635 iova = alloc_and_init_iova(pfn_lo, pfn_hi); in __insert_new_range()
636 if (iova) in __insert_new_range()
637 iova_insert_rbtree(&iovad->rbroot, iova, NULL); in __insert_new_range()
639 return iova; in __insert_new_range()
643 __adjust_overlap_range(struct iova *iova, in __adjust_overlap_range() argument
646 if (*pfn_lo < iova->pfn_lo) in __adjust_overlap_range()
647 iova->pfn_lo = *pfn_lo; in __adjust_overlap_range()
648 if (*pfn_hi > iova->pfn_hi) in __adjust_overlap_range()
649 *pfn_lo = iova->pfn_hi + 1; in __adjust_overlap_range()
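Concretely: reserving [5,25] against an existing node covering [10,20] stretches the node down to start at 5, then advances the request to [21,25] so the loop in reserve_iova() can handle the remainder against the next node. A self-contained userspace model of the same arithmetic:

struct range { unsigned long lo, hi; };

static void adjust_overlap(struct range *node,
			   unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < node->lo)
		node->lo = *pfn_lo;	/* node absorbs the low part of the request */
	if (*pfn_hi > node->hi)
		*pfn_lo = node->hi + 1;	/* remainder of the request continues above */
}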
653 * reserve_iova - reserves an iova in the given range
654 * @iovad: - iova domain pointer
660 struct iova *
666 struct iova *iova; in reserve_iova() local
676 iova = rb_entry(node, struct iova, node); in reserve_iova()
677 __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); in reserve_iova()
678 if ((pfn_lo >= iova->pfn_lo) && in reserve_iova()
679 (pfn_hi <= iova->pfn_hi)) in reserve_iova()
690 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); in reserve_iova()
694 return iova; in reserve_iova()
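Typical use is to punch holes in the allocatable space before any traffic starts, e.g. for a hardware MSI window; the addresses below are invented for illustration:

	unsigned long lo = 0xfee00000UL >> iova_shift(iovad);
	unsigned long hi = 0xfeefffffUL >> iova_shift(iovad);

	if (!reserve_iova(iovad, lo, hi))
		pr_err("failed to reserve MSI window\n");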
702 * This function copies reserved iovas from one domain to another. in copy_reserved_iova()
713 struct iova *iova = rb_entry(node, struct iova, node); in copy_reserved_iova() local
714 struct iova *new_iova; in copy_reserved_iova()
716 if (iova->pfn_lo == IOVA_ANCHOR) in copy_reserved_iova()
719 new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); in copy_reserved_iova()
721 pr_err("Reserve iova range %lx@%lx failed\n", in copy_reserved_iova()
722 iova->pfn_hi - iova->pfn_lo + 1, iova->pfn_lo); in copy_reserved_iova()
728 struct iova *
729 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, in split_and_remove_iova() argument
733 struct iova *prev = NULL, *next = NULL; in split_and_remove_iova()
736 if (iova->pfn_lo < pfn_lo) { in split_and_remove_iova()
737 prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1); in split_and_remove_iova()
741 if (iova->pfn_hi > pfn_hi) { in split_and_remove_iova()
742 next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi); in split_and_remove_iova()
747 __cached_rbnode_delete_update(iovad, iova); in split_and_remove_iova()
748 rb_erase(&iova->node, &iovad->rbroot); in split_and_remove_iova()
752 iova->pfn_lo = pfn_lo; in split_and_remove_iova()
756 iova->pfn_hi = pfn_hi; in split_and_remove_iova()
760 return iova; in split_and_remove_iova()
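A sketch of punching [pfn_lo, pfn_hi] out of an existing allocation: the surviving head and tail go back into the tree as independent iovas, while the detached middle piece is handed to the caller (that the caller disposes of it with free_iova_mem() is an assumption, not shown in the listing):

	struct iova *mid;

	mid = split_and_remove_iova(iovad, iova, pfn_lo, pfn_hi);
	if (!mid)
		return -ENOMEM;	/* couldn't allocate a split piece; tree untouched */

	/* 'mid' is no longer in the rbtree; the caller owns it now. */
	free_iova_mem(mid);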
770 * Magazine caches for IOVA ranges. For an introduction to magazines, see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
812 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns() local
814 if (WARN_ON(!iova)) in iova_magazine_free_pfns()
817 private_free_iova(iovad, iova); in iova_magazine_free_pfns()
886 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
887 * return true on success. Can fail if the rcache is full and we can't free
888 * space, and free_iova() (our only caller) will then return the IOVA
889 * range to the domain, which will put the iova struct back in the tree.
951 * Caller wants to allocate a new IOVA range from 'rcache'. If we can satisfy the request, return a matching non-NULL range and remove it from the 'rcache'.
990 * Try to satisfy IOVA allocation range from rcache. Fail if requested size is too big or the DMA limit we are given isn't satisfied by the top element in the magazine.
1030 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
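For orientation, the data structures behind the rcache as defined in this era of iova.c (treat the exact sizes as illustrative defaults): each CPU keeps two magazines of cached pfns per allocation order, with a global depot as overflow, and the unplug hook above drains the per-CPU pair back into the rbtree.

#define IOVA_MAG_SIZE	128	/* pfns per magazine */
#define MAX_GLOBAL_MAGS	32	/* depot capacity */

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;	/* magazine currently allocated from */
	struct iova_magazine *prev;	/* one swap away */
};

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};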