• Home
  • Raw
  • Download

Lines Matching full:iova

8 #include <linux/iova.h>
18 #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
46 static struct iova *to_iova(struct rb_node *node) in to_iova()
48 return rb_entry(node, struct iova, node); in to_iova()
56 * IOVA granularity will normally be equal to the smallest in init_iova_domain()
86 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new) in __cached_rbnode_insert_update()
95 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
97 struct iova *cached_iova; in __cached_rbnode_delete_update()
118 * enough to the highest-allocated IOVA that starting the allocation in iova_find_limit()
150 /* Insert the iova into domain rbtree by holding writer lock */
152 iova_insert_rbtree(struct rb_root *root, struct iova *iova, in iova_insert_rbtree() argument
160 struct iova *this = to_iova(*new); in iova_insert_rbtree()
164 if (iova->pfn_lo < this->pfn_lo) in iova_insert_rbtree()
166 else if (iova->pfn_lo > this->pfn_lo) in iova_insert_rbtree()
174 rb_link_node(&iova->node, parent, new); in iova_insert_rbtree()
175 rb_insert_color(&iova->node, root); in iova_insert_rbtree()
180 struct iova *new, bool size_aligned) in __alloc_and_insert_iova_range()
183 struct iova *curr_iova; in __alloc_and_insert_iova_range()
243 static struct iova *alloc_iova_mem(void) in alloc_iova_mem()
248 static void free_iova_mem(struct iova *iova) in free_iova_mem() argument
250 if (iova->pfn_lo != IOVA_ANCHOR) in free_iova_mem()
251 kmem_cache_free(iova_cache, iova); in free_iova_mem()
260 ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL, in iova_cache_get()
269 "iommu_iova", sizeof(struct iova), 0, in iova_cache_get()
274 pr_err("Couldn't create iova cache\n"); in iova_cache_get()
303 * alloc_iova - allocates an iova
304 * @iovad: - iova domain in question
308 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
310 * flag is set then the allocated address iova->pfn_lo will be naturally
313 struct iova *
318 struct iova *new_iova; in alloc_iova()
337 static struct iova *
345 struct iova *iova = to_iova(node); in private_find_iova() local
347 if (pfn < iova->pfn_lo) in private_find_iova()
349 else if (pfn > iova->pfn_hi) in private_find_iova()
352 return iova; /* pfn falls within iova's range */ in private_find_iova()
358 static void remove_iova(struct iova_domain *iovad, struct iova *iova) in remove_iova() argument
361 __cached_rbnode_delete_update(iovad, iova); in remove_iova()
362 rb_erase(&iova->node, &iovad->rbroot); in remove_iova()
366 * find_iova - finds an iova for a given pfn
367 * @iovad: - iova domain in question.
369 * This function finds and returns an iova belonging to the
372 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova()
375 struct iova *iova; in find_iova() local
379 iova = private_find_iova(iovad, pfn); in find_iova()
381 return iova; in find_iova()
386 * __free_iova - frees the given iova
387 * @iovad: iova domain in question.
388 * @iova: iova in question.
389 * Frees the given iova belonging to the given domain in __free_iova()
392 __free_iova(struct iova_domain *iovad, struct iova *iova) in __free_iova() argument
397 remove_iova(iovad, iova); in __free_iova()
399 free_iova_mem(iova); in __free_iova()
404 * free_iova - finds and frees the iova for a given pfn
405 * @iovad: - iova domain in question.
407 * This function finds an iova for a given pfn and then in free_iova()
408 * frees the iova from that domain.
414 struct iova *iova; in free_iova() local
417 iova = private_find_iova(iovad, pfn); in free_iova()
418 if (!iova) { in free_iova()
422 remove_iova(iovad, iova); in free_iova()
424 free_iova_mem(iova); in free_iova()
429 * alloc_iova_fast - allocates an iova from rcache
430 * @iovad: - iova domain in question
434 * This function tries to satisfy an iova allocation from the rcache,
443 struct iova *new_iova; in alloc_iova_fast()
446 * Freeing non-power-of-two-sized allocations back into the IOVA caches in alloc_iova_fast()
479 * free_iova_fast - free iova pfn range into rcache
480 * @iovad: - iova domain in question.
483 * This function frees an iova range by trying to put it into the rcache, in free_iova_fast()
484 * falling back to regular iova deallocation via free_iova() if this fails.
504 * put_iova_domain - destroys the iova domain
505 * @iovad: - iova domain in question.
506 * All the iova's in that domain are destroyed.
510 struct iova *iova, *tmp; in put_iova_domain() local
515 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) in put_iova_domain()
516 free_iova_mem(iova); in put_iova_domain()
524 struct iova *iova = to_iova(node); in __is_range_overlap() local
526 if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) in __is_range_overlap()
531 static inline struct iova *
534 struct iova *iova; in alloc_and_init_iova() local
536 iova = alloc_iova_mem(); in alloc_and_init_iova()
537 if (iova) { in alloc_and_init_iova()
538 iova->pfn_lo = pfn_lo; in alloc_and_init_iova()
539 iova->pfn_hi = pfn_hi; in alloc_and_init_iova()
542 return iova; in alloc_and_init_iova()
545 static struct iova *
549 struct iova *iova; in __insert_new_range() local
551 iova = alloc_and_init_iova(pfn_lo, pfn_hi); in __insert_new_range()
552 if (iova) in __insert_new_range()
553 iova_insert_rbtree(&iovad->rbroot, iova, NULL); in __insert_new_range()
555 return iova; in __insert_new_range()
559 __adjust_overlap_range(struct iova *iova, in __adjust_overlap_range() argument
562 if (*pfn_lo < iova->pfn_lo) in __adjust_overlap_range()
563 iova->pfn_lo = *pfn_lo; in __adjust_overlap_range()
564 if (*pfn_hi > iova->pfn_hi) in __adjust_overlap_range()
565 *pfn_lo = iova->pfn_hi + 1; in __adjust_overlap_range()
569 * reserve_iova - reserves an iova in the given range
570 * @iovad: - iova domain pointer
576 struct iova *
582 struct iova *iova; in reserve_iova() local
592 iova = to_iova(node); in reserve_iova()
593 __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); in reserve_iova()
594 if ((pfn_lo >= iova->pfn_lo) && in reserve_iova()
595 (pfn_hi <= iova->pfn_hi)) in reserve_iova()
606 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); in reserve_iova()
610 return iova; in reserve_iova()
615 * Magazine caches for IOVA ranges. For an introduction to magazines,
673 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns() local
675 if (WARN_ON(!iova)) in iova_magazine_free_pfns()
678 remove_iova(iovad, iova); in iova_magazine_free_pfns()
679 free_iova_mem(iova); in iova_magazine_free_pfns()
770 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
772 * space, and free_iova() (our only caller) will then return the IOVA
835 * Caller wants to allocate a new IOVA range from 'rcache'. If we can
874 * Try to satisfy IOVA allocation range from rcache. Fail if requested
919 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
939 * free all the IOVA ranges of global cache