Lines matching refs: iovad (drivers/iommu/iova.c)
Each entry below is: <source line number> <matching source line> in <enclosing function>; "argument"/"local" marks where iovad is a parameter or local variable.
18 static bool iova_rcache_insert(struct iova_domain *iovad,
21 static unsigned long iova_rcache_get(struct iova_domain *iovad,
24 static void init_iova_rcaches(struct iova_domain *iovad);
25 static void free_iova_rcaches(struct iova_domain *iovad);
26 static void fq_destroy_all_entries(struct iova_domain *iovad);
30 init_iova_domain(struct iova_domain *iovad, unsigned long granule, in init_iova_domain() argument
40 spin_lock_init(&iovad->iova_rbtree_lock); in init_iova_domain()
41 iovad->rbroot = RB_ROOT; in init_iova_domain()
42 iovad->cached_node = &iovad->anchor.node; in init_iova_domain()
43 iovad->cached32_node = &iovad->anchor.node; in init_iova_domain()
44 iovad->granule = granule; in init_iova_domain()
45 iovad->start_pfn = start_pfn; in init_iova_domain()
46 iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad)); in init_iova_domain()
47 iovad->max32_alloc_size = iovad->dma_32bit_pfn; in init_iova_domain()
48 iovad->flush_cb = NULL; in init_iova_domain()
49 iovad->fq = NULL; in init_iova_domain()
50 iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; in init_iova_domain()
51 rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node); in init_iova_domain()
52 rb_insert_color(&iovad->anchor.node, &iovad->rbroot); in init_iova_domain()
53 init_iova_rcaches(iovad); in init_iova_domain()
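A minimal setup sketch for the API matched above; demo_domain, the SZ_4K granule, and the start_pfn of 1 are illustrative assumptions, not values from this listing:

    #include <linux/iova.h>
    #include <linux/sizes.h>

    static struct iova_domain demo_domain;

    static void demo_domain_setup(void)
    {
            /* granule = IOMMU page size; start_pfn = 1 keeps IOVA 0 unallocatable */
            init_iova_domain(&demo_domain, SZ_4K, 1);
    }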
57 bool has_iova_flush_queue(struct iova_domain *iovad) in has_iova_flush_queue() argument
59 return !!iovad->fq; in has_iova_flush_queue()
62 static void free_iova_flush_queue(struct iova_domain *iovad) in free_iova_flush_queue() argument
64 if (!has_iova_flush_queue(iovad)) in free_iova_flush_queue()
67 del_timer_sync(&iovad->fq_timer); in free_iova_flush_queue()
69 fq_destroy_all_entries(iovad); in free_iova_flush_queue()
71 free_percpu(iovad->fq); in free_iova_flush_queue()
73 iovad->fq = NULL; in free_iova_flush_queue()
74 iovad->flush_cb = NULL; in free_iova_flush_queue()
75 iovad->entry_dtor = NULL; in free_iova_flush_queue()
78 int init_iova_flush_queue(struct iova_domain *iovad, in init_iova_flush_queue() argument
84 atomic64_set(&iovad->fq_flush_start_cnt, 0); in init_iova_flush_queue()
85 atomic64_set(&iovad->fq_flush_finish_cnt, 0); in init_iova_flush_queue()
91 iovad->flush_cb = flush_cb; in init_iova_flush_queue()
92 iovad->entry_dtor = entry_dtor; in init_iova_flush_queue()
106 iovad->fq = queue; in init_iova_flush_queue()
108 timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); in init_iova_flush_queue()
109 atomic_set(&iovad->fq_timer_on, 0); in init_iova_flush_queue()
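A sketch of enabling the flush queue on the same hypothetical domain; demo_flush_all stands in for a driver's IOTLB invalidation, and the NULL entry_dtor means no per-entry cleanup:

    /* hypothetical callback: invalidate the hardware IOTLB for this domain */
    static void demo_flush_all(struct iova_domain *iovad)
    {
            /* hardware-specific invalidation would go here */
    }

    static int demo_enable_deferred_free(void)
    {
            /* on failure, callers simply keep freeing synchronously */
            return init_iova_flush_queue(&demo_domain, demo_flush_all, NULL);
    }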
116 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn) in __get_cached_rbnode() argument
118 if (limit_pfn <= iovad->dma_32bit_pfn) in __get_cached_rbnode()
119 return iovad->cached32_node; in __get_cached_rbnode()
121 return iovad->cached_node; in __get_cached_rbnode()
125 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new) in __cached_rbnode_insert_update() argument
127 if (new->pfn_hi < iovad->dma_32bit_pfn) in __cached_rbnode_insert_update()
128 iovad->cached32_node = &new->node; in __cached_rbnode_insert_update()
130 iovad->cached_node = &new->node; in __cached_rbnode_insert_update()
134 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update() argument
138 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); in __cached_rbnode_delete_update()
140 (free->pfn_hi < iovad->dma_32bit_pfn && in __cached_rbnode_delete_update()
142 iovad->cached32_node = rb_next(&free->node); in __cached_rbnode_delete_update()
144 if (free->pfn_lo < iovad->dma_32bit_pfn) in __cached_rbnode_delete_update()
145 iovad->max32_alloc_size = iovad->dma_32bit_pfn; in __cached_rbnode_delete_update()
147 cached_iova = rb_entry(iovad->cached_node, struct iova, node); in __cached_rbnode_delete_update()
149 iovad->cached_node = rb_next(&free->node); in __cached_rbnode_delete_update()
180 static int __alloc_and_insert_iova_range(struct iova_domain *iovad, in __alloc_and_insert_iova_range() argument
194 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
195 if (limit_pfn <= iovad->dma_32bit_pfn && in __alloc_and_insert_iova_range()
196 size >= iovad->max32_alloc_size) in __alloc_and_insert_iova_range()
199 curr = __get_cached_rbnode(iovad, limit_pfn); in __alloc_and_insert_iova_range()
209 if (limit_pfn < size || new_pfn < iovad->start_pfn) { in __alloc_and_insert_iova_range()
210 iovad->max32_alloc_size = size; in __alloc_and_insert_iova_range()
219 iova_insert_rbtree(&iovad->rbroot, new, prev); in __alloc_and_insert_iova_range()
220 __cached_rbnode_insert_update(iovad, new); in __alloc_and_insert_iova_range()
222 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
226 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
294 alloc_iova(struct iova_domain *iovad, unsigned long size, in alloc_iova() argument
305 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, in alloc_iova()
318 private_find_iova(struct iova_domain *iovad, unsigned long pfn) in private_find_iova() argument
320 struct rb_node *node = iovad->rbroot.rb_node; in private_find_iova()
322 assert_spin_locked(&iovad->iova_rbtree_lock); in private_find_iova()
338 static void private_free_iova(struct iova_domain *iovad, struct iova *iova) in private_free_iova() argument
340 assert_spin_locked(&iovad->iova_rbtree_lock); in private_free_iova()
341 __cached_rbnode_delete_update(iovad, iova); in private_free_iova()
342 rb_erase(&iova->node, &iovad->rbroot); in private_free_iova()
353 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova() argument
359 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in find_iova()
360 iova = private_find_iova(iovad, pfn); in find_iova()
361 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in find_iova()
373 __free_iova(struct iova_domain *iovad, struct iova *iova) in __free_iova() argument
377 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __free_iova()
378 private_free_iova(iovad, iova); in __free_iova()
379 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __free_iova()
391 free_iova(struct iova_domain *iovad, unsigned long pfn) in free_iova() argument
393 struct iova *iova = find_iova(iovad, pfn); in free_iova()
396 __free_iova(iovad, iova); in free_iova()
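The strict alloc/free pair, sketched under the same assumptions; nr_pages is in granule units and the limit keeps allocations below 4 GiB, as dma-iommu-style callers do:

    struct iova *iova;
    unsigned long limit = DMA_BIT_MASK(32) >> iova_shift(&demo_domain);

    iova = alloc_iova(&demo_domain, nr_pages, limit, true); /* size-aligned */
    if (!iova)
            return -ENOMEM;
    /* map at iova_dma_addr(&demo_domain, iova), use, unmap, then: */
    free_iova(&demo_domain, iova->pfn_lo);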
412 alloc_iova_fast(struct iova_domain *iovad, unsigned long size, in alloc_iova_fast() argument
418 iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1); in alloc_iova_fast()
423 new_iova = alloc_iova(iovad, size, limit_pfn, true); in alloc_iova_fast()
433 free_cpu_cached_iovas(cpu, iovad); in alloc_iova_fast()
450 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) in free_iova_fast() argument
452 if (iova_rcache_insert(iovad, pfn, size)) in free_iova_fast()
455 free_iova(iovad, pfn); in free_iova_fast()
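The rcache fast path, same assumptions; on a magazine hit neither call takes the rbtree lock, and the final true asks alloc_iova_fast() to flush the per-CPU caches and retry once before giving up:

    unsigned long pfn;

    pfn = alloc_iova_fast(&demo_domain, nr_pages, limit, true);
    if (!pfn)
            return -ENOMEM;
    /* ... */
    free_iova_fast(&demo_domain, pfn, nr_pages); /* back into a magazine */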
479 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq) in fq_ring_free() argument
481 u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt); in fq_ring_free()
491 if (iovad->entry_dtor) in fq_ring_free()
492 iovad->entry_dtor(fq->entries[idx].data); in fq_ring_free()
494 free_iova_fast(iovad, in fq_ring_free()
502 static void iova_domain_flush(struct iova_domain *iovad) in iova_domain_flush() argument
504 atomic64_inc(&iovad->fq_flush_start_cnt); in iova_domain_flush()
505 iovad->flush_cb(iovad); in iova_domain_flush()
506 atomic64_inc(&iovad->fq_flush_finish_cnt); in iova_domain_flush()
509 static void fq_destroy_all_entries(struct iova_domain *iovad) in fq_destroy_all_entries() argument
518 if (!iovad->entry_dtor) in fq_destroy_all_entries()
522 struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); in fq_destroy_all_entries()
526 iovad->entry_dtor(fq->entries[idx].data); in fq_destroy_all_entries()
532 struct iova_domain *iovad = from_timer(iovad, t, fq_timer); in fq_flush_timeout() local
535 atomic_set(&iovad->fq_timer_on, 0); in fq_flush_timeout()
536 iova_domain_flush(iovad); in fq_flush_timeout()
542 fq = per_cpu_ptr(iovad->fq, cpu); in fq_flush_timeout()
544 fq_ring_free(iovad, fq); in fq_flush_timeout()
549 void queue_iova(struct iova_domain *iovad, in queue_iova() argument
553 struct iova_fq *fq = raw_cpu_ptr(iovad->fq); in queue_iova()
564 fq_ring_free(iovad, fq); in queue_iova()
567 iova_domain_flush(iovad); in queue_iova()
568 fq_ring_free(iovad, fq); in queue_iova()
576 fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt); in queue_iova()
581 if (!atomic_read(&iovad->fq_timer_on) && in queue_iova()
582 !atomic_xchg(&iovad->fq_timer_on, 1)) in queue_iova()
583 mod_timer(&iovad->fq_timer, in queue_iova()
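With a flush queue present, a deferred free can replace free_iova_fast(); the entry is recycled by fq_ring_free() once fq_flush_finish_cnt shows its invalidation completed. The trailing 0 is the opaque entry_dtor cookie, unused in this sketch:

    queue_iova(&demo_domain, pfn, nr_pages, 0);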
593 void put_iova_domain(struct iova_domain *iovad) in put_iova_domain() argument
597 free_iova_flush_queue(iovad); in put_iova_domain()
598 free_iova_rcaches(iovad); in put_iova_domain()
599 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) in put_iova_domain()
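Teardown of the sketch domain; this one call removes the flush queue, drains the rcaches, and frees every remaining rbtree entry:

    put_iova_domain(&demo_domain);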
630 __insert_new_range(struct iova_domain *iovad, in __insert_new_range() argument
637 iova_insert_rbtree(&iovad->rbroot, iova, NULL); in __insert_new_range()
661 reserve_iova(struct iova_domain *iovad, in reserve_iova() argument
670 if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad)))) in reserve_iova()
673 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in reserve_iova()
674 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { in reserve_iova()
690 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); in reserve_iova()
693 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in reserve_iova()
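A reservation sketch; msi_base is a hypothetical doorbell address, and reserve_iova() pins the range in the rbtree so the allocator can never hand it out:

    if (!reserve_iova(&demo_domain, iova_pfn(&demo_domain, msi_base),
                      iova_pfn(&demo_domain, msi_base + SZ_64K - 1)))
            pr_err("could not reserve doorbell window\n");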
729 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, in split_and_remove_iova() argument
735 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
747 __cached_rbnode_delete_update(iovad, iova); in split_and_remove_iova()
748 rb_erase(&iova->node, &iovad->rbroot); in split_and_remove_iova()
751 iova_insert_rbtree(&iovad->rbroot, prev, NULL); in split_and_remove_iova()
755 iova_insert_rbtree(&iovad->rbroot, next, NULL); in split_and_remove_iova()
758 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
763 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
801 iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) in iova_magazine_free_pfns() argument
809 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in iova_magazine_free_pfns()
812 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns()
817 private_free_iova(iovad, iova); in iova_magazine_free_pfns()
820 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in iova_magazine_free_pfns()
862 static void init_iova_rcaches(struct iova_domain *iovad) in init_iova_rcaches() argument
870 rcache = &iovad->rcaches[i]; in init_iova_rcaches()
891 static bool __iova_rcache_insert(struct iova_domain *iovad, in __iova_rcache_insert() argument
932 iova_magazine_free_pfns(mag_to_free, iovad); in __iova_rcache_insert()
939 static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, in iova_rcache_insert() argument
947 return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn); in iova_rcache_insert()
994 static unsigned long iova_rcache_get(struct iova_domain *iovad, in iova_rcache_get() argument
1003 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size); in iova_rcache_get()
1009 static void free_iova_rcaches(struct iova_domain *iovad) in free_iova_rcaches() argument
1017 rcache = &iovad->rcaches[i]; in free_iova_rcaches()
1032 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) in free_cpu_cached_iovas() argument
1040 rcache = &iovad->rcaches[i]; in free_cpu_cached_iovas()
1043 iova_magazine_free_pfns(cpu_rcache->loaded, iovad); in free_cpu_cached_iovas()
1044 iova_magazine_free_pfns(cpu_rcache->prev, iovad); in free_cpu_cached_iovas()
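free_cpu_cached_iovas() also suits a CPU hot-unplug callback, so a dead CPU's loaded and prev magazines drain back into the rbtree; a sketch with the cpuhp registration omitted:

    static int demo_cpu_dead(unsigned int cpu)
    {
            free_cpu_cached_iovas(cpu, &demo_domain);
            return 0;
    }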