Lines Matching refs:iovad

27 static bool iova_rcache_insert(struct iova_domain *iovad,
30 static unsigned long iova_rcache_get(struct iova_domain *iovad,
33 static void init_iova_rcaches(struct iova_domain *iovad);
34 static void free_iova_rcaches(struct iova_domain *iovad);
35 static void fq_destroy_all_entries(struct iova_domain *iovad);
39 init_iova_domain(struct iova_domain *iovad, unsigned long granule, in init_iova_domain() argument
49 spin_lock_init(&iovad->iova_rbtree_lock); in init_iova_domain()
50 iovad->rbroot = RB_ROOT; in init_iova_domain()
51 iovad->cached32_node = NULL; in init_iova_domain()
52 iovad->granule = granule; in init_iova_domain()
53 iovad->start_pfn = start_pfn; in init_iova_domain()
54 iovad->dma_32bit_pfn = pfn_32bit + 1; in init_iova_domain()
55 iovad->flush_cb = NULL; in init_iova_domain()
56 iovad->fq = NULL; in init_iova_domain()
57 init_iova_rcaches(iovad); in init_iova_domain()
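
These fragments are the identifier cross-reference for iovad in the kernel's IOVA allocator, drivers/iommu/iova.c; the setup_timer() call and the pfn_32bit argument place the tree around v4.14. The init_iova_domain() lines above set up the allocator state: rbtree root, cached 32-bit node, granule, and an initially absent flush queue. A minimal caller sketch, assuming that era's four-argument signature; my_iovad and my_domain_setup() are hypothetical names, and the later sketches reuse these includes and my_iovad:

    #include <linux/iova.h>
    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static struct iova_domain my_iovad;     /* hypothetical domain */

    static void my_domain_setup(void)
    {
            /* 4K IOMMU granule, allocations start at PFN 1, and the
             * cached 32-bit boundary sits at the last 32-bit PFN
             * (the code above stores pfn_32bit + 1). */
            init_iova_domain(&my_iovad, SZ_4K, 1,
                             DMA_BIT_MASK(32) >> PAGE_SHIFT);
    }
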
61 bool has_iova_flush_queue(struct iova_domain *iovad) in has_iova_flush_queue() argument
63 return !!iovad->fq; in has_iova_flush_queue()
66 static void free_iova_flush_queue(struct iova_domain *iovad) in free_iova_flush_queue() argument
68 if (!has_iova_flush_queue(iovad)) in free_iova_flush_queue()
71 if (timer_pending(&iovad->fq_timer)) in free_iova_flush_queue()
72 del_timer(&iovad->fq_timer); in free_iova_flush_queue()
74 fq_destroy_all_entries(iovad); in free_iova_flush_queue()
76 free_percpu(iovad->fq); in free_iova_flush_queue()
78 iovad->fq = NULL; in free_iova_flush_queue()
79 iovad->flush_cb = NULL; in free_iova_flush_queue()
80 iovad->entry_dtor = NULL; in free_iova_flush_queue()
83 int init_iova_flush_queue(struct iova_domain *iovad, in init_iova_flush_queue() argument
89 atomic64_set(&iovad->fq_flush_start_cnt, 0); in init_iova_flush_queue()
90 atomic64_set(&iovad->fq_flush_finish_cnt, 0); in init_iova_flush_queue()
96 iovad->flush_cb = flush_cb; in init_iova_flush_queue()
97 iovad->entry_dtor = entry_dtor; in init_iova_flush_queue()
111 iovad->fq = queue; in init_iova_flush_queue()
113 setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad); in init_iova_flush_queue()
114 atomic_set(&iovad->fq_timer_on, 0); in init_iova_flush_queue()
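
init_iova_flush_queue() wires in the deferred-free machinery seen above: a percpu ring, the flush timer, and the start/finish counters. A hedged sketch of enabling it; my_flush_all() is a hypothetical callback that must perform a domain-wide IOTLB invalidation for the hardware behind this domain:

    static void my_flush_all(struct iova_domain *iovad)
    {
            /* driver-specific: invalidate the whole domain's IOTLB */
    }

    static int my_enable_deferred_free(struct iova_domain *iovad)
    {
            /* No per-entry destructor needed, so entry_dtor is NULL. */
            return init_iova_flush_queue(iovad, my_flush_all, NULL);
    }
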
121 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) in __get_cached_rbnode() argument
123 if ((*limit_pfn > iovad->dma_32bit_pfn) || in __get_cached_rbnode()
124 (iovad->cached32_node == NULL)) in __get_cached_rbnode()
125 return rb_last(&iovad->rbroot); in __get_cached_rbnode()
127 struct rb_node *prev_node = rb_prev(iovad->cached32_node); in __get_cached_rbnode()
129 rb_entry(iovad->cached32_node, struct iova, node); in __get_cached_rbnode()
136 __cached_rbnode_insert_update(struct iova_domain *iovad, in __cached_rbnode_insert_update() argument
139 if (limit_pfn != iovad->dma_32bit_pfn) in __cached_rbnode_insert_update()
141 iovad->cached32_node = &new->node; in __cached_rbnode_insert_update()
145 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update() argument
150 if (!iovad->cached32_node) in __cached_rbnode_delete_update()
152 curr = iovad->cached32_node; in __cached_rbnode_delete_update()
160 if (node && iova->pfn_lo < iovad->dma_32bit_pfn) in __cached_rbnode_delete_update()
161 iovad->cached32_node = node; in __cached_rbnode_delete_update()
163 iovad->cached32_node = NULL; in __cached_rbnode_delete_update()
205 static int __alloc_and_insert_iova_range(struct iova_domain *iovad, in __alloc_and_insert_iova_range() argument
215 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
217 curr = __get_cached_rbnode(iovad, &limit_pfn); in __alloc_and_insert_iova_range()
239 if ((iovad->start_pfn + size + pad_size) > limit_pfn) { in __alloc_and_insert_iova_range()
240 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
250 iova_insert_rbtree(&iovad->rbroot, new, prev); in __alloc_and_insert_iova_range()
251 __cached_rbnode_insert_update(iovad, saved_pfn, new); in __alloc_and_insert_iova_range()
253 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
322 alloc_iova(struct iova_domain *iovad, unsigned long size, in alloc_iova() argument
333 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, in alloc_iova()
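
alloc_iova() is the uncached slow path: it walks the rbtree under the lock via __alloc_and_insert_iova_range(), starting from the cached node. A sketch of a size-aligned allocation below the 32-bit boundary; my_alloc_dma_addr() is hypothetical, while iova_dma_addr() is the helper from include/linux/iova.h:

    static dma_addr_t my_alloc_dma_addr(struct iova_domain *iovad,
                                        unsigned long pages)
    {
            struct iova *iova;

            /* limit_pfn is the highest allowed PFN (inclusive);
             * true asks for a size-aligned range. */
            iova = alloc_iova(iovad, pages,
                              DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
            if (!iova)
                    return 0;
            return iova_dma_addr(iovad, iova);
    }
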
346 private_find_iova(struct iova_domain *iovad, unsigned long pfn) in private_find_iova() argument
348 struct rb_node *node = iovad->rbroot.rb_node; in private_find_iova()
350 assert_spin_locked(&iovad->iova_rbtree_lock); in private_find_iova()
369 static void private_free_iova(struct iova_domain *iovad, struct iova *iova) in private_free_iova() argument
371 assert_spin_locked(&iovad->iova_rbtree_lock); in private_free_iova()
372 __cached_rbnode_delete_update(iovad, iova); in private_free_iova()
373 rb_erase(&iova->node, &iovad->rbroot); in private_free_iova()
384 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova() argument
390 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in find_iova()
391 iova = private_find_iova(iovad, pfn); in find_iova()
392 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in find_iova()
404 __free_iova(struct iova_domain *iovad, struct iova *iova) in __free_iova() argument
408 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __free_iova()
409 private_free_iova(iovad, iova); in __free_iova()
410 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __free_iova()
422 free_iova(struct iova_domain *iovad, unsigned long pfn) in free_iova() argument
424 struct iova *iova = find_iova(iovad, pfn); in free_iova()
427 __free_iova(iovad, iova); in free_iova()
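
find_iova(), __free_iova() and free_iova() differ only in who holds the struct iova: __free_iova() is for callers that already have the pointer, while free_iova() does the lookup itself. A sketch of releasing by DMA address; my_release() is hypothetical and iova_pfn() is the helper from include/linux/iova.h:

    static void my_release(struct iova_domain *iovad, dma_addr_t dma_addr)
    {
            /* Looks the range up and frees it under the rbtree lock. */
            free_iova(iovad, iova_pfn(iovad, dma_addr));
    }
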
441 alloc_iova_fast(struct iova_domain *iovad, unsigned long size, in alloc_iova_fast() argument
448 iova_pfn = iova_rcache_get(iovad, size, limit_pfn); in alloc_iova_fast()
453 new_iova = alloc_iova(iovad, size, limit_pfn, true); in alloc_iova_fast()
463 free_cpu_cached_iovas(cpu, iovad); in alloc_iova_fast()
480 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) in free_iova_fast() argument
482 if (iova_rcache_insert(iovad, pfn, size)) in free_iova_fast()
485 free_iova(iovad, pfn); in free_iova_fast()
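
alloc_iova_fast() tries the per-cpu rcache first and only then falls back to alloc_iova(); on failure it drains every CPU's cache once (the free_cpu_cached_iovas() call above) and retries. A round-trip sketch, assuming the three-argument v4.14 signature; sizes must match between alloc and free so the PFN returns to the right rcache size class:

    static int my_fast_roundtrip(struct iova_domain *iovad,
                                 unsigned long pages,
                                 unsigned long limit_pfn)
    {
            unsigned long pfn = alloc_iova_fast(iovad, pages, limit_pfn);

            if (!pfn)
                    return -ENOMEM;
            /* ... map and use [pfn, pfn + pages) ... */
            free_iova_fast(iovad, pfn, pages);  /* same size class */
            return 0;
    }
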
509 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq) in fq_ring_free() argument
511 u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt); in fq_ring_free()
521 if (iovad->entry_dtor) in fq_ring_free()
522 iovad->entry_dtor(fq->entries[idx].data); in fq_ring_free()
524 free_iova_fast(iovad, in fq_ring_free()
532 static void iova_domain_flush(struct iova_domain *iovad) in iova_domain_flush() argument
534 atomic64_inc(&iovad->fq_flush_start_cnt); in iova_domain_flush()
535 iovad->flush_cb(iovad); in iova_domain_flush()
536 atomic64_inc(&iovad->fq_flush_finish_cnt); in iova_domain_flush()
539 static void fq_destroy_all_entries(struct iova_domain *iovad) in fq_destroy_all_entries() argument
548 if (!iovad->entry_dtor) in fq_destroy_all_entries()
552 struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); in fq_destroy_all_entries()
556 iovad->entry_dtor(fq->entries[idx].data); in fq_destroy_all_entries()
562 struct iova_domain *iovad = (struct iova_domain *)data; in fq_flush_timeout() local
565 atomic_set(&iovad->fq_timer_on, 0); in fq_flush_timeout()
566 iova_domain_flush(iovad); in fq_flush_timeout()
572 fq = per_cpu_ptr(iovad->fq, cpu); in fq_flush_timeout()
574 fq_ring_free(iovad, fq); in fq_flush_timeout()
579 void queue_iova(struct iova_domain *iovad, in queue_iova() argument
583 struct iova_fq *fq = get_cpu_ptr(iovad->fq); in queue_iova()
594 fq_ring_free(iovad, fq); in queue_iova()
597 iova_domain_flush(iovad); in queue_iova()
598 fq_ring_free(iovad, fq); in queue_iova()
606 fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt); in queue_iova()
611 if (!atomic_read(&iovad->fq_timer_on) && in queue_iova()
612 !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1)) in queue_iova()
613 mod_timer(&iovad->fq_timer, in queue_iova()
616 put_cpu_ptr(iovad->fq); in queue_iova()
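
queue_iova() defers the free: the range sits in the per-cpu ring until the ring fills, which forces an iova_domain_flush(), or until the flush timer fires. A sketch of an unmap completion path in the style of the DMA-IOMMU layer; my_unmap_done() is hypothetical:

    static void my_unmap_done(struct iova_domain *iovad,
                              unsigned long pfn, unsigned long pages)
    {
            /* Defer the free until after the next IOTLB flush when a
             * flush queue exists; otherwise free immediately. */
            if (has_iova_flush_queue(iovad))
                    queue_iova(iovad, pfn, pages, 0);  /* data unused */
            else
                    free_iova_fast(iovad, pfn, pages);
    }
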
625 void put_iova_domain(struct iova_domain *iovad) in put_iova_domain() argument
630 free_iova_flush_queue(iovad); in put_iova_domain()
631 free_iova_rcaches(iovad); in put_iova_domain()
632 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in put_iova_domain()
633 node = rb_first(&iovad->rbroot); in put_iova_domain()
637 rb_erase(node, &iovad->rbroot); in put_iova_domain()
639 node = rb_first(&iovad->rbroot); in put_iova_domain()
641 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in put_iova_domain()
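
put_iova_domain() is the complete teardown: it destroys the flush queue, drains the rcaches, then erases every remaining rbtree node under the lock, so callers need no separate cleanup. A sketch for the hypothetical domain from the first sketch:

    static void my_domain_teardown(void)
    {
            /* Callers must guarantee no further alloc/free activity. */
            put_iova_domain(&my_iovad);
    }
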
671 __insert_new_range(struct iova_domain *iovad, in __insert_new_range() argument
678 iova_insert_rbtree(&iovad->rbroot, iova, NULL); in __insert_new_range()
702 reserve_iova(struct iova_domain *iovad, in reserve_iova() argument
710 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in reserve_iova()
711 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { in reserve_iova()
727 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); in reserve_iova()
730 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in reserve_iova()
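
reserve_iova() walks the whole tree and either extends an overlapping reservation or inserts a new range via __insert_new_range(), which is how drivers keep hardware windows out of the allocator. A sketch reserving the x86 interrupt/MSI address window (0xFEE00000-0xFEEFFFFF), mirroring the range the Intel IOMMU driver reserves; the function name is hypothetical:

    static int my_reserve_msi_window(struct iova_domain *iovad)
    {
            unsigned long lo = iova_pfn(iovad, 0xfee00000ULL);
            unsigned long hi = iova_pfn(iovad, 0xfeefffffULL);

            /* Returns the reserved iova, or NULL on failure. */
            return reserve_iova(iovad, lo, hi) ? 0 : -ENOMEM;
    }
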
763 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, in split_and_remove_iova() argument
769 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
781 __cached_rbnode_delete_update(iovad, iova); in split_and_remove_iova()
782 rb_erase(&iova->node, &iovad->rbroot); in split_and_remove_iova()
785 iova_insert_rbtree(&iovad->rbroot, prev, NULL); in split_and_remove_iova()
789 iova_insert_rbtree(&iovad->rbroot, next, NULL); in split_and_remove_iova()
792 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
797 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
835 iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) in iova_magazine_free_pfns() argument
843 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in iova_magazine_free_pfns()
846 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns()
849 private_free_iova(iovad, iova); in iova_magazine_free_pfns()
852 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in iova_magazine_free_pfns()
885 static void init_iova_rcaches(struct iova_domain *iovad) in init_iova_rcaches() argument
893 rcache = &iovad->rcaches[i]; in init_iova_rcaches()
914 static bool __iova_rcache_insert(struct iova_domain *iovad, in __iova_rcache_insert() argument
955 iova_magazine_free_pfns(mag_to_free, iovad); in __iova_rcache_insert()
962 static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, in iova_rcache_insert() argument
970 return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn); in iova_rcache_insert()
1017 static unsigned long iova_rcache_get(struct iova_domain *iovad, in iova_rcache_get() argument
1026 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn); in iova_rcache_get()
1032 static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad, in free_cpu_iova_rcache() argument
1040 iova_magazine_free_pfns(cpu_rcache->loaded, iovad); in free_cpu_iova_rcache()
1043 iova_magazine_free_pfns(cpu_rcache->prev, iovad); in free_cpu_iova_rcache()
1052 static void free_iova_rcaches(struct iova_domain *iovad) in free_iova_rcaches() argument
1060 rcache = &iovad->rcaches[i]; in free_iova_rcaches()
1062 free_cpu_iova_rcache(cpu, iovad, rcache); in free_iova_rcaches()
1066 iova_magazine_free_pfns(rcache->depot[j], iovad); in free_iova_rcaches()
1076 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) in free_cpu_cached_iovas() argument
1084 rcache = &iovad->rcaches[i]; in free_cpu_cached_iovas()
1087 iova_magazine_free_pfns(cpu_rcache->loaded, iovad); in free_cpu_cached_iovas()
1088 iova_magazine_free_pfns(cpu_rcache->prev, iovad); in free_cpu_cached_iovas()
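
free_cpu_cached_iovas() empties one CPU's loaded and prev magazines back into the rbtree; besides the retry path in alloc_iova_fast(), it is the natural CPU-hotplug hook, and the Intel driver registers the equivalent via cpuhp_setup_state() at boot. A sketch of a cpuhp "dead" callback, assuming the my_iovad domain from the first sketch; the callback name is hypothetical:

    static int my_iommu_cpu_dead(unsigned int cpu)
    {
            /* Return the dead CPU's cached PFN ranges to the rbtree
             * so they are not stranded in its per-cpu magazines. */
            free_cpu_cached_iovas(cpu, &my_iovad);
            return 0;
    }
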