Lines matching refs:fq — cross-reference hits for the IOVA flush-queue code (drivers/iommu/iova.c, from a kernel before the flush-queue code moved into dma-iommu.c). Each hit lists the source line number, the matched line, and the enclosing function; the trailing "local" and "argument" tags mark definition sites (a local variable, a macro argument) found by the indexer.
49 iovad->fq = NULL; in init_iova_domain()
60 return !!iovad->fq; in has_iova_flush_queue()
72 free_percpu(iovad->fq); in free_iova_flush_queue()
74 iovad->fq = NULL; in free_iova_flush_queue()
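
Taken together, the first three hits show the lifecycle convention for iovad->fq: init_iova_domain() starts it out NULL, has_iova_flush_queue() tests that same pointer, and free_iova_flush_queue() releases the per-CPU storage and resets the pointer so the test stays accurate. A minimal userspace sketch of the NULL-as-"feature off" idiom, with plain malloc/free standing in for the kernel's per-CPU allocator (the iova_domain and iova_fq types here are stand-ins, not the kernel definitions):

    #include <stdbool.h>
    #include <stdlib.h>

    struct iova_fq { unsigned head, tail; };

    struct iova_domain {
        struct iova_fq *fq;       /* NULL means "no flush queue configured" */
    };

    static void init_iova_domain(struct iova_domain *iovad)
    {
        iovad->fq = NULL;         /* start with deferred flushing off */
    }

    static bool has_iova_flush_queue(struct iova_domain *iovad)
    {
        return iovad->fq != NULL; /* the kernel writes this as !!iovad->fq */
    }

    static void free_iova_flush_queue(struct iova_domain *iovad)
    {
        free(iovad->fq);          /* free_percpu() in the kernel */
        iovad->fq = NULL;         /* keep later has_iova_flush_queue() honest */
    }
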
96 struct iova_fq *fq; in init_iova_flush_queue() local
98 fq = per_cpu_ptr(queue, cpu); in init_iova_flush_queue()
99 fq->head = 0; in init_iova_flush_queue()
100 fq->tail = 0; in init_iova_flush_queue()
102 spin_lock_init(&fq->lock); in init_iova_flush_queue()
107 iovad->fq = queue; in init_iova_flush_queue()
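
init_iova_flush_queue() walks every possible CPU, zeroes that CPU's head and tail (head == tail encodes an empty ring) and initializes its lock, and only then publishes the queue pointer, so has_iova_flush_queue() never observes half-initialized state. A sketch of the same initialize-then-publish pattern, assuming a fixed NR_CPUS array in place of alloc_percpu()/per_cpu_ptr() and a pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdlib.h>

    #define NR_CPUS 4                 /* stand-in for the possible-CPU mask */

    struct iova_fq {
        unsigned head, tail;
        pthread_mutex_t lock;         /* spinlock_t in the kernel */
    };

    struct iova_domain { struct iova_fq *fq; };

    static int init_iova_flush_queue(struct iova_domain *iovad)
    {
        struct iova_fq *queue = calloc(NR_CPUS, sizeof(*queue)); /* alloc_percpu() */

        if (!queue)
            return -1;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {  /* for_each_possible_cpu() */
            struct iova_fq *fq = &queue[cpu];      /* per_cpu_ptr(queue, cpu) */

            fq->head = 0;                          /* head == tail: empty */
            fq->tail = 0;
            pthread_mutex_init(&fq->lock, NULL);
        }

        iovad->fq = queue;   /* publish only after every queue is ready */
        return 0;
    }
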
559 #define fq_ring_for_each(i, fq) \ argument
560 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
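
fq_ring_for_each() walks the occupied slots from head up to (but not including) tail, wrapping modulo IOVA_FQ_SIZE; an empty ring (head == tail) runs zero iterations. A small demo of the wrap-around, shrinking the ring from the kernel's 256 slots to 8:

    #include <stdio.h>

    #define IOVA_FQ_SIZE 8   /* 256 in the kernel; 8 keeps the demo readable */

    #define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

    struct iova_fq { unsigned head, tail; };

    int main(void)
    {
        struct iova_fq fq = { .head = 6, .tail = 1 };  /* occupied: 6, 7, 0 */
        unsigned i;

        fq_ring_for_each(i, &fq)
            printf("slot %u\n", i);                    /* prints 6, 7, 0 */
        return 0;
    }
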
562 static inline bool fq_full(struct iova_fq *fq) in fq_full() argument
564 assert_spin_locked(&fq->lock); in fq_full()
565 return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); in fq_full()
568 static inline unsigned fq_ring_add(struct iova_fq *fq) in fq_ring_add() argument
570 unsigned idx = fq->tail; in fq_ring_add()
572 assert_spin_locked(&fq->lock); in fq_ring_add()
574 fq->tail = (idx + 1) % IOVA_FQ_SIZE; in fq_ring_add()
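
fq_full() and fq_ring_add() are the producer side of the ring. Because head == tail already means "empty", the ring reports full one slot early, sacrificing a single entry so that full and empty stay distinguishable; fq_ring_add() then hands out the slot at tail and advances it with the same modular wrap. A compact userspace rendering (the unsigned long payload is a simplification; the real entries carry a pfn, a page count, a cookie, and a counter):

    #include <stdbool.h>
    #include <stdio.h>

    #define IOVA_FQ_SIZE 8

    struct iova_fq {
        unsigned head, tail;
        unsigned long entries[IOVA_FQ_SIZE];  /* simplified slot payload */
    };

    static bool fq_full(struct iova_fq *fq)
    {
        /* Full one slot early: head == tail is reserved for "empty". */
        return ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
    }

    static unsigned fq_ring_add(struct iova_fq *fq)
    {
        unsigned idx = fq->tail;              /* slot handed to the caller */

        fq->tail = (idx + 1) % IOVA_FQ_SIZE;  /* advance with wrap-around */
        return idx;
    }

    int main(void)
    {
        struct iova_fq fq = { .head = 0, .tail = 0 };

        while (!fq_full(&fq))                 /* usable capacity is SIZE - 1 */
            fq.entries[fq_ring_add(&fq)] = 0;
        printf("full after %u adds\n", fq.tail);  /* 7 with an 8-slot ring */
        return 0;
    }

The assert_spin_locked() calls dropped here document that, in the kernel, callers must already hold fq->lock before touching head or tail.
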
579 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq) in fq_ring_free() argument
584 assert_spin_locked(&fq->lock); in fq_ring_free()
586 fq_ring_for_each(idx, fq) { in fq_ring_free()
588 if (fq->entries[idx].counter >= counter) in fq_ring_free()
592 iovad->entry_dtor(fq->entries[idx].data); in fq_ring_free()
595 fq->entries[idx].iova_pfn, in fq_ring_free()
596 fq->entries[idx].pages); in fq_ring_free()
598 fq->head = (fq->head + 1) % IOVA_FQ_SIZE; in fq_ring_free()
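
fq_ring_free() is the consumer: it scans from head toward tail and retires entries, stopping at the first one whose counter is not yet behind the domain's flush counter, since entries queued after the last completed IOTLB flush are not yet safe to reuse. The hits elide a few surrounding lines: the counter is read from iovad->fq_flush_finish_cnt, the destructor call is guarded by a NULL check on iovad->entry_dtor, and the pfn/pages pair feeds free_iova_fast(). A sketch of the stop-at-unflushed-entry scan, with the atomics reduced to plain fields:

    #include <pthread.h>
    #include <stdint.h>

    #define IOVA_FQ_SIZE 8

    struct iova_fq_entry {
        unsigned long iova_pfn;   /* base pfn of the deferred range */
        unsigned long pages;      /* length of the range in pages */
        unsigned long data;       /* opaque cookie for entry_dtor */
        uint64_t counter;         /* flush generation when queued */
    };

    struct iova_fq {
        unsigned head, tail;
        pthread_mutex_t lock;     /* held by callers of fq_ring_free() */
        struct iova_fq_entry entries[IOVA_FQ_SIZE];
    };

    struct iova_domain {
        uint64_t fq_flush_start_cnt;   /* an atomic64_t pair in the kernel */
        uint64_t fq_flush_finish_cnt;
        void (*entry_dtor)(unsigned long data);
    };

    /* Stand-in for the kernel's free_iova_fast(). */
    static void free_iova_fast(struct iova_domain *iovad,
                               unsigned long pfn, unsigned long pages)
    {
        (void)iovad; (void)pfn; (void)pages;
    }

    static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
    {
        uint64_t counter = iovad->fq_flush_finish_cnt;
        unsigned idx;

        for (idx = fq->head; idx != fq->tail; idx = (idx + 1) % IOVA_FQ_SIZE) {
            /* Entries sit in queue order, so the first one stamped at or
             * after the last finished flush ends the whole scan. */
            if (fq->entries[idx].counter >= counter)
                break;

            if (iovad->entry_dtor)
                iovad->entry_dtor(fq->entries[idx].data);

            free_iova_fast(iovad, fq->entries[idx].iova_pfn,
                           fq->entries[idx].pages);

            fq->head = (fq->head + 1) % IOVA_FQ_SIZE;  /* retire the slot */
        }
    }
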
622 struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); in fq_destroy_all_entries() local
625 fq_ring_for_each(idx, fq) in fq_destroy_all_entries()
626 iovad->entry_dtor(fq->entries[idx].data); in fq_destroy_all_entries()
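
fq_destroy_all_entries() runs on the teardown path, where no further flush is coming: it visits every CPU's queue and invokes only the entry destructor, deliberately skipping free_iova_fast() because the whole IOVA domain is about to be destroyed anyway. A sketch reusing the stand-in types from the fq_ring_free() example above, with a plain array in place of the per-CPU allocation:

    static void fq_destroy_all_entries(struct iova_domain *iovad,
                                       struct iova_fq *queues, int nr_cpus)
    {
        if (!iovad->entry_dtor)       /* nothing to clean up per entry */
            return;

        for (int cpu = 0; cpu < nr_cpus; cpu++) {  /* for_each_possible_cpu() */
            struct iova_fq *fq = &queues[cpu];     /* per_cpu_ptr(iovad->fq, cpu) */
            unsigned idx;

            /* Only the destructor runs; the IOVAs themselves are not
             * returned, since the domain is going away regardless. */
            for (idx = fq->head; idx != fq->tail; idx = (idx + 1) % IOVA_FQ_SIZE)
                iovad->entry_dtor(fq->entries[idx].data);
        }
    }
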
640 struct iova_fq *fq; in fq_flush_timeout() local
642 fq = per_cpu_ptr(iovad->fq, cpu); in fq_flush_timeout()
643 spin_lock_irqsave(&fq->lock, flags); in fq_flush_timeout()
644 fq_ring_free(iovad, fq); in fq_flush_timeout()
645 spin_unlock_irqrestore(&fq->lock, flags); in fq_flush_timeout()
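
fq_flush_timeout() is the timer path: when the deferred-flush timer fires, the kernel performs one global IOTLB flush (via iova_domain_flush(), which falls outside the matched lines) and then drains every CPU's ring under its lock. A heavily simplified model on the same stand-in types; the two counter bumps approximate iova_domain_flush(), and a pthread mutex again replaces spin_lock_irqsave(), for which userspace has no equivalent:

    static void fq_flush_timeout(struct iova_domain *iovad,
                                 struct iova_fq *queues, int nr_cpus)
    {
        iovad->fq_flush_start_cnt++;
        /* ... global IOTLB flush via the domain's flush callback ... */
        iovad->fq_flush_finish_cnt++;

        for (int cpu = 0; cpu < nr_cpus; cpu++) {
            struct iova_fq *fq = &queues[cpu];

            pthread_mutex_lock(&fq->lock);      /* spin_lock_irqsave() */
            fq_ring_free(iovad, fq);            /* everything queued before
                                                   the flush is now safe */
            pthread_mutex_unlock(&fq->lock);    /* spin_unlock_irqrestore() */
        }
    }
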
653 struct iova_fq *fq = raw_cpu_ptr(iovad->fq); in queue_iova() local
657 spin_lock_irqsave(&fq->lock, flags); in queue_iova()
664 fq_ring_free(iovad, fq); in queue_iova()
666 if (fq_full(fq)) { in queue_iova()
668 fq_ring_free(iovad, fq); in queue_iova()
671 idx = fq_ring_add(fq); in queue_iova()
673 fq->entries[idx].iova_pfn = pfn; in queue_iova()
674 fq->entries[idx].pages = pages; in queue_iova()
675 fq->entries[idx].data = data; in queue_iova()
676 fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt); in queue_iova()
678 spin_unlock_irqrestore(&fq->lock, flags); in queue_iova()
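
queue_iova() is the fast path run on every deferred unmap. Under the per-CPU lock it first reclaims entries that an earlier flush already covered (line 664), forces a flush and reclaims again only if the ring is still full (the iova_domain_flush() call between lines 666 and 668 is outside the matched lines), then stamps the new entry with the current fq_flush_start_cnt so fq_ring_free() knows which flush must finish before this IOVA can be reused. A condensed sketch building on the helpers above, with flush_and_bump() as a hypothetical stand-in for iova_domain_flush():

    static void flush_and_bump(struct iova_domain *iovad)
    {
        iovad->fq_flush_start_cnt++;
        /* ... hardware IOTLB flush ... */
        iovad->fq_flush_finish_cnt++;
    }

    static void queue_iova(struct iova_domain *iovad, struct iova_fq *fq,
                           unsigned long pfn, unsigned long pages,
                           unsigned long data)
    {
        unsigned idx;

        pthread_mutex_lock(&fq->lock);      /* spin_lock_irqsave() */

        fq_ring_free(iovad, fq);            /* cheap reclaim first */

        if (fq_full(fq)) {                  /* still no room? */
            flush_and_bump(iovad);          /* force a flush ... */
            fq_ring_free(iovad, fq);        /* ... then reclaim again */
        }

        idx = fq_ring_add(fq);

        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
        fq->entries[idx].data     = data;
        /* Stamp the current flush generation: the slot is reclaimed only
           once a flush that started after this point has finished. */
        fq->entries[idx].counter  = iovad->fq_flush_start_cnt;

        pthread_mutex_unlock(&fq->lock);    /* spin_unlock_irqrestore() */
    }

Past line 678 the original also arms the one-shot flush timer when it is not already pending, which is what eventually drives fq_flush_timeout() above; those lines do not reference fq and therefore do not appear in this hit list.
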