
Lines Matching refs:fq

57 			struct iova_fq __percpu *fq;	/* Flush queue */  member
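This member is a per-CPU pointer (note __percpu) in the DMA cookie (struct iommu_dma_cookie, judging by the cookie->fq accesses below): every CPU owns its own flush-queue ring, reached via per_cpu_ptr() or raw_cpu_ptr() in the functions that follow.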
111 #define fq_ring_for_each(i, fq) \ argument
112 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
114 static inline bool fq_full(struct iova_fq *fq) in fq_full() argument
116 assert_spin_locked(&fq->lock); in fq_full()
117 return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); in fq_full()
120 static inline unsigned int fq_ring_add(struct iova_fq *fq) in fq_ring_add() argument
122 unsigned int idx = fq->tail; in fq_ring_add()
124 assert_spin_locked(&fq->lock); in fq_ring_add()
126 fq->tail = (idx + 1) % IOVA_FQ_SIZE; in fq_ring_add()
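The three helpers above implement fixed-size ring indexing: head chases tail, both wrap modulo IOVA_FQ_SIZE, and the ring counts as full when advancing tail would collide with head, so one slot always stays empty. A minimal standalone sketch of that indexing, assuming simplified names (FQ_SIZE and struct fq here are illustrative, not the kernel's IOVA_FQ_SIZE and struct iova_fq):

#include <stdbool.h>

#define FQ_SIZE 256

struct fq {
        unsigned int head;      /* oldest queued entry, next to be reclaimed */
        unsigned int tail;      /* next slot to fill */
};

/* Full when advancing tail would run into head; one slot is kept empty. */
static bool fq_is_full(const struct fq *fq)
{
        return ((fq->tail + 1) % FQ_SIZE) == fq->head;
}

/* Claim the slot at tail and advance it, wrapping modulo FQ_SIZE. */
static unsigned int fq_add(struct fq *fq)
{
        unsigned int idx = fq->tail;

        fq->tail = (idx + 1) % FQ_SIZE;
        return idx;
}

With this scheme the queue holds at most FQ_SIZE - 1 entries, and emptiness is simply head == tail, which is the condition fq_ring_for_each() iterates up to.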
131 static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq) in fq_ring_free() argument
136 assert_spin_locked(&fq->lock); in fq_ring_free()
138 fq_ring_for_each(idx, fq) { in fq_ring_free()
140 if (fq->entries[idx].counter >= counter) in fq_ring_free()
143 put_pages_list(&fq->entries[idx].freelist); in fq_ring_free()
145 fq->entries[idx].iova_pfn, in fq_ring_free()
146 fq->entries[idx].pages); in fq_ring_free()
148 fq->head = (fq->head + 1) % IOVA_FQ_SIZE; in fq_ring_free()
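fq_ring_free() drains the ring from head: each entry's deferred page list is released with put_pages_list(), the IOVA range itself is freed (that call doesn't reference fq and is elided here), and head is advanced past it. The walk stops at the first entry whose counter is not below a flush-completion count captured at the top of the function (presumably the finish-side counterpart of the fq_flush_start_cnt seen in queue_iova()), since such an entry was queued after the last completed flush and its IOVA may still be live in the IOTLB.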
169 struct iova_fq *fq; in fq_flush_timeout() local
171 fq = per_cpu_ptr(cookie->fq, cpu); in fq_flush_timeout()
172 spin_lock_irqsave(&fq->lock, flags); in fq_flush_timeout()
173 fq_ring_free(cookie, fq); in fq_flush_timeout()
174 spin_unlock_irqrestore(&fq->lock, flags); in fq_flush_timeout()
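The flush timeout handler drains every CPU's queue: the surrounding per-CPU loop doesn't itself mention fq and so isn't listed, but each iteration takes that queue's lock and calls fq_ring_free() on it, presumably after the deferred IOTLB flush has been issued so the drain can make progress (cf. the counter check in fq_ring_free()).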
182 struct iova_fq *fq; in queue_iova() local
195 fq = raw_cpu_ptr(cookie->fq); in queue_iova()
196 spin_lock_irqsave(&fq->lock, flags); in queue_iova()
203 fq_ring_free(cookie, fq); in queue_iova()
205 if (fq_full(fq)) { in queue_iova()
207 fq_ring_free(cookie, fq); in queue_iova()
210 idx = fq_ring_add(fq); in queue_iova()
212 fq->entries[idx].iova_pfn = pfn; in queue_iova()
213 fq->entries[idx].pages = pages; in queue_iova()
214 fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt); in queue_iova()
215 list_splice(freelist, &fq->entries[idx].freelist); in queue_iova()
217 spin_unlock_irqrestore(&fq->lock, flags); in queue_iova()
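queue_iova() works on the local CPU's ring (raw_cpu_ptr()) under its lock: it first drains anything already covered by a completed flush, and if the ring is still full it forces a flush (the call between the fq_full() check and the second fq_ring_free() doesn't reference fq, so it isn't listed) before draining again, claiming a slot, and recording the IOVA pfn, page count, current flush counter, and the spliced list of deferred pages. A self-contained sketch of that drain-on-full enqueue flow, with the ring definitions repeated from the sketch above; the entry layout, counters, and flush stand-ins are simplified assumptions rather than the kernel code:

#include <stdbool.h>

#define FQ_SIZE 256

struct fq_entry {
        unsigned long pfn;      /* base pfn of the deferred IOVA range */
        unsigned long pages;    /* size of the range in pages */
        unsigned long counter;  /* flush count at the time of queueing */
};

struct fq {
        unsigned int head, tail;
        struct fq_entry entries[FQ_SIZE];
};

static bool fq_is_full(struct fq *fq)
{
        return ((fq->tail + 1) % FQ_SIZE) == fq->head;
}

/* Reclaim every entry queued before the last completed flush. */
static void fq_drain(struct fq *fq, unsigned long finished_flushes)
{
        while (fq->head != fq->tail &&
               fq->entries[fq->head].counter < finished_flushes) {
                /* ... release the pages and the IOVA of entries[head] ... */
                fq->head = (fq->head + 1) % FQ_SIZE;
        }
}

static void fq_enqueue(struct fq *fq, unsigned long pfn, unsigned long pages,
                       unsigned long *started, unsigned long *finished)
{
        unsigned int idx;

        fq_drain(fq, *finished);                /* opportunistic reclaim */

        if (fq_is_full(fq)) {
                (*started)++;                   /* stand-in for issuing a flush */
                (*finished)++;                  /* ... and for waiting for it */
                fq_drain(fq, *finished);
        }

        idx = fq->tail;
        fq->tail = (idx + 1) % FQ_SIZE;
        fq->entries[idx].pfn = pfn;
        fq->entries[idx].pages = pages;
        fq->entries[idx].counter = *started;
}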
230 if (!cookie->fq) in iommu_dma_free_fq()
236 struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu); in iommu_dma_free_fq() local
238 fq_ring_for_each(idx, fq) in iommu_dma_free_fq()
239 put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq()
242 free_percpu(cookie->fq); in iommu_dma_free_fq()
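iommu_dma_free_fq() returns early when no flush queue was ever set up; otherwise it walks each CPU's ring, releases any page lists still queued with put_pages_list(), and frees the per-CPU allocation with free_percpu().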
265 struct iova_fq *fq = per_cpu_ptr(queue, cpu); in iommu_dma_init_fq() local
267 fq->head = 0; in iommu_dma_init_fq()
268 fq->tail = 0; in iommu_dma_init_fq()
270 spin_lock_init(&fq->lock); in iommu_dma_init_fq()
273 INIT_LIST_HEAD(&fq->entries[i].freelist); in iommu_dma_init_fq()
276 cookie->fq = queue; in iommu_dma_init_fq()
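iommu_dma_init_fq() brings each CPU's ring up empty (head == tail == 0), initialises its lock and every entry's freelist, and only then publishes the queues by assigning cookie->fq; the per-CPU allocation behind `queue` doesn't reference fq and so isn't listed here.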