1 /* Virtio ring implementation.
2  *
3  *  Copyright 2007 Rusty Russell IBM Corporation
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License
16  *  along with this program; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include <linux/virtio.h>
20 #include <linux/virtio_ring.h>
21 #include <linux/virtio_config.h>
22 #include <linux/device.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/hrtimer.h>
26 #include <linux/kmemleak.h>
27 #include <linux/dma-mapping.h>
28 #include <xen/xen.h>
29 
30 #ifdef DEBUG
31 /* For development, we want to crash whenever the ring is screwed. */
32 #define BAD_RING(_vq, fmt, args...)				\
33 	do {							\
34 		dev_err(&(_vq)->vq.vdev->dev,			\
35 			"%s:"fmt, (_vq)->vq.name, ##args);	\
36 		BUG();						\
37 	} while (0)
38 /* Caller is supposed to guarantee no reentry. */
39 #define START_USE(_vq)						\
40 	do {							\
41 		if ((_vq)->in_use)				\
42 			panic("%s:in_use = %i\n",		\
43 			      (_vq)->vq.name, (_vq)->in_use);	\
44 		(_vq)->in_use = __LINE__;			\
45 	} while (0)
46 #define END_USE(_vq) \
47 	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
48 #else
49 #define BAD_RING(_vq, fmt, args...)				\
50 	do {							\
51 		dev_err(&_vq->vq.vdev->dev,			\
52 			"%s:"fmt, (_vq)->vq.name, ##args);	\
53 		(_vq)->broken = true;				\
54 	} while (0)
55 #define START_USE(vq)
56 #define END_USE(vq)
57 #endif
58 
59 struct vring_desc_state {
60 	void *data;			/* Data for callback. */
61 	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
62 };
63 
64 struct vring_virtqueue {
65 	struct virtqueue vq;
66 
67 	/* Actual memory layout for this queue */
68 	struct vring vring;
69 
70 	/* Can we use weak barriers? */
71 	bool weak_barriers;
72 
73 	/* Other side has made a mess, don't try any more. */
74 	bool broken;
75 
76 	/* Host supports indirect buffers */
77 	bool indirect;
78 
79 	/* Host publishes avail event idx */
80 	bool event;
81 
82 	/* Head of free buffer list. */
83 	unsigned int free_head;
84 	/* Number we've added since last sync. */
85 	unsigned int num_added;
86 
87 	/* Last used index we've seen. */
88 	u16 last_used_idx;
89 
90 	/* Last written value to avail->flags */
91 	u16 avail_flags_shadow;
92 
93 	/* Last written value to avail->idx in guest byte order */
94 	u16 avail_idx_shadow;
95 
96 	/* How to notify other side. FIXME: commonalize hcalls! */
97 	bool (*notify)(struct virtqueue *vq);
98 
99 	/* DMA, allocation, and size information */
100 	bool we_own_ring;
101 	size_t queue_size_in_bytes;
102 	dma_addr_t queue_dma_addr;
103 
104 #ifdef DEBUG
105 	/* They're supposed to lock for us. */
106 	unsigned int in_use;
107 
108 	/* Figure out if their kicks are too delayed. */
109 	bool last_add_time_valid;
110 	ktime_t last_add_time;
111 #endif
112 
113 	/* Per-descriptor state. */
114 	struct vring_desc_state desc_state[];
115 };
116 
117 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
118 
119 /*
120  * Modern virtio devices have feature bits to specify whether they need a
121  * quirk and bypass the IOMMU. If the quirk is not present, just use the DMA API.
122  *
123  * If the quirk is present, the interaction between virtio and the DMA API is messy.
124  *
125  * On most systems with virtio, physical addresses match bus addresses,
126  * and it doesn't particularly matter whether we use the DMA API.
127  *
128  * On some systems, including Xen and any system with a physical device
129  * that speaks virtio behind a physical IOMMU, we must use the DMA API
130  * for virtio DMA to work at all.
131  *
132  * On other systems, including SPARC and PPC64, virtio-pci devices are
133  * enumerated as though they are behind an IOMMU, but the virtio host
134  * ignores the IOMMU, so we must either pretend that the IOMMU isn't
135  * there or somehow map everything as the identity.
136  *
137  * For the time being, we preserve historic behavior and bypass the DMA
138  * API.
139  *
140  * TODO: install a per-device DMA ops structure that does the right thing
141  * taking into account all the above quirks, and use the DMA API
142  * unconditionally on data path.
143  */
144 
145 static bool vring_use_dma_api(struct virtio_device *vdev)
146 {
147 	if (!virtio_has_iommu_quirk(vdev))
148 		return true;
149 
150 	/* Otherwise, we are left to guess. */
151 	/*
152 	 * In theory, it's possible to have a buggy QEMU-supplied
153 	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
154 	 * such a configuration, virtio has never worked and will
155 	 * not work without an even larger kludge.  Instead, enable
156 	 * the DMA API if we're a Xen guest, which at least allows
157 	 * all of the sensible Xen configurations to work correctly.
158 	 */
159 	if (xen_domain())
160 		return true;
161 
162 	return false;
163 }
164 
165 /*
166  * The DMA ops on various arches are rather gnarly right now, and
167  * making all of the arch DMA ops work on the vring device itself
168  * is a mess.  For now, we use the parent device for DMA ops.
169  */
170 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
171 {
172 	return vq->vq.vdev->dev.parent;
173 }
174 
175 /* Map one sg entry. */
176 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
177 				   struct scatterlist *sg,
178 				   enum dma_data_direction direction)
179 {
180 	if (!vring_use_dma_api(vq->vq.vdev))
181 		return (dma_addr_t)sg_phys(sg);
182 
183 	/*
184 	 * We can't use dma_map_sg, because we don't use scatterlists in
185 	 * the way it expects (we don't guarantee that the scatterlist
186 	 * will exist for the lifetime of the mapping).
187 	 */
188 	return dma_map_page(vring_dma_dev(vq),
189 			    sg_page(sg), sg->offset, sg->length,
190 			    direction);
191 }
192 
193 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
194 				   void *cpu_addr, size_t size,
195 				   enum dma_data_direction direction)
196 {
197 	if (!vring_use_dma_api(vq->vq.vdev))
198 		return (dma_addr_t)virt_to_phys(cpu_addr);
199 
200 	return dma_map_single(vring_dma_dev(vq),
201 			      cpu_addr, size, direction);
202 }
203 
204 static void vring_unmap_one(const struct vring_virtqueue *vq,
205 			    struct vring_desc *desc)
206 {
207 	u16 flags;
208 
209 	if (!vring_use_dma_api(vq->vq.vdev))
210 		return;
211 
212 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
213 
214 	if (flags & VRING_DESC_F_INDIRECT) {
215 		dma_unmap_single(vring_dma_dev(vq),
216 				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
217 				 virtio32_to_cpu(vq->vq.vdev, desc->len),
218 				 (flags & VRING_DESC_F_WRITE) ?
219 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
220 	} else {
221 		dma_unmap_page(vring_dma_dev(vq),
222 			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
223 			       virtio32_to_cpu(vq->vq.vdev, desc->len),
224 			       (flags & VRING_DESC_F_WRITE) ?
225 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
226 	}
227 }
228 
229 static int vring_mapping_error(const struct vring_virtqueue *vq,
230 			       dma_addr_t addr)
231 {
232 	if (!vring_use_dma_api(vq->vq.vdev))
233 		return 0;
234 
235 	return dma_mapping_error(vring_dma_dev(vq), addr);
236 }
237 
238 static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
239 					 unsigned int total_sg, gfp_t gfp)
240 {
241 	struct vring_desc *desc;
242 	unsigned int i;
243 
244 	/*
245 	 * We require lowmem mappings for the descriptors because
246 	 * otherwise virt_to_phys will give us bogus addresses in the
247 	 * virtqueue.
248 	 */
249 	gfp &= ~__GFP_HIGHMEM;
250 
251 	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
252 	if (!desc)
253 		return NULL;
254 
255 	for (i = 0; i < total_sg; i++)
256 		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
257 	return desc;
258 }
259 
260 static inline int virtqueue_add(struct virtqueue *_vq,
261 				struct scatterlist *sgs[],
262 				unsigned int total_sg,
263 				unsigned int out_sgs,
264 				unsigned int in_sgs,
265 				void *data,
266 				gfp_t gfp)
267 {
268 	struct vring_virtqueue *vq = to_vvq(_vq);
269 	struct scatterlist *sg;
270 	struct vring_desc *desc;
271 	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
272 	int head;
273 	bool indirect;
274 
275 	START_USE(vq);
276 
277 	BUG_ON(data == NULL);
278 
279 	if (unlikely(vq->broken)) {
280 		END_USE(vq);
281 		return -EIO;
282 	}
283 
284 #ifdef DEBUG
285 	{
286 		ktime_t now = ktime_get();
287 
288 		/* No kick or get, with 0.1 second between?  Warn. */
289 		if (vq->last_add_time_valid)
290 			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
291 					    > 100);
292 		vq->last_add_time = now;
293 		vq->last_add_time_valid = true;
294 	}
295 #endif
296 
297 	BUG_ON(total_sg > vq->vring.num);
298 	BUG_ON(total_sg == 0);
299 
300 	head = vq->free_head;
301 
302 	/* If the host supports indirect descriptor tables, and we have multiple
303 	 * buffers, then go indirect. FIXME: tune this threshold */
304 	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
305 		desc = alloc_indirect(_vq, total_sg, gfp);
306 	else
307 		desc = NULL;
308 
309 	if (desc) {
310 		/* Use a single buffer which doesn't continue */
311 		indirect = true;
312 		/* Set up rest to use this indirect table. */
313 		i = 0;
314 		descs_used = 1;
315 	} else {
316 		indirect = false;
317 		desc = vq->vring.desc;
318 		i = head;
319 		descs_used = total_sg;
320 	}
321 
322 	if (vq->vq.num_free < descs_used) {
323 		pr_debug("Can't add buf len %i - avail = %i\n",
324 			 descs_used, vq->vq.num_free);
325 		/* FIXME: for historical reasons, we force a notify here if
326 		 * there are outgoing parts to the buffer.  Presumably the
327 		 * host should service the ring ASAP. */
328 		if (out_sgs)
329 			vq->notify(&vq->vq);
330 		if (indirect)
331 			kfree(desc);
332 		END_USE(vq);
333 		return -ENOSPC;
334 	}
335 
336 	for (n = 0; n < out_sgs; n++) {
337 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
338 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
339 			if (vring_mapping_error(vq, addr))
340 				goto unmap_release;
341 
342 			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
343 			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
344 			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
345 			prev = i;
346 			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
347 		}
348 	}
349 	for (; n < (out_sgs + in_sgs); n++) {
350 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
351 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
352 			if (vring_mapping_error(vq, addr))
353 				goto unmap_release;
354 
355 			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
356 			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
357 			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
358 			prev = i;
359 			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
360 		}
361 	}
362 	/* Last one doesn't continue. */
363 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
364 
365 	if (indirect) {
366 		/* Now that the indirect table is filled in, map it. */
367 		dma_addr_t addr = vring_map_single(
368 			vq, desc, total_sg * sizeof(struct vring_desc),
369 			DMA_TO_DEVICE);
370 		if (vring_mapping_error(vq, addr))
371 			goto unmap_release;
372 
373 		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
374 		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
375 
376 		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
377 	}
378 
379 	/* We're using some buffers from the free list. */
380 	vq->vq.num_free -= descs_used;
381 
382 	/* Update free pointer */
383 	if (indirect)
384 		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
385 	else
386 		vq->free_head = i;
387 
388 	/* Store token and indirect buffer state. */
389 	vq->desc_state[head].data = data;
390 	if (indirect)
391 		vq->desc_state[head].indir_desc = desc;
392 
393 	/* Put entry in available array (but don't update avail->idx until they
394 	 * do sync). */
395 	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
396 	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
397 
398 	/* Descriptors and available array need to be set before we expose the
399 	 * new available array entries. */
400 	virtio_wmb(vq->weak_barriers);
401 	vq->avail_idx_shadow++;
402 	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
403 	vq->num_added++;
404 
405 	pr_debug("Added buffer head %i to %p\n", head, vq);
406 	END_USE(vq);
407 
408 	/* This is very unlikely, but theoretically possible.  Kick
409 	 * just in case. */
410 	if (unlikely(vq->num_added == (1 << 16) - 1))
411 		virtqueue_kick(_vq);
412 
413 	return 0;
414 
415 unmap_release:
416 	err_idx = i;
417 	i = head;
418 
419 	for (n = 0; n < total_sg; n++) {
420 		if (i == err_idx)
421 			break;
422 		vring_unmap_one(vq, &desc[i]);
423 		i = vq->vring.desc[i].next;
424 	}
425 
426 	if (indirect)
427 		kfree(desc);
428 
429 	END_USE(vq);
430 	return -EIO;
431 }
432 
433 /**
434  * virtqueue_add_sgs - expose buffers to other end
435  * @vq: the struct virtqueue we're talking about.
436  * @sgs: array of terminated scatterlists.
437  * @out_sgs: the number of scatterlists readable by other side
438  * @in_sgs: the number of scatterlists which are writable (after readable ones)
439  * @data: the token identifying the buffer.
440  * @gfp: how to do memory allocations (if necessary).
441  *
442  * Caller must ensure we don't call this with other virtqueue operations
443  * at the same time (except where noted).
444  *
445  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
446  */
447 int virtqueue_add_sgs(struct virtqueue *_vq,
448 		      struct scatterlist *sgs[],
449 		      unsigned int out_sgs,
450 		      unsigned int in_sgs,
451 		      void *data,
452 		      gfp_t gfp)
453 {
454 	unsigned int i, total_sg = 0;
455 
456 	/* Count them first. */
457 	for (i = 0; i < out_sgs + in_sgs; i++) {
458 		struct scatterlist *sg;
459 		for (sg = sgs[i]; sg; sg = sg_next(sg))
460 			total_sg++;
461 	}
462 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
463 }
464 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
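/*
 * A minimal usage sketch: a driver whose request carries one readable
 * header and one device-writable status byte (req, hdr and status are
 * hypothetical driver-side names) would typically queue it like this:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	unsigned int num_out = 0, num_in = 0;
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sgs[num_out++] = &hdr;
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[num_out + num_in++] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, num_out, num_in, req, GFP_ATOMIC);
 */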
465 
466 /**
467  * virtqueue_add_outbuf - expose output buffers to other end
468  * @vq: the struct virtqueue we're talking about.
469  * @sg: scatterlist (must be well-formed and terminated!)
470  * @num: the number of entries in @sg readable by other side
471  * @data: the token identifying the buffer.
472  * @gfp: how to do memory allocations (if necessary).
473  *
474  * Caller must ensure we don't call this with other virtqueue operations
475  * at the same time (except where noted).
476  *
477  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
478  */
479 int virtqueue_add_outbuf(struct virtqueue *vq,
480 			 struct scatterlist *sg, unsigned int num,
481 			 void *data,
482 			 gfp_t gfp)
483 {
484 	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
485 }
486 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
487 
488 /**
489  * virtqueue_add_inbuf - expose input buffers to other end
490  * @vq: the struct virtqueue we're talking about.
491  * @sg: scatterlist (must be well-formed and terminated!)
492  * @num: the number of entries in @sg writable by other side
493  * @data: the token identifying the buffer.
494  * @gfp: how to do memory allocations (if necessary).
495  *
496  * Caller must ensure we don't call this with other virtqueue operations
497  * at the same time (except where noted).
498  *
499  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
500  */
501 int virtqueue_add_inbuf(struct virtqueue *vq,
502 			struct scatterlist *sg, unsigned int num,
503 			void *data,
504 			gfp_t gfp)
505 {
506 	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
507 }
508 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
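/*
 * A minimal usage sketch for posting a device-writable receive buffer
 * (buf and len are hypothetical driver-side names):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 */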
509 
510 /**
511  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
512  * @vq: the struct virtqueue
513  *
514  * Instead of virtqueue_kick(), you can do:
515  *	if (virtqueue_kick_prepare(vq))
516  *		virtqueue_notify(vq);
517  *
518  * This is sometimes useful because the virtqueue_kick_prepare() needs
519  * to be serialized, but the actual virtqueue_notify() call does not.
520  */
521 bool virtqueue_kick_prepare(struct virtqueue *_vq)
522 {
523 	struct vring_virtqueue *vq = to_vvq(_vq);
524 	u16 new, old;
525 	bool needs_kick;
526 
527 	START_USE(vq);
528 	/* We need to expose available array entries before checking avail
529 	 * event. */
530 	virtio_mb(vq->weak_barriers);
531 
532 	old = vq->avail_idx_shadow - vq->num_added;
533 	new = vq->avail_idx_shadow;
534 	vq->num_added = 0;
535 
536 #ifdef DEBUG
537 	if (vq->last_add_time_valid) {
538 		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
539 					      vq->last_add_time)) > 100);
540 	}
541 	vq->last_add_time_valid = false;
542 #endif
543 
544 	if (vq->event) {
545 		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
546 					      new, old);
547 	} else {
548 		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
549 	}
550 	END_USE(vq);
551 	return needs_kick;
552 }
553 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
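/*
 * A common reason to split the kick is to issue the (possibly slow)
 * notification outside the driver's lock; priv->lock, flags, sg and buf
 * are hypothetical driver-side names in this sketch:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */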
554 
555 /**
556  * virtqueue_notify - second half of split virtqueue_kick call.
557  * @vq: the struct virtqueue
558  *
559  * This does not need to be serialized.
560  *
561  * Returns false if host notify failed or queue is broken, otherwise true.
562  */
563 bool virtqueue_notify(struct virtqueue *_vq)
564 {
565 	struct vring_virtqueue *vq = to_vvq(_vq);
566 
567 	if (unlikely(vq->broken))
568 		return false;
569 
570 	/* Prod other side to tell it about changes. */
571 	if (!vq->notify(_vq)) {
572 		vq->broken = true;
573 		return false;
574 	}
575 	return true;
576 }
577 EXPORT_SYMBOL_GPL(virtqueue_notify);
578 
579 /**
580  * virtqueue_kick - update after add_buf
581  * @vq: the struct virtqueue
582  *
583  * After one or more virtqueue_add_* calls, invoke this to kick
584  * the other side.
585  *
586  * Caller must ensure we don't call this with other virtqueue
587  * operations at the same time (except where noted).
588  *
589  * Returns false if kick failed, otherwise true.
590  */
591 bool virtqueue_kick(struct virtqueue *vq)
592 {
593 	if (virtqueue_kick_prepare(vq))
594 		return virtqueue_notify(vq);
595 	return true;
596 }
597 EXPORT_SYMBOL_GPL(virtqueue_kick);
598 
599 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
600 {
601 	unsigned int i, j;
602 	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
603 
604 	/* Clear data ptr. */
605 	vq->desc_state[head].data = NULL;
606 
607 	/* Put back on free list: unmap first-level descriptors and find end */
608 	i = head;
609 
610 	while (vq->vring.desc[i].flags & nextflag) {
611 		vring_unmap_one(vq, &vq->vring.desc[i]);
612 		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
613 		vq->vq.num_free++;
614 	}
615 
616 	vring_unmap_one(vq, &vq->vring.desc[i]);
617 	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
618 	vq->free_head = head;
619 
620 	/* Plus final descriptor */
621 	vq->vq.num_free++;
622 
623 	/* Free the indirect table, if any, now that it's unmapped. */
624 	if (vq->desc_state[head].indir_desc) {
625 		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
626 		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
627 
628 		BUG_ON(!(vq->vring.desc[head].flags &
629 			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
630 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
631 
632 		for (j = 0; j < len / sizeof(struct vring_desc); j++)
633 			vring_unmap_one(vq, &indir_desc[j]);
634 
635 		kfree(vq->desc_state[head].indir_desc);
636 		vq->desc_state[head].indir_desc = NULL;
637 	}
638 }
639 
640 static inline bool more_used(const struct vring_virtqueue *vq)
641 {
642 	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
643 }
644 
645 /**
646  * virtqueue_get_buf - get the next used buffer
647  * @vq: the struct virtqueue we're talking about.
648  * @len: the length written into the buffer
649  *
650  * If the device wrote data into the buffer, @len will be set to the
651  * amount written.  This means you don't need to clear the buffer
652  * beforehand to ensure there's no data leakage in the case of short
653  * writes.
654  *
655  * Caller must ensure we don't call this with other virtqueue
656  * operations at the same time (except where noted).
657  *
658  * Returns NULL if there are no used buffers, or the "data" token
659  * handed to virtqueue_add_*().
660  */
661 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
662 {
663 	struct vring_virtqueue *vq = to_vvq(_vq);
664 	void *ret;
665 	unsigned int i;
666 	u16 last_used;
667 
668 	START_USE(vq);
669 
670 	if (unlikely(vq->broken)) {
671 		END_USE(vq);
672 		return NULL;
673 	}
674 
675 	if (!more_used(vq)) {
676 		pr_debug("No more buffers in queue\n");
677 		END_USE(vq);
678 		return NULL;
679 	}
680 
681 	/* Only get used array entries after they have been exposed by host. */
682 	virtio_rmb(vq->weak_barriers);
683 
684 	last_used = (vq->last_used_idx & (vq->vring.num - 1));
685 	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
686 	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
687 
688 	if (unlikely(i >= vq->vring.num)) {
689 		BAD_RING(vq, "id %u out of range\n", i);
690 		return NULL;
691 	}
692 	if (unlikely(!vq->desc_state[i].data)) {
693 		BAD_RING(vq, "id %u is not a head!\n", i);
694 		return NULL;
695 	}
696 
697 	/* detach_buf clears data, so grab it now. */
698 	ret = vq->desc_state[i].data;
699 	detach_buf(vq, i);
700 	vq->last_used_idx++;
701 	/* If we expect an interrupt for the next entry, tell host
702 	 * by writing event index and flush out the write before
703 	 * the read in the next get_buf call. */
704 	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
705 		virtio_store_mb(vq->weak_barriers,
706 				&vring_used_event(&vq->vring),
707 				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
708 
709 #ifdef DEBUG
710 	vq->last_add_time_valid = false;
711 #endif
712 
713 	END_USE(vq);
714 	return ret;
715 }
716 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
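/*
 * A typical completion path drains all used buffers in a loop
 * (handle_completion is a hypothetical driver-side helper):
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);
 */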
717 
718 /**
719  * virtqueue_disable_cb - disable callbacks
720  * @vq: the struct virtqueue we're talking about.
721  *
722  * Note that this is not necessarily synchronous, hence unreliable and only
723  * useful as an optimization.
724  *
725  * Unlike other operations, this need not be serialized.
726  */
727 void virtqueue_disable_cb(struct virtqueue *_vq)
728 {
729 	struct vring_virtqueue *vq = to_vvq(_vq);
730 
731 	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
732 		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
733 		if (!vq->event)
734 			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
735 	}
736 
737 }
738 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
739 
740 /**
741  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
742  * @vq: the struct virtqueue we're talking about.
743  *
744  * This re-enables callbacks; it returns current queue state
745  * in an opaque unsigned value. This value should be later tested by
746  * virtqueue_poll, to detect a possible race between the driver checking for
747  * more work, and enabling callbacks.
748  *
749  * Caller must ensure we don't call this with other virtqueue
750  * operations at the same time (except where noted).
751  */
752 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
753 {
754 	struct vring_virtqueue *vq = to_vvq(_vq);
755 	u16 last_used_idx;
756 
757 	START_USE(vq);
758 
759 	/* We optimistically turn back on interrupts, then check if there was
760 	 * more to do. */
761 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
762 	 * either clear the flags bit or point the event index at the next
763 	 * entry. Always do both to keep code simple. */
764 	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
765 		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
766 		if (!vq->event)
767 			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
768 	}
769 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
770 	END_USE(vq);
771 	return last_used_idx;
772 }
773 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
774 
775 /**
776  * virtqueue_poll - query pending used buffers
777  * @vq: the struct virtqueue we're talking about.
778  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
779  *
780  * Returns "true" if there are pending used buffers in the queue.
781  *
782  * This does not need to be serialized.
783  */
784 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
785 {
786 	struct vring_virtqueue *vq = to_vvq(_vq);
787 
788 	virtio_mb(vq->weak_barriers);
789 	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
790 }
791 EXPORT_SYMBOL_GPL(virtqueue_poll);
792 
793 /**
794  * virtqueue_enable_cb - restart callbacks after disable_cb.
795  * @vq: the struct virtqueue we're talking about.
796  *
797  * This re-enables callbacks; it returns "false" if there are pending
798  * buffers in the queue, to detect a possible race between the driver
799  * checking for more work, and enabling callbacks.
800  *
801  * Caller must ensure we don't call this with other virtqueue
802  * operations at the same time (except where noted).
803  */
804 bool virtqueue_enable_cb(struct virtqueue *_vq)
805 {
806 	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
807 	return !virtqueue_poll(_vq, last_used_idx);
808 }
809 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
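/*
 * The usual race-free pattern in a callback is to disable further
 * callbacks, drain the ring, then re-enable and re-check; this sketch
 * reuses the hypothetical handle_completion helper from above:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle_completion(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */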
810 
811 /**
812  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
813  * @vq: the struct virtqueue we're talking about.
814  *
815  * This re-enables callbacks but hints to the other side to delay
816  * interrupts until most of the available buffers have been processed;
817  * it returns "false" if there are many pending buffers in the queue,
818  * to detect a possible race between the driver checking for more work,
819  * and enabling callbacks.
820  *
821  * Caller must ensure we don't call this with other virtqueue
822  * operations at the same time (except where noted).
823  */
824 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
825 {
826 	struct vring_virtqueue *vq = to_vvq(_vq);
827 	u16 bufs;
828 
829 	START_USE(vq);
830 
831 	/* We optimistically turn back on interrupts, then check if there was
832 	 * more to do. */
833 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
834 	 * either clear the flags bit or point the event index at the next
835 	 * entry. Always update the event index to keep code simple. */
836 	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
837 		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
838 		if (!vq->event)
839 			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
840 	}
841 	/* TODO: tune this threshold */
842 	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
843 
844 	virtio_store_mb(vq->weak_barriers,
845 			&vring_used_event(&vq->vring),
846 			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
847 
848 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
849 		END_USE(vq);
850 		return false;
851 	}
852 
853 	END_USE(vq);
854 	return true;
855 }
856 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
857 
858 /**
859  * virtqueue_detach_unused_buf - detach first unused buffer
860  * @vq: the struct virtqueue we're talking about.
861  *
862  * Returns NULL or the "data" token handed to virtqueue_add_*().
863  * This is not valid on an active queue; it is useful only for device
864  * shutdown.
865  */
866 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
867 {
868 	struct vring_virtqueue *vq = to_vvq(_vq);
869 	unsigned int i;
870 	void *buf;
871 
872 	START_USE(vq);
873 
874 	for (i = 0; i < vq->vring.num; i++) {
875 		if (!vq->desc_state[i].data)
876 			continue;
877 		/* detach_buf clears data, so grab it now. */
878 		buf = vq->desc_state[i].data;
879 		detach_buf(vq, i);
880 		vq->avail_idx_shadow--;
881 		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
882 		END_USE(vq);
883 		return buf;
884 	}
885 	/* That should have freed everything. */
886 	BUG_ON(vq->vq.num_free != vq->vring.num);
887 
888 	END_USE(vq);
889 	return NULL;
890 }
891 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
892 
893 irqreturn_t vring_interrupt(int irq, void *_vq)
894 {
895 	struct vring_virtqueue *vq = to_vvq(_vq);
896 
897 	if (!more_used(vq)) {
898 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
899 		return IRQ_NONE;
900 	}
901 
902 	if (unlikely(vq->broken))
903 		return IRQ_HANDLED;
904 
905 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
906 	if (vq->vq.callback)
907 		vq->vq.callback(&vq->vq);
908 
909 	return IRQ_HANDLED;
910 }
911 EXPORT_SYMBOL_GPL(vring_interrupt);
912 
913 struct virtqueue *__vring_new_virtqueue(unsigned int index,
914 					struct vring vring,
915 					struct virtio_device *vdev,
916 					bool weak_barriers,
917 					bool (*notify)(struct virtqueue *),
918 					void (*callback)(struct virtqueue *),
919 					const char *name)
920 {
921 	unsigned int i;
922 	struct vring_virtqueue *vq;
923 
924 	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
925 		     GFP_KERNEL);
926 	if (!vq)
927 		return NULL;
928 
929 	vq->vring = vring;
930 	vq->vq.callback = callback;
931 	vq->vq.vdev = vdev;
932 	vq->vq.name = name;
933 	vq->vq.num_free = vring.num;
934 	vq->vq.index = index;
935 	vq->we_own_ring = false;
936 	vq->queue_dma_addr = 0;
937 	vq->queue_size_in_bytes = 0;
938 	vq->notify = notify;
939 	vq->weak_barriers = weak_barriers;
940 	vq->broken = false;
941 	vq->last_used_idx = 0;
942 	vq->avail_flags_shadow = 0;
943 	vq->avail_idx_shadow = 0;
944 	vq->num_added = 0;
945 	list_add_tail(&vq->vq.list, &vdev->vqs);
946 #ifdef DEBUG
947 	vq->in_use = false;
948 	vq->last_add_time_valid = false;
949 #endif
950 
951 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
952 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
953 
954 	/* No callback?  Tell other side not to bother us. */
955 	if (!callback) {
956 		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
957 		if (!vq->event)
958 			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
959 	}
960 
961 	/* Put everything in free lists. */
962 	vq->free_head = 0;
963 	for (i = 0; i < vring.num-1; i++)
964 		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
965 	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
966 
967 	return &vq->vq;
968 }
969 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
970 
971 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
972 			      dma_addr_t *dma_handle, gfp_t flag)
973 {
974 	if (vring_use_dma_api(vdev)) {
975 		return dma_alloc_coherent(vdev->dev.parent, size,
976 					  dma_handle, flag);
977 	} else {
978 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
979 		if (queue) {
980 			phys_addr_t phys_addr = virt_to_phys(queue);
981 			*dma_handle = (dma_addr_t)phys_addr;
982 
983 			/*
984 			 * Sanity check: make sure we didn't truncate
985 			 * the address.  The only arches I can find that
986 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
987 			 * are certain non-highmem MIPS and x86
988 			 * configurations, but these configurations
989 			 * should never allocate physical pages above 32
990 			 * bits, so this is fine.  Just in case, throw a
991 			 * warning and abort if we end up with an
992 			 * unrepresentable address.
993 			 */
994 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
995 				free_pages_exact(queue, PAGE_ALIGN(size));
996 				return NULL;
997 			}
998 		}
999 		return queue;
1000 	}
1001 }
1002 
1003 static void vring_free_queue(struct virtio_device *vdev, size_t size,
1004 			     void *queue, dma_addr_t dma_handle)
1005 {
1006 	if (vring_use_dma_api(vdev)) {
1007 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
1008 	} else {
1009 		free_pages_exact(queue, PAGE_ALIGN(size));
1010 	}
1011 }
1012 
1013 struct virtqueue *vring_create_virtqueue(
1014 	unsigned int index,
1015 	unsigned int num,
1016 	unsigned int vring_align,
1017 	struct virtio_device *vdev,
1018 	bool weak_barriers,
1019 	bool may_reduce_num,
1020 	bool (*notify)(struct virtqueue *),
1021 	void (*callback)(struct virtqueue *),
1022 	const char *name)
1023 {
1024 	struct virtqueue *vq;
1025 	void *queue = NULL;
1026 	dma_addr_t dma_addr;
1027 	size_t queue_size_in_bytes;
1028 	struct vring vring;
1029 
1030 	/* We assume num is a power of 2. */
1031 	if (num & (num - 1)) {
1032 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1033 		return NULL;
1034 	}
1035 
1036 	/* TODO: allocate each queue chunk individually */
1037 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1038 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1039 					  &dma_addr,
1040 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1041 		if (queue)
1042 			break;
1043 	}
1044 
1045 	if (!num)
1046 		return NULL;
1047 
1048 	if (!queue) {
1049 		/* Try to get a single page. You are my only hope! */
1050 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1051 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
1052 	}
1053 	if (!queue)
1054 		return NULL;
1055 
1056 	queue_size_in_bytes = vring_size(num, vring_align);
1057 	vring_init(&vring, num, queue, vring_align);
1058 
1059 	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
1060 				   notify, callback, name);
1061 	if (!vq) {
1062 		vring_free_queue(vdev, queue_size_in_bytes, queue,
1063 				 dma_addr);
1064 		return NULL;
1065 	}
1066 
1067 	to_vvq(vq)->queue_dma_addr = dma_addr;
1068 	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
1069 	to_vvq(vq)->we_own_ring = true;
1070 
1071 	return vq;
1072 }
1073 EXPORT_SYMBOL_GPL(vring_create_virtqueue);
1074 
1075 struct virtqueue *vring_new_virtqueue(unsigned int index,
1076 				      unsigned int num,
1077 				      unsigned int vring_align,
1078 				      struct virtio_device *vdev,
1079 				      bool weak_barriers,
1080 				      void *pages,
1081 				      bool (*notify)(struct virtqueue *vq),
1082 				      void (*callback)(struct virtqueue *vq),
1083 				      const char *name)
1084 {
1085 	struct vring vring;
1086 	vring_init(&vring, num, pages, vring_align);
1087 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
1088 				     notify, callback, name);
1089 }
1090 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
1091 
1092 void vring_del_virtqueue(struct virtqueue *_vq)
1093 {
1094 	struct vring_virtqueue *vq = to_vvq(_vq);
1095 
1096 	if (vq->we_own_ring) {
1097 		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
1098 				 vq->vring.desc, vq->queue_dma_addr);
1099 	}
1100 	list_del(&_vq->list);
1101 	kfree(vq);
1102 }
1103 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
1104 
1105 /* Manipulates transport-specific feature bits. */
1106 void vring_transport_features(struct virtio_device *vdev)
1107 {
1108 	unsigned int i;
1109 
1110 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
1111 		switch (i) {
1112 		case VIRTIO_RING_F_INDIRECT_DESC:
1113 			break;
1114 		case VIRTIO_RING_F_EVENT_IDX:
1115 			break;
1116 		case VIRTIO_F_VERSION_1:
1117 			break;
1118 		case VIRTIO_F_IOMMU_PLATFORM:
1119 			break;
1120 		default:
1121 			/* We don't understand this bit. */
1122 			__virtio_clear_bit(vdev, i);
1123 		}
1124 	}
1125 }
1126 EXPORT_SYMBOL_GPL(vring_transport_features);
1127 
1128 /**
1129  * virtqueue_get_vring_size - return the size of the virtqueue's vring
1130  * @vq: the struct virtqueue containing the vring of interest.
1131  *
1132  * Returns the size of the vring.  This is mainly used for boasting to
1133  * userspace.  Unlike other operations, this need not be serialized.
1134  */
1135 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
1136 {
1137 
1138 	struct vring_virtqueue *vq = to_vvq(_vq);
1139 
1140 	return vq->vring.num;
1141 }
1142 EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
1143 
1144 bool virtqueue_is_broken(struct virtqueue *_vq)
1145 {
1146 	struct vring_virtqueue *vq = to_vvq(_vq);
1147 
1148 	return vq->broken;
1149 }
1150 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1151 
1152 /*
1153  * This should prevent the device from being used, allowing drivers to
1154  * recover.  You may need to grab appropriate locks to flush.
1155  */
1156 void virtio_break_device(struct virtio_device *dev)
1157 {
1158 	struct virtqueue *_vq;
1159 
1160 	list_for_each_entry(_vq, &dev->vqs, list) {
1161 		struct vring_virtqueue *vq = to_vvq(_vq);
1162 		vq->broken = true;
1163 	}
1164 }
1165 EXPORT_SYMBOL_GPL(virtio_break_device);
1166 
1167 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
1168 {
1169 	struct vring_virtqueue *vq = to_vvq(_vq);
1170 
1171 	BUG_ON(!vq->we_own_ring);
1172 
1173 	return vq->queue_dma_addr;
1174 }
1175 EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
1176 
1177 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
1178 {
1179 	struct vring_virtqueue *vq = to_vvq(_vq);
1180 
1181 	BUG_ON(!vq->we_own_ring);
1182 
1183 	return vq->queue_dma_addr +
1184 		((char *)vq->vring.avail - (char *)vq->vring.desc);
1185 }
1186 EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
1187 
1188 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
1189 {
1190 	struct vring_virtqueue *vq = to_vvq(_vq);
1191 
1192 	BUG_ON(!vq->we_own_ring);
1193 
1194 	return vq->queue_dma_addr +
1195 		((char *)vq->vring.used - (char *)vq->vring.desc);
1196 }
1197 EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
1198 
1199 const struct vring *virtqueue_get_vring(struct virtqueue *vq)
1200 {
1201 	return &to_vvq(vq)->vring;
1202 }
1203 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
1204 
1205 MODULE_LICENSE("GPL");
1206