// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

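/*
 * Report the largest contiguous buffer the device can safely have
 * mapped in a single DMA operation: with the DMA API in use this
 * defers to the DMA layer's per-device limit, otherwise any size is
 * representable. (Illustratively, a block driver might clamp its
 * maximum segment size to this value; that usage is a sketch, not
 * drawn from this file.)
 */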
size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address. The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine. Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess. For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

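/*
 * Unmap the buffer behind descriptor @i using the driver-side shadow
 * state in desc_extra rather than the device-visible ring, and return
 * the index of the next descriptor in the chain.
 */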
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

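/*
 * Write one descriptor at slot @i of @desc (the ring itself, or an
 * indirect table) and, for ring descriptors, mirror addr/len/flags
 * into desc_extra for later unmapping. Returns the next free slot.
 */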
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

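/*
 * Core of the split-ring add path: map each scatterlist entry, chain
 * the descriptors (directly in the ring, or in a freshly allocated
 * indirect table), then publish the chain head in the available ring.
 * The device only sees the new entry once avail->idx is bumped, after
 * the write barrier below.
 */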
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible. Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

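/*
 * Decide whether the device needs a kick. With VIRTIO_RING_F_EVENT_IDX
 * the device publishes the avail index it wants to be notified at, and
 * vring_need_event() checks (in 16-bit modular arithmetic) whether that
 * index falls within the (old, new] window of entries added since the
 * last kick; without the feature we honour VRING_USED_F_NO_NOTIFY.
 */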
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

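/*
 * Return the descriptor chain starting at @head to the free list,
 * unmapping each buffer as we walk it, and free (or hand back via
 * @ctx) the indirect table if one was used.
 */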
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)
			return;

		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

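/*
 * Re-enable callbacks, but ask the device to defer the interrupt until
 * roughly 3/4 of the buffers currently in flight have been used, to
 * batch completions. Returns false if more buffers are already pending.
 */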
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}


/*
 * Packed ring specific functions - *_packed().
 */

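/*
 * The two unmap helpers below mirror their split-ring counterparts:
 * vring_unmap_state_packed() works from the desc_extra shadow state,
 * while vring_unmap_desc_packed() reads an indirect table entry, which
 * is little-endian by definition in the packed layout.
 */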
static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

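/*
 * Direct (non-indirect) packed-ring add path. Descriptors are written
 * in place in the ring; the head descriptor's flags are published last,
 * after a write barrier, because the avail/used bits in those flags are
 * what makes the whole chain visible to the device.
 */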
static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;
	int err;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
						    in_sgs, data, gfp);
		if (err != -ENOMEM) {
			END_USE(vq);
			return err;
		}

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_extra[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i <= head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;
	curr = vq->free_head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_state_packed(vq,
					 &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}

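/*
 * The device's event suppression structure pairs a flags word with an
 * off_wrap word; reading them as one 32-bit snapshot below avoids a
 * torn view. If the device asked for descriptor-based notifications,
 * recover the event index and its wrap bit from off_wrap and apply the
 * usual vring_need_event() window test.
 */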
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	vq->packed.desc_extra[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_state_packed(vq,
				&vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		*ctx = state->indir_desc;
	}
}

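/*
 * In the packed layout a descriptor is "used" once its avail and used
 * flag bits are equal and both match the relevant wrap counter; while
 * it is merely available, the two bits differ.
 */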
static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
	return is_used_desc_packed(vq, vq->last_used_idx,
			vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id;
	void *ret;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = vq->last_used_idx;
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	vq->last_used_idx += vq->packed.desc_state[id].num;
	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
		vq->last_used_idx -= vq->packed.vring.num;
		vq->packed.used_wrap_counter ^= 1;
	}

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx |
					(vq->packed.used_wrap_counter <<
					 VRING_PACKED_EVENT_F_WRAP_CTR)));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)
			return;

		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx |
				(vq->packed.used_wrap_counter <<
				 VRING_PACKED_EVENT_F_WRAP_CTR));
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	END_USE(vq);
	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
			VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	bool wrap_counter;
	u16 used_idx;

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter;
	u16 bufs;

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		wrap_counter = vq->packed.used_wrap_counter;

		used_idx = vq->last_used_idx + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
		}

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

	END_USE(vq);
	return NULL;
}

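/*
 * Allocate the zero-initialised desc_extra array and chain its entries
 * through ->next to form the initial free list.
 */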
static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
						       unsigned int num)
{
	struct vring_desc_extra *desc_extra;
	unsigned int i;

	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
				   GFP_KERNEL);
	if (!desc_extra)
		return NULL;

	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

	for (i = 0; i < num - 1; i++)
		desc_extra[i].next = i + 1;

	return desc_extra;
}

static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct vring_virtqueue *vq;
	struct vring_packed_desc *ring;
	struct vring_packed_desc_event *driver, *device;
	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
	size_t ring_size_in_bytes, event_size_in_bytes;

	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
				 &ring_dma_addr,
				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!ring)
		goto err_ring;

	event_size_in_bytes = sizeof(struct vring_packed_desc_event);

	driver = vring_alloc_queue(vdev, event_size_in_bytes,
				   &driver_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!driver)
		goto err_driver;

	device = vring_alloc_queue(vdev, event_size_in_bytes,
				   &device_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!device)
		goto err_device;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		goto err_vq;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->we_own_ring = true;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->event_triggered = false;
	vq->num_added = 0;
	vq->packed_ring = true;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->packed.ring_dma_addr = ring_dma_addr;
	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
	vq->packed.device_event_dma_addr = device_event_dma_addr;

	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
	vq->packed.event_size_in_bytes = event_size_in_bytes;

	vq->packed.vring.num = num;
	vq->packed.vring.desc = ring;
	vq->packed.vring.driver = driver;
	vq->packed.vring.device = device;

	vq->packed.next_avail_idx = 0;
	vq->packed.avail_wrap_counter = 1;
	vq->packed.used_wrap_counter = 1;
	vq->packed.event_flags_shadow = 0;
	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	vq->packed.desc_state = kmalloc_array(num,
			sizeof(struct vring_desc_state_packed),
			GFP_KERNEL);
	if (!vq->packed.desc_state)
		goto err_desc_state;

	memset(vq->packed.desc_state, 0,
		num * sizeof(struct vring_desc_state_packed));

	/* Put everything in free lists. */
	vq->free_head = 0;

	vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
	if (!vq->packed.desc_extra)
		goto err_desc_extra;

	/* No callback? Tell other side not to bother us. */
	if (!callback) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;

err_desc_extra:
	kfree(vq->packed.desc_state);
err_desc_state:
	kfree(vq);
err_vq:
	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
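 * A minimal usage sketch (all names illustrative, not from this file):
 * queueing a request with one driver-readable and one device-writable
 * buffer:
 *
 *	struct scatterlist hdr, status, *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *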
1838 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1839 */
virtqueue_add_sgs(struct virtqueue * _vq,struct scatterlist * sgs[],unsigned int out_sgs,unsigned int in_sgs,void * data,gfp_t gfp)1840 int virtqueue_add_sgs(struct virtqueue *_vq,
1841 struct scatterlist *sgs[],
1842 unsigned int out_sgs,
1843 unsigned int in_sgs,
1844 void *data,
1845 gfp_t gfp)
1846 {
1847 unsigned int i, total_sg = 0;
1848
1849 /* Count them first. */
1850 for (i = 0; i < out_sgs + in_sgs; i++) {
1851 struct scatterlist *sg;
1852
1853 for (sg = sgs[i]; sg; sg = sg_next(sg))
1854 total_sg++;
1855 }
1856 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1857 data, NULL, gfp);
1858 }
1859 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
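
/*
 * Example (hypothetical driver code, for illustration only): post a single
 * device-readable buffer, using the buffer pointer itself as the token.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_KERNEL);
 */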

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
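
/*
 * Example (hypothetical driver code; page and rx_ctx are assumed driver
 * state): post a receive buffer together with a per-buffer context
 * pointer; both come back later from virtqueue_get_buf_ctx().
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, page_address(page), PAGE_SIZE);
 *	err = virtqueue_add_inbuf_ctx(vq, &sg, 1, page, rx_ctx, GFP_ATOMIC);
 */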

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
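
/*
 * Example (hypothetical driver code; sg/bufs are assumed driver state):
 * add a batch of buffers, then kick once; the notification may be
 * suppressed entirely if the other side asked not to be disturbed.
 *
 *	for (i = 0; i < n; i++)
 *		virtqueue_add_outbuf(vq, &sg[i], 1, bufs[i], GFP_ATOMIC);
 *	virtqueue_kick(vq);
 */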

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written. This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
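
/*
 * Example (hypothetical completion path): drain everything the device has
 * marked used, e.g. from the virtqueue callback. consume() stands in for
 * driver-specific processing.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)))
 *		consume(buf, len);
 */
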
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
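
/*
 * Example (hypothetical polling path; "again" is an assumed driver label):
 * re-enable callbacks, then use the returned opaque value to close the
 * race against buffers that became used in the meantime.
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		goto again;
 *	}
 */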

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
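
/*
 * Example (hypothetical driver code): the usual process-then-re-enable
 * loop; if virtqueue_enable_cb() reports pending buffers, go around again
 * rather than waiting for an interrupt.
 *
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			consume(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */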

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
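
/*
 * Example (hypothetical removal path): after the device has been reset,
 * reclaim buffers that were added but never used.
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)))
 *		kfree(buf);
 */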

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		vq->event_triggered = true;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
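
/*
 * Transports wire this up as the queue's interrupt handler, along the
 * lines of (a sketch of hypothetical transport code):
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 */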

/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	struct vring_virtqueue *vq;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->event_triggered = false;
	vq->num_added = 0;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->split.queue_dma_addr = 0;
	vq->split.queue_size_in_bytes = 0;

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback? Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state)
		goto err_state;

	vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
	if (!vq->split.desc_extra)
		goto err_extra;

	/* Put everything in free lists. */
	vq->free_head = 0;
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;

err_extra:
	kfree(vq->split.desc_state);
err_state:
	kfree(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
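
/*
 * Example (a sketch of hypothetical transport code; the notify/callback
 * helpers, queue name, size, and alignment choice are assumptions, not
 * requirements of this API):
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requestq");
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */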

/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	if (!vq->packed_ring) {
		kfree(vq->split.desc_state);
		kfree(vq->split.desc_extra);
	}
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
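
/*
 * Example (a sketch of hypothetical transport code): call this from the
 * transport's .finalize_features hook so that unknown transport feature
 * bits are never acknowledged.
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */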

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring. This is mainly used for boasting to
 * userspace. Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover. You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

/*
 * Prevents use of DMA API for buffers passed via the specified virtqueue.
 * DMA API may still be used for the vrings themselves.
 */
void virtqueue_disable_dma_api_for_buffers(struct virtqueue *vq)
{
	to_vvq(vq)->use_dma_api = false;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_dma_api_for_buffers);

MODULE_LICENSE("GPL");