1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2009 Red Hat, Inc.
3  * Copyright (C) 2006 Rusty Russell IBM Corporation
4  *
5  * Author: Michael S. Tsirkin <mst@redhat.com>
6  *
7  * Inspiration, some code, and most witty comments come from
8  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
9  *
10  * Generic code for virtio server in host kernel.
11  */
12 
13 #include <linux/eventfd.h>
14 #include <linux/vhost.h>
15 #include <linux/uio.h>
16 #include <linux/mm.h>
17 #include <linux/miscdevice.h>
18 #include <linux/mutex.h>
19 #include <linux/poll.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/kthread.h>
25 #include <linux/cgroup.h>
26 #include <linux/module.h>
27 #include <linux/sort.h>
28 #include <linux/sched/mm.h>
29 #include <linux/sched/signal.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/nospec.h>
32 #include <linux/kcov.h>
33 
34 #include "vhost.h"
35 
36 static ushort max_mem_regions = 64;
37 module_param(max_mem_regions, ushort, 0444);
38 MODULE_PARM_DESC(max_mem_regions,
39 	"Maximum number of memory regions in memory map. (default: 64)");
40 static int max_iotlb_entries = 2048;
41 module_param(max_iotlb_entries, int, 0444);
42 MODULE_PARM_DESC(max_iotlb_entries,
43 	"Maximum number of iotlb entries. (default: 2048)");
44 
45 enum {
46 	VHOST_MEMORY_F_LOG = 0x1,
47 };
48 
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
51 
52 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
54 {
55 	vq->user_be = !virtio_legacy_is_little_endian();
56 }
57 
58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
59 {
60 	vq->user_be = true;
61 }
62 
63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
64 {
65 	vq->user_be = false;
66 }
67 
68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
69 {
70 	struct vhost_vring_state s;
71 
72 	if (vq->private_data)
73 		return -EBUSY;
74 
75 	if (copy_from_user(&s, argp, sizeof(s)))
76 		return -EFAULT;
77 
78 	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
79 	    s.num != VHOST_VRING_BIG_ENDIAN)
80 		return -EINVAL;
81 
82 	if (s.num == VHOST_VRING_BIG_ENDIAN)
83 		vhost_enable_cross_endian_big(vq);
84 	else
85 		vhost_enable_cross_endian_little(vq);
86 
87 	return 0;
88 }
89 
90 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
91 				   int __user *argp)
92 {
93 	struct vhost_vring_state s = {
94 		.index = idx,
95 		.num = vq->user_be
96 	};
97 
98 	if (copy_to_user(argp, &s, sizeof(s)))
99 		return -EFAULT;
100 
101 	return 0;
102 }
103 
104 static void vhost_init_is_le(struct vhost_virtqueue *vq)
105 {
106 	/* Note for legacy virtio: user_be is initialized at reset time
107 	 * according to the host endianness. If userspace does not set an
108 	 * explicit endianness, the default behavior is native endian, as
109 	 * expected by legacy virtio.
110 	 */
111 	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
112 }
113 #else
114 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
115 {
116 }
117 
118 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
119 {
120 	return -ENOIOCTLCMD;
121 }
122 
123 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
124 				   int __user *argp)
125 {
126 	return -ENOIOCTLCMD;
127 }
128 
129 static void vhost_init_is_le(struct vhost_virtqueue *vq)
130 {
131 	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
132 		|| virtio_legacy_is_little_endian();
133 }
134 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
135 
136 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
137 {
138 	vhost_init_is_le(vq);
139 }
140 
141 struct vhost_flush_struct {
142 	struct vhost_work work;
143 	struct completion wait_event;
144 };
145 
146 static void vhost_flush_work(struct vhost_work *work)
147 {
148 	struct vhost_flush_struct *s;
149 
150 	s = container_of(work, struct vhost_flush_struct, work);
151 	complete(&s->wait_event);
152 }
153 
154 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
155 			    poll_table *pt)
156 {
157 	struct vhost_poll *poll;
158 
159 	poll = container_of(pt, struct vhost_poll, table);
160 	poll->wqh = wqh;
161 	add_wait_queue(wqh, &poll->wait);
162 }
163 
164 static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
165 			     void *key)
166 {
167 	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
168 	struct vhost_work *work = &poll->work;
169 
170 	if (!(key_to_poll(key) & poll->mask))
171 		return 0;
172 
173 	if (!poll->dev->use_worker)
174 		work->fn(work);
175 	else
176 		vhost_poll_queue(poll);
177 
178 	return 0;
179 }
180 
181 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
182 {
183 	clear_bit(VHOST_WORK_QUEUED, &work->flags);
184 	work->fn = fn;
185 }
186 EXPORT_SYMBOL_GPL(vhost_work_init);
187 
188 /* Init poll structure */
189 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
190 		     __poll_t mask, struct vhost_dev *dev)
191 {
192 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
193 	init_poll_funcptr(&poll->table, vhost_poll_func);
194 	poll->mask = mask;
195 	poll->dev = dev;
196 	poll->wqh = NULL;
197 
198 	vhost_work_init(&poll->work, fn);
199 }
200 EXPORT_SYMBOL_GPL(vhost_poll_init);
201 
202 /* Start polling a file. We add ourselves to the file's wait queue. The caller must
203  * keep a reference to the file until after vhost_poll_stop is called. */
204 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
205 {
206 	__poll_t mask;
207 
208 	if (poll->wqh)
209 		return 0;
210 
211 	mask = vfs_poll(file, &poll->table);
212 	if (mask)
213 		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214 	if (mask & EPOLLERR) {
215 		vhost_poll_stop(poll);
216 		return -EINVAL;
217 	}
218 
219 	return 0;
220 }
221 EXPORT_SYMBOL_GPL(vhost_poll_start);
222 
223 /* Stop polling a file. After this function returns, it becomes safe to drop the
224  * file reference. You must also flush afterwards. */
225 void vhost_poll_stop(struct vhost_poll *poll)
226 {
227 	if (poll->wqh) {
228 		remove_wait_queue(poll->wqh, &poll->wait);
229 		poll->wqh = NULL;
230 	}
231 }
232 EXPORT_SYMBOL_GPL(vhost_poll_stop);
233 
234 void vhost_work_dev_flush(struct vhost_dev *dev)
235 {
236 	struct vhost_flush_struct flush;
237 
238 	if (dev->worker) {
239 		init_completion(&flush.wait_event);
240 		vhost_work_init(&flush.work, vhost_flush_work);
241 
242 		vhost_work_queue(dev, &flush.work);
243 		wait_for_completion(&flush.wait_event);
244 	}
245 }
246 EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
247 
248 /* Flush any work that has been scheduled. When calling this, don't hold any
249  * locks that are also used by the callback. */
250 void vhost_poll_flush(struct vhost_poll *poll)
251 {
252 	vhost_work_dev_flush(poll->dev);
253 }
254 EXPORT_SYMBOL_GPL(vhost_poll_flush);
255 
256 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
257 {
258 	if (!dev->worker)
259 		return;
260 
261 	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
262 		/* We can only add the work to the list after we're
263 		 * sure it was not in the list.
264 		 * test_and_set_bit() implies a memory barrier.
265 		 */
266 		llist_add(&work->node, &dev->work_list);
267 		wake_up_process(dev->worker);
268 	}
269 }
270 EXPORT_SYMBOL_GPL(vhost_work_queue);
271 
272 /* A lockless hint for busy polling code to exit the loop */
273 bool vhost_has_work(struct vhost_dev *dev)
274 {
275 	return !llist_empty(&dev->work_list);
276 }
277 EXPORT_SYMBOL_GPL(vhost_has_work);
278 
279 void vhost_poll_queue(struct vhost_poll *poll)
280 {
281 	vhost_work_queue(poll->dev, &poll->work);
282 }
283 EXPORT_SYMBOL_GPL(vhost_poll_queue);
284 
285 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
286 {
287 	int j;
288 
289 	for (j = 0; j < VHOST_NUM_ADDRS; j++)
290 		vq->meta_iotlb[j] = NULL;
291 }
292 
293 static void vhost_vq_meta_reset(struct vhost_dev *d)
294 {
295 	int i;
296 
297 	for (i = 0; i < d->nvqs; ++i)
298 		__vhost_vq_meta_reset(d->vqs[i]);
299 }
300 
301 static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
302 {
303 	call_ctx->ctx = NULL;
304 	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
305 }
306 
307 bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
308 {
309 	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
310 }
311 EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
312 
313 static void vhost_vq_reset(struct vhost_dev *dev,
314 			   struct vhost_virtqueue *vq)
315 {
316 	vq->num = 1;
317 	vq->desc = NULL;
318 	vq->avail = NULL;
319 	vq->used = NULL;
320 	vq->last_avail_idx = 0;
321 	vq->avail_idx = 0;
322 	vq->last_used_idx = 0;
323 	vq->signalled_used = 0;
324 	vq->signalled_used_valid = false;
325 	vq->used_flags = 0;
326 	vq->log_used = false;
327 	vq->log_addr = -1ull;
328 	vq->private_data = NULL;
329 	vq->acked_features = 0;
330 	vq->acked_backend_features = 0;
331 	vq->log_base = NULL;
332 	vq->error_ctx = NULL;
333 	vq->kick = NULL;
334 	vq->log_ctx = NULL;
335 	vhost_disable_cross_endian(vq);
336 	vhost_reset_is_le(vq);
337 	vq->busyloop_timeout = 0;
338 	vq->umem = NULL;
339 	vq->iotlb = NULL;
340 	vhost_vring_call_reset(&vq->call_ctx);
341 	__vhost_vq_meta_reset(vq);
342 }
343 
344 static int vhost_worker(void *data)
345 {
346 	struct vhost_dev *dev = data;
347 	struct vhost_work *work, *work_next;
348 	struct llist_node *node;
349 
350 	kthread_use_mm(dev->mm);
351 
352 	for (;;) {
353 		/* mb paired w/ kthread_stop */
354 		set_current_state(TASK_INTERRUPTIBLE);
355 
356 		if (kthread_should_stop()) {
357 			__set_current_state(TASK_RUNNING);
358 			break;
359 		}
360 
361 		node = llist_del_all(&dev->work_list);
362 		if (!node)
363 			schedule();
364 
365 		node = llist_reverse_order(node);
366 		/* make sure flag is seen after deletion */
367 		smp_wmb();
368 		llist_for_each_entry_safe(work, work_next, node, node) {
369 			clear_bit(VHOST_WORK_QUEUED, &work->flags);
370 			__set_current_state(TASK_RUNNING);
371 			kcov_remote_start_common(dev->kcov_handle);
372 			work->fn(work);
373 			kcov_remote_stop();
374 			if (need_resched())
375 				schedule();
376 		}
377 	}
378 	kthread_unuse_mm(dev->mm);
379 	return 0;
380 }
381 
382 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
383 {
384 	kfree(vq->indirect);
385 	vq->indirect = NULL;
386 	kfree(vq->log);
387 	vq->log = NULL;
388 	kfree(vq->heads);
389 	vq->heads = NULL;
390 }
391 
392 /* Helper to allocate iovec buffers for all vqs. */
393 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
394 {
395 	struct vhost_virtqueue *vq;
396 	int i;
397 
398 	for (i = 0; i < dev->nvqs; ++i) {
399 		vq = dev->vqs[i];
400 		vq->indirect = kmalloc_array(UIO_MAXIOV,
401 					     sizeof(*vq->indirect),
402 					     GFP_KERNEL);
403 		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
404 					GFP_KERNEL);
405 		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
406 					  GFP_KERNEL);
407 		if (!vq->indirect || !vq->log || !vq->heads)
408 			goto err_nomem;
409 	}
410 	return 0;
411 
412 err_nomem:
413 	for (; i >= 0; --i)
414 		vhost_vq_free_iovecs(dev->vqs[i]);
415 	return -ENOMEM;
416 }
417 
418 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
419 {
420 	int i;
421 
422 	for (i = 0; i < dev->nvqs; ++i)
423 		vhost_vq_free_iovecs(dev->vqs[i]);
424 }
425 
426 bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
427 			  int pkts, int total_len)
428 {
429 	struct vhost_dev *dev = vq->dev;
430 
431 	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
432 	    pkts >= dev->weight) {
433 		vhost_poll_queue(&vq->poll);
434 		return true;
435 	}
436 
437 	return false;
438 }
439 EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
440 
441 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
442 				   unsigned int num)
443 {
444 	size_t event __maybe_unused =
445 	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
446 
447 	return sizeof(*vq->avail) +
448 	       sizeof(*vq->avail->ring) * num + event;
449 }
450 
451 static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
452 				  unsigned int num)
453 {
454 	size_t event __maybe_unused =
455 	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
456 
457 	return sizeof(*vq->used) +
458 	       sizeof(*vq->used->ring) * num + event;
459 }
460 
461 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
462 				  unsigned int num)
463 {
464 	return sizeof(*vq->desc) * num;
465 }
466 
467 void vhost_dev_init(struct vhost_dev *dev,
468 		    struct vhost_virtqueue **vqs, int nvqs,
469 		    int iov_limit, int weight, int byte_weight,
470 		    bool use_worker,
471 		    int (*msg_handler)(struct vhost_dev *dev,
472 				       struct vhost_iotlb_msg *msg))
473 {
474 	struct vhost_virtqueue *vq;
475 	int i;
476 
477 	dev->vqs = vqs;
478 	dev->nvqs = nvqs;
479 	mutex_init(&dev->mutex);
480 	dev->log_ctx = NULL;
481 	dev->umem = NULL;
482 	dev->iotlb = NULL;
483 	dev->mm = NULL;
484 	dev->worker = NULL;
485 	dev->iov_limit = iov_limit;
486 	dev->weight = weight;
487 	dev->byte_weight = byte_weight;
488 	dev->use_worker = use_worker;
489 	dev->msg_handler = msg_handler;
490 	init_llist_head(&dev->work_list);
491 	init_waitqueue_head(&dev->wait);
492 	INIT_LIST_HEAD(&dev->read_list);
493 	INIT_LIST_HEAD(&dev->pending_list);
494 	spin_lock_init(&dev->iotlb_lock);
495 
496 
497 	for (i = 0; i < dev->nvqs; ++i) {
498 		vq = dev->vqs[i];
499 		vq->log = NULL;
500 		vq->indirect = NULL;
501 		vq->heads = NULL;
502 		vq->dev = dev;
503 		mutex_init(&vq->mutex);
504 		vhost_vq_reset(dev, vq);
505 		if (vq->handle_kick)
506 			vhost_poll_init(&vq->poll, vq->handle_kick,
507 					EPOLLIN, dev);
508 	}
509 }
510 EXPORT_SYMBOL_GPL(vhost_dev_init);
511 
512 /* Caller should have device mutex */
513 long vhost_dev_check_owner(struct vhost_dev *dev)
514 {
515 	/* Are you the owner? If not, I don't think you mean to do that */
516 	return dev->mm == current->mm ? 0 : -EPERM;
517 }
518 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
519 
520 struct vhost_attach_cgroups_struct {
521 	struct vhost_work work;
522 	struct task_struct *owner;
523 	int ret;
524 };
525 
526 static void vhost_attach_cgroups_work(struct vhost_work *work)
527 {
528 	struct vhost_attach_cgroups_struct *s;
529 
530 	s = container_of(work, struct vhost_attach_cgroups_struct, work);
531 	s->ret = cgroup_attach_task_all(s->owner, current);
532 }
533 
534 static int vhost_attach_cgroups(struct vhost_dev *dev)
535 {
536 	struct vhost_attach_cgroups_struct attach;
537 
538 	attach.owner = current;
539 	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
540 	vhost_work_queue(dev, &attach.work);
541 	vhost_work_dev_flush(dev);
542 	return attach.ret;
543 }
544 
545 /* Caller should have device mutex */
546 bool vhost_dev_has_owner(struct vhost_dev *dev)
547 {
548 	return dev->mm;
549 }
550 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
551 
552 static void vhost_attach_mm(struct vhost_dev *dev)
553 {
554 	/* No owner, become one */
555 	if (dev->use_worker) {
556 		dev->mm = get_task_mm(current);
557 	} else {
558 		/* vDPA devices do not use a worker thread, so there's
559 		 * no need to hold the address space for mm. This helps
560 		 * to avoid a deadlock in the case of mmap(), which may
561 		 * hold the refcnt of the file and depends on the release
562 		 * method to remove the vma.
563 		 */
564 		dev->mm = current->mm;
565 		mmgrab(dev->mm);
566 	}
567 }
568 
569 static void vhost_detach_mm(struct vhost_dev *dev)
570 {
571 	if (!dev->mm)
572 		return;
573 
574 	if (dev->use_worker)
575 		mmput(dev->mm);
576 	else
577 		mmdrop(dev->mm);
578 
579 	dev->mm = NULL;
580 }
581 
582 /* Caller should have device mutex */
583 long vhost_dev_set_owner(struct vhost_dev *dev)
584 {
585 	struct task_struct *worker;
586 	int err;
587 
588 	/* Is there an owner already? */
589 	if (vhost_dev_has_owner(dev)) {
590 		err = -EBUSY;
591 		goto err_mm;
592 	}
593 
594 	vhost_attach_mm(dev);
595 
596 	dev->kcov_handle = kcov_common_handle();
597 	if (dev->use_worker) {
598 		worker = kthread_create(vhost_worker, dev,
599 					"vhost-%d", current->pid);
600 		if (IS_ERR(worker)) {
601 			err = PTR_ERR(worker);
602 			goto err_worker;
603 		}
604 
605 		dev->worker = worker;
606 		wake_up_process(worker); /* avoid contributing to loadavg */
607 
608 		err = vhost_attach_cgroups(dev);
609 		if (err)
610 			goto err_cgroup;
611 	}
612 
613 	err = vhost_dev_alloc_iovecs(dev);
614 	if (err)
615 		goto err_cgroup;
616 
617 	return 0;
618 err_cgroup:
619 	if (dev->worker) {
620 		kthread_stop(dev->worker);
621 		dev->worker = NULL;
622 	}
623 err_worker:
624 	vhost_detach_mm(dev);
625 	dev->kcov_handle = 0;
626 err_mm:
627 	return err;
628 }
629 EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
630 
631 static struct vhost_iotlb *iotlb_alloc(void)
632 {
633 	return vhost_iotlb_alloc(max_iotlb_entries,
634 				 VHOST_IOTLB_FLAG_RETIRE);
635 }
636 
637 struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
638 {
639 	return iotlb_alloc();
640 }
641 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
642 
643 /* Caller should have device mutex */
644 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
645 {
646 	int i;
647 
648 	vhost_dev_cleanup(dev);
649 
650 	dev->umem = umem;
651 	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
652 	 * VQs aren't running.
653 	 */
654 	for (i = 0; i < dev->nvqs; ++i)
655 		dev->vqs[i]->umem = umem;
656 }
657 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
658 
659 void vhost_dev_stop(struct vhost_dev *dev)
660 {
661 	int i;
662 
663 	for (i = 0; i < dev->nvqs; ++i) {
664 		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
665 			vhost_poll_stop(&dev->vqs[i]->poll);
666 			vhost_poll_flush(&dev->vqs[i]->poll);
667 		}
668 	}
669 }
670 EXPORT_SYMBOL_GPL(vhost_dev_stop);
671 
672 void vhost_clear_msg(struct vhost_dev *dev)
673 {
674 	struct vhost_msg_node *node, *n;
675 
676 	spin_lock(&dev->iotlb_lock);
677 
678 	list_for_each_entry_safe(node, n, &dev->read_list, node) {
679 		list_del(&node->node);
680 		kfree(node);
681 	}
682 
683 	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
684 		list_del(&node->node);
685 		kfree(node);
686 	}
687 
688 	spin_unlock(&dev->iotlb_lock);
689 }
690 EXPORT_SYMBOL_GPL(vhost_clear_msg);
691 
692 void vhost_dev_cleanup(struct vhost_dev *dev)
693 {
694 	int i;
695 
696 	for (i = 0; i < dev->nvqs; ++i) {
697 		if (dev->vqs[i]->error_ctx)
698 			eventfd_ctx_put(dev->vqs[i]->error_ctx);
699 		if (dev->vqs[i]->kick)
700 			fput(dev->vqs[i]->kick);
701 		if (dev->vqs[i]->call_ctx.ctx)
702 			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
703 		vhost_vq_reset(dev, dev->vqs[i]);
704 	}
705 	vhost_dev_free_iovecs(dev);
706 	if (dev->log_ctx)
707 		eventfd_ctx_put(dev->log_ctx);
708 	dev->log_ctx = NULL;
709 	/* No one will access memory at this point */
710 	vhost_iotlb_free(dev->umem);
711 	dev->umem = NULL;
712 	vhost_iotlb_free(dev->iotlb);
713 	dev->iotlb = NULL;
714 	vhost_clear_msg(dev);
715 	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
716 	WARN_ON(!llist_empty(&dev->work_list));
717 	if (dev->worker) {
718 		kthread_stop(dev->worker);
719 		dev->worker = NULL;
720 		dev->kcov_handle = 0;
721 	}
722 	vhost_detach_mm(dev);
723 }
724 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
725 
726 static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
727 {
728 	u64 a = addr / VHOST_PAGE_SIZE / 8;
729 
730 	/* Make sure 64 bit math will not overflow. */
731 	if (a > ULONG_MAX - (unsigned long)log_base ||
732 	    a + (unsigned long)log_base > ULONG_MAX)
733 		return false;
734 
735 	return access_ok(log_base + a,
736 			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
737 }
738 
739 /* Make sure 64 bit math will not overflow. */
740 static bool vhost_overflow(u64 uaddr, u64 size)
741 {
742 	if (uaddr > ULONG_MAX || size > ULONG_MAX)
743 		return true;
744 
745 	if (!size)
746 		return false;
747 
748 	return uaddr > ULONG_MAX - size + 1;
749 }
750 
751 /* Caller should have vq mutex and device mutex. */
752 static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
753 				int log_all)
754 {
755 	struct vhost_iotlb_map *map;
756 
757 	if (!umem)
758 		return false;
759 
760 	list_for_each_entry(map, &umem->list, link) {
761 		unsigned long a = map->addr;
762 
763 		if (vhost_overflow(map->addr, map->size))
764 			return false;
765 
766 
767 		if (!access_ok((void __user *)a, map->size))
768 			return false;
769 		else if (log_all && !log_access_ok(log_base,
770 						   map->start,
771 						   map->size))
772 			return false;
773 	}
774 	return true;
775 }
776 
777 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
778 					       u64 addr, unsigned int size,
779 					       int type)
780 {
781 	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
782 
783 	if (!map)
784 		return NULL;
785 
786 	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
787 }
788 
789 /* Can we switch to this memory table? */
790 /* Caller should have device mutex but not vq mutex */
791 static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
792 			     int log_all)
793 {
794 	int i;
795 
796 	for (i = 0; i < d->nvqs; ++i) {
797 		bool ok;
798 		bool log;
799 
800 		mutex_lock(&d->vqs[i]->mutex);
801 		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
802 		/* If ring is inactive, will check when it's enabled. */
803 		if (d->vqs[i]->private_data)
804 			ok = vq_memory_access_ok(d->vqs[i]->log_base,
805 						 umem, log);
806 		else
807 			ok = true;
808 		mutex_unlock(&d->vqs[i]->mutex);
809 		if (!ok)
810 			return false;
811 	}
812 	return true;
813 }
814 
815 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
816 			  struct iovec iov[], int iov_size, int access);
817 
818 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
819 			      const void *from, unsigned size)
820 {
821 	int ret;
822 
823 	if (!vq->iotlb)
824 		return __copy_to_user(to, from, size);
825 	else {
826 		/* This function should be called after iotlb
827 		 * prefetch, which means we're sure that the vq
828 		 * can be accessed through the iotlb. So -EAGAIN should
829 		 * not happen in this case.
830 		 */
831 		struct iov_iter t;
832 		void __user *uaddr = vhost_vq_meta_fetch(vq,
833 				     (u64)(uintptr_t)to, size,
834 				     VHOST_ADDR_USED);
835 
836 		if (uaddr)
837 			return __copy_to_user(uaddr, from, size);
838 
839 		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
840 				     ARRAY_SIZE(vq->iotlb_iov),
841 				     VHOST_ACCESS_WO);
842 		if (ret < 0)
843 			goto out;
844 		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
845 		ret = copy_to_iter(from, size, &t);
846 		if (ret == size)
847 			ret = 0;
848 	}
849 out:
850 	return ret;
851 }
852 
853 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
854 				void __user *from, unsigned size)
855 {
856 	int ret;
857 
858 	if (!vq->iotlb)
859 		return __copy_from_user(to, from, size);
860 	else {
861 		/* This function should be called after iotlb
862 		 * prefetch, which means we're sure that the vq
863 		 * can be accessed through the iotlb. So -EAGAIN should
864 		 * not happen in this case.
865 		 */
866 		void __user *uaddr = vhost_vq_meta_fetch(vq,
867 				     (u64)(uintptr_t)from, size,
868 				     VHOST_ADDR_DESC);
869 		struct iov_iter f;
870 
871 		if (uaddr)
872 			return __copy_from_user(to, uaddr, size);
873 
874 		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
875 				     ARRAY_SIZE(vq->iotlb_iov),
876 				     VHOST_ACCESS_RO);
877 		if (ret < 0) {
878 			vq_err(vq, "IOTLB translation failure: uaddr "
879 			       "%p size 0x%llx\n", from,
880 			       (unsigned long long) size);
881 			goto out;
882 		}
883 		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
884 		ret = copy_from_iter(to, size, &f);
885 		if (ret == size)
886 			ret = 0;
887 	}
888 
889 out:
890 	return ret;
891 }
892 
893 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
894 					  void __user *addr, unsigned int size,
895 					  int type)
896 {
897 	int ret;
898 
899 	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
900 			     ARRAY_SIZE(vq->iotlb_iov),
901 			     VHOST_ACCESS_RO);
902 	if (ret < 0) {
903 		vq_err(vq, "IOTLB translation failure: uaddr "
904 			"%p size 0x%llx\n", addr,
905 			(unsigned long long) size);
906 		return NULL;
907 	}
908 
909 	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
910 		vq_err(vq, "Non atomic userspace memory access: uaddr "
911 			"%p size 0x%llx\n", addr,
912 			(unsigned long long) size);
913 		return NULL;
914 	}
915 
916 	return vq->iotlb_iov[0].iov_base;
917 }
918 
919 /* This function should be called after iotlb
920  * prefetch, which means we're sure that the vq
921  * can be accessed through the iotlb. So -EAGAIN should
922  * not happen in this case.
923  */
924 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
925 					    void __user *addr, unsigned int size,
926 					    int type)
927 {
928 	void __user *uaddr = vhost_vq_meta_fetch(vq,
929 			     (u64)(uintptr_t)addr, size, type);
930 	if (uaddr)
931 		return uaddr;
932 
933 	return __vhost_get_user_slow(vq, addr, size, type);
934 }
935 
936 #define vhost_put_user(vq, x, ptr)		\
937 ({ \
938 	int ret; \
939 	if (!vq->iotlb) { \
940 		ret = __put_user(x, ptr); \
941 	} else { \
942 		__typeof__(ptr) to = \
943 			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
944 					  sizeof(*ptr), VHOST_ADDR_USED); \
945 		if (to != NULL) \
946 			ret = __put_user(x, to); \
947 		else \
948 			ret = -EFAULT;	\
949 	} \
950 	ret; \
951 })
952 
953 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
954 {
955 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
956 			      vhost_avail_event(vq));
957 }
958 
959 static inline int vhost_put_used(struct vhost_virtqueue *vq,
960 				 struct vring_used_elem *head, int idx,
961 				 int count)
962 {
963 	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
964 				  count * sizeof(*head));
965 }
966 
967 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
968 
969 {
970 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
971 			      &vq->used->flags);
972 }
973 
974 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
975 
976 {
977 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
978 			      &vq->used->idx);
979 }
980 
981 #define vhost_get_user(vq, x, ptr, type)		\
982 ({ \
983 	int ret; \
984 	if (!vq->iotlb) { \
985 		ret = __get_user(x, ptr); \
986 	} else { \
987 		__typeof__(ptr) from = \
988 			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
989 							   sizeof(*ptr), \
990 							   type); \
991 		if (from != NULL) \
992 			ret = __get_user(x, from); \
993 		else \
994 			ret = -EFAULT; \
995 	} \
996 	ret; \
997 })
998 
999 #define vhost_get_avail(vq, x, ptr) \
1000 	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
1001 
1002 #define vhost_get_used(vq, x, ptr) \
1003 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
1004 
1005 static void vhost_dev_lock_vqs(struct vhost_dev *d)
1006 {
1007 	int i = 0;
1008 	for (i = 0; i < d->nvqs; ++i)
1009 		mutex_lock_nested(&d->vqs[i]->mutex, i);
1010 }
1011 
1012 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1013 {
1014 	int i = 0;
1015 	for (i = 0; i < d->nvqs; ++i)
1016 		mutex_unlock(&d->vqs[i]->mutex);
1017 }
1018 
1019 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1020 				      __virtio16 *idx)
1021 {
1022 	return vhost_get_avail(vq, *idx, &vq->avail->idx);
1023 }
1024 
1025 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1026 				       __virtio16 *head, int idx)
1027 {
1028 	return vhost_get_avail(vq, *head,
1029 			       &vq->avail->ring[idx & (vq->num - 1)]);
1030 }
1031 
1032 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1033 					__virtio16 *flags)
1034 {
1035 	return vhost_get_avail(vq, *flags, &vq->avail->flags);
1036 }
1037 
1038 static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1039 				       __virtio16 *event)
1040 {
1041 	return vhost_get_avail(vq, *event, vhost_used_event(vq));
1042 }
1043 
1044 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1045 				     __virtio16 *idx)
1046 {
1047 	return vhost_get_used(vq, *idx, &vq->used->idx);
1048 }
1049 
1050 static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1051 				 struct vring_desc *desc, int idx)
1052 {
1053 	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1054 }
1055 
1056 static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1057 				  struct vhost_iotlb_msg *msg)
1058 {
1059 	struct vhost_msg_node *node, *n;
1060 
1061 	spin_lock(&d->iotlb_lock);
1062 
1063 	list_for_each_entry_safe(node, n, &d->pending_list, node) {
1064 		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1065 		if (msg->iova <= vq_msg->iova &&
1066 		    msg->iova + msg->size - 1 >= vq_msg->iova &&
1067 		    vq_msg->type == VHOST_IOTLB_MISS) {
1068 			vhost_poll_queue(&node->vq->poll);
1069 			list_del(&node->node);
1070 			kfree(node);
1071 		}
1072 	}
1073 
1074 	spin_unlock(&d->iotlb_lock);
1075 }
1076 
1077 static bool umem_access_ok(u64 uaddr, u64 size, int access)
1078 {
1079 	unsigned long a = uaddr;
1080 
1081 	/* Make sure 64 bit math will not overflow. */
1082 	if (vhost_overflow(uaddr, size))
1083 		return false;
1084 
1085 	if ((access & VHOST_ACCESS_RO) &&
1086 	    !access_ok((void __user *)a, size))
1087 		return false;
1088 	if ((access & VHOST_ACCESS_WO) &&
1089 	    !access_ok((void __user *)a, size))
1090 		return false;
1091 	return true;
1092 }
1093 
1094 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1095 				   struct vhost_iotlb_msg *msg)
1096 {
1097 	int ret = 0;
1098 
1099 	mutex_lock(&dev->mutex);
1100 	vhost_dev_lock_vqs(dev);
1101 	switch (msg->type) {
1102 	case VHOST_IOTLB_UPDATE:
1103 		if (!dev->iotlb) {
1104 			ret = -EFAULT;
1105 			break;
1106 		}
1107 		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1108 			ret = -EFAULT;
1109 			break;
1110 		}
1111 		vhost_vq_meta_reset(dev);
1112 		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
1113 					  msg->iova + msg->size - 1,
1114 					  msg->uaddr, msg->perm)) {
1115 			ret = -ENOMEM;
1116 			break;
1117 		}
1118 		vhost_iotlb_notify_vq(dev, msg);
1119 		break;
1120 	case VHOST_IOTLB_INVALIDATE:
1121 		if (!dev->iotlb) {
1122 			ret = -EFAULT;
1123 			break;
1124 		}
1125 		vhost_vq_meta_reset(dev);
1126 		vhost_iotlb_del_range(dev->iotlb, msg->iova,
1127 				      msg->iova + msg->size - 1);
1128 		break;
1129 	default:
1130 		ret = -EINVAL;
1131 		break;
1132 	}
1133 
1134 	vhost_dev_unlock_vqs(dev);
1135 	mutex_unlock(&dev->mutex);
1136 
1137 	return ret;
1138 }
1139 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1140 			     struct iov_iter *from)
1141 {
1142 	struct vhost_iotlb_msg msg;
1143 	size_t offset;
1144 	int type, ret;
1145 
1146 	ret = copy_from_iter(&type, sizeof(type), from);
1147 	if (ret != sizeof(type)) {
1148 		ret = -EINVAL;
1149 		goto done;
1150 	}
1151 
1152 	switch (type) {
1153 	case VHOST_IOTLB_MSG:
1154 		/* There may be a hole after type for the V1 message type,
1155 		 * so skip it here.
1156 		 */
1157 		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1158 		break;
1159 	case VHOST_IOTLB_MSG_V2:
1160 		offset = sizeof(__u32);
1161 		break;
1162 	default:
1163 		ret = -EINVAL;
1164 		goto done;
1165 	}
1166 
1167 	iov_iter_advance(from, offset);
1168 	ret = copy_from_iter(&msg, sizeof(msg), from);
1169 	if (ret != sizeof(msg)) {
1170 		ret = -EINVAL;
1171 		goto done;
1172 	}
1173 
1174 	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
1175 		ret = -EINVAL;
1176 		goto done;
1177 	}
1178 
1179 	if (dev->msg_handler)
1180 		ret = dev->msg_handler(dev, &msg);
1181 	else
1182 		ret = vhost_process_iotlb_msg(dev, &msg);
1183 	if (ret) {
1184 		ret = -EFAULT;
1185 		goto done;
1186 	}
1187 
1188 	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1189 	      sizeof(struct vhost_msg_v2);
1190 done:
1191 	return ret;
1192 }
1193 EXPORT_SYMBOL(vhost_chr_write_iter);
1194 
1195 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1196 			    poll_table *wait)
1197 {
1198 	__poll_t mask = 0;
1199 
1200 	poll_wait(file, &dev->wait, wait);
1201 
1202 	if (!list_empty(&dev->read_list))
1203 		mask |= EPOLLIN | EPOLLRDNORM;
1204 
1205 	return mask;
1206 }
1207 EXPORT_SYMBOL(vhost_chr_poll);
1208 
1209 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1210 			    int noblock)
1211 {
1212 	DEFINE_WAIT(wait);
1213 	struct vhost_msg_node *node;
1214 	ssize_t ret = 0;
1215 	unsigned size = sizeof(struct vhost_msg);
1216 
1217 	if (iov_iter_count(to) < size)
1218 		return 0;
1219 
1220 	while (1) {
1221 		if (!noblock)
1222 			prepare_to_wait(&dev->wait, &wait,
1223 					TASK_INTERRUPTIBLE);
1224 
1225 		node = vhost_dequeue_msg(dev, &dev->read_list);
1226 		if (node)
1227 			break;
1228 		if (noblock) {
1229 			ret = -EAGAIN;
1230 			break;
1231 		}
1232 		if (signal_pending(current)) {
1233 			ret = -ERESTARTSYS;
1234 			break;
1235 		}
1236 		if (!dev->iotlb) {
1237 			ret = -EBADFD;
1238 			break;
1239 		}
1240 
1241 		schedule();
1242 	}
1243 
1244 	if (!noblock)
1245 		finish_wait(&dev->wait, &wait);
1246 
1247 	if (node) {
1248 		struct vhost_iotlb_msg *msg;
1249 		void *start = &node->msg;
1250 
1251 		switch (node->msg.type) {
1252 		case VHOST_IOTLB_MSG:
1253 			size = sizeof(node->msg);
1254 			msg = &node->msg.iotlb;
1255 			break;
1256 		case VHOST_IOTLB_MSG_V2:
1257 			size = sizeof(node->msg_v2);
1258 			msg = &node->msg_v2.iotlb;
1259 			break;
1260 		default:
1261 			BUG();
1262 			break;
1263 		}
1264 
1265 		ret = copy_to_iter(start, size, to);
1266 		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1267 			kfree(node);
1268 			return ret;
1269 		}
1270 		vhost_enqueue_msg(dev, &dev->pending_list, node);
1271 	}
1272 
1273 	return ret;
1274 }
1275 EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1276 
1277 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1278 {
1279 	struct vhost_dev *dev = vq->dev;
1280 	struct vhost_msg_node *node;
1281 	struct vhost_iotlb_msg *msg;
1282 	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1283 
1284 	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1285 	if (!node)
1286 		return -ENOMEM;
1287 
1288 	if (v2) {
1289 		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1290 		msg = &node->msg_v2.iotlb;
1291 	} else {
1292 		msg = &node->msg.iotlb;
1293 	}
1294 
1295 	msg->type = VHOST_IOTLB_MISS;
1296 	msg->iova = iova;
1297 	msg->perm = access;
1298 
1299 	vhost_enqueue_msg(dev, &dev->read_list, node);
1300 
1301 	return 0;
1302 }
1303 
1304 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1305 			 vring_desc_t __user *desc,
1306 			 vring_avail_t __user *avail,
1307 			 vring_used_t __user *used)
1308 
1309 {
1310 	/* If an IOTLB device is present, the vring addresses are
1311 	 * GIOVAs. Access validation occurs at prefetch time. */
1312 	if (vq->iotlb)
1313 		return true;
1314 
1315 	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1316 	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
1317 	       access_ok(used, vhost_get_used_size(vq, num));
1318 }
1319 
1320 static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1321 				 const struct vhost_iotlb_map *map,
1322 				 int type)
1323 {
1324 	int access = (type == VHOST_ADDR_USED) ?
1325 		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1326 
1327 	if (likely(map->perm & access))
1328 		vq->meta_iotlb[type] = map;
1329 }
1330 
1331 static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1332 			    int access, u64 addr, u64 len, int type)
1333 {
1334 	const struct vhost_iotlb_map *map;
1335 	struct vhost_iotlb *umem = vq->iotlb;
1336 	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1337 
1338 	if (vhost_vq_meta_fetch(vq, addr, len, type))
1339 		return true;
1340 
1341 	while (len > s) {
1342 		map = vhost_iotlb_itree_first(umem, addr, last);
1343 		if (map == NULL || map->start > addr) {
1344 			vhost_iotlb_miss(vq, addr, access);
1345 			return false;
1346 		} else if (!(map->perm & access)) {
1347 			/* Report the possible access violation by
1348 			 * requesting another translation from userspace.
1349 			 */
1350 			return false;
1351 		}
1352 
1353 		size = map->size - addr + map->start;
1354 
1355 		if (orig_addr == addr && size >= len)
1356 			vhost_vq_meta_update(vq, map, type);
1357 
1358 		s += size;
1359 		addr += size;
1360 	}
1361 
1362 	return true;
1363 }
1364 
1365 int vq_meta_prefetch(struct vhost_virtqueue *vq)
1366 {
1367 	unsigned int num = vq->num;
1368 
1369 	if (!vq->iotlb)
1370 		return 1;
1371 
1372 	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
1373 			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1374 	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
1375 			       vhost_get_avail_size(vq, num),
1376 			       VHOST_ADDR_AVAIL) &&
1377 	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
1378 			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1379 }
1380 EXPORT_SYMBOL_GPL(vq_meta_prefetch);
1381 
1382 /* Can we log writes? */
1383 /* Caller should have device mutex but not vq mutex */
1384 bool vhost_log_access_ok(struct vhost_dev *dev)
1385 {
1386 	return memory_access_ok(dev, dev->umem, 1);
1387 }
1388 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1389 
1390 static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
1391 				  void __user *log_base,
1392 				  bool log_used,
1393 				  u64 log_addr)
1394 {
1395 	/* If an IOTLB device is present, log_addr is a GIOVA that
1396 	 * will never be logged by log_used(). */
1397 	if (vq->iotlb)
1398 		return true;
1399 
1400 	return !log_used || log_access_ok(log_base, log_addr,
1401 					  vhost_get_used_size(vq, vq->num));
1402 }
1403 
1404 /* Verify access for write logging. */
1405 /* Caller should have vq mutex and device mutex */
1406 static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1407 			     void __user *log_base)
1408 {
1409 	return vq_memory_access_ok(log_base, vq->umem,
1410 				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1411 		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
1412 }
1413 
1414 /* Can we start vq? */
1415 /* Caller should have vq mutex and device mutex */
1416 bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1417 {
1418 	if (!vq_log_access_ok(vq, vq->log_base))
1419 		return false;
1420 
1421 	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1422 }
1423 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1424 
1425 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1426 {
1427 	struct vhost_memory mem, *newmem;
1428 	struct vhost_memory_region *region;
1429 	struct vhost_iotlb *newumem, *oldumem;
1430 	unsigned long size = offsetof(struct vhost_memory, regions);
1431 	int i;
1432 
1433 	if (copy_from_user(&mem, m, size))
1434 		return -EFAULT;
1435 	if (mem.padding)
1436 		return -EOPNOTSUPP;
1437 	if (mem.nregions > max_mem_regions)
1438 		return -E2BIG;
1439 	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1440 			GFP_KERNEL);
1441 	if (!newmem)
1442 		return -ENOMEM;
1443 
1444 	memcpy(newmem, &mem, size);
1445 	if (copy_from_user(newmem->regions, m->regions,
1446 			   flex_array_size(newmem, regions, mem.nregions))) {
1447 		kvfree(newmem);
1448 		return -EFAULT;
1449 	}
1450 
1451 	newumem = iotlb_alloc();
1452 	if (!newumem) {
1453 		kvfree(newmem);
1454 		return -ENOMEM;
1455 	}
1456 
1457 	for (region = newmem->regions;
1458 	     region < newmem->regions + mem.nregions;
1459 	     region++) {
1460 		if (vhost_iotlb_add_range(newumem,
1461 					  region->guest_phys_addr,
1462 					  region->guest_phys_addr +
1463 					  region->memory_size - 1,
1464 					  region->userspace_addr,
1465 					  VHOST_MAP_RW))
1466 			goto err;
1467 	}
1468 
1469 	if (!memory_access_ok(d, newumem, 0))
1470 		goto err;
1471 
1472 	oldumem = d->umem;
1473 	d->umem = newumem;
1474 
1475 	/* All memory accesses are done under some VQ mutex. */
1476 	for (i = 0; i < d->nvqs; ++i) {
1477 		mutex_lock(&d->vqs[i]->mutex);
1478 		d->vqs[i]->umem = newumem;
1479 		mutex_unlock(&d->vqs[i]->mutex);
1480 	}
1481 
1482 	kvfree(newmem);
1483 	vhost_iotlb_free(oldumem);
1484 	return 0;
1485 
1486 err:
1487 	vhost_iotlb_free(newumem);
1488 	kvfree(newmem);
1489 	return -EFAULT;
1490 }
1491 
1492 static long vhost_vring_set_num(struct vhost_dev *d,
1493 				struct vhost_virtqueue *vq,
1494 				void __user *argp)
1495 {
1496 	struct vhost_vring_state s;
1497 
1498 	/* Resizing ring with an active backend?
1499 	 * You don't want to do that. */
1500 	if (vq->private_data)
1501 		return -EBUSY;
1502 
1503 	if (copy_from_user(&s, argp, sizeof s))
1504 		return -EFAULT;
1505 
1506 	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1507 		return -EINVAL;
1508 	vq->num = s.num;
1509 
1510 	return 0;
1511 }
1512 
1513 static long vhost_vring_set_addr(struct vhost_dev *d,
1514 				 struct vhost_virtqueue *vq,
1515 				 void __user *argp)
1516 {
1517 	struct vhost_vring_addr a;
1518 
1519 	if (copy_from_user(&a, argp, sizeof a))
1520 		return -EFAULT;
1521 	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1522 		return -EOPNOTSUPP;
1523 
1524 	/* For 32bit, verify that the top 32bits of the user
1525 	   data are set to zero. */
1526 	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1527 	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1528 	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1529 		return -EFAULT;
1530 
1531 	/* Make sure it's safe to cast pointers to vring types. */
1532 	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1533 	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1534 	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1535 	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1536 	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1537 		return -EINVAL;
1538 
1539 	/* We only verify access here if backend is configured.
1540 	 * If it is not, we don't as size might not have been setup.
1541 	 * We will verify when backend is configured. */
1542 	if (vq->private_data) {
1543 		if (!vq_access_ok(vq, vq->num,
1544 			(void __user *)(unsigned long)a.desc_user_addr,
1545 			(void __user *)(unsigned long)a.avail_user_addr,
1546 			(void __user *)(unsigned long)a.used_user_addr))
1547 			return -EINVAL;
1548 
1549 		/* Also validate log access for used ring if enabled. */
1550 		if (!vq_log_used_access_ok(vq, vq->log_base,
1551 				a.flags & (0x1 << VHOST_VRING_F_LOG),
1552 				a.log_guest_addr))
1553 			return -EINVAL;
1554 	}
1555 
1556 	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1557 	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1558 	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1559 	vq->log_addr = a.log_guest_addr;
1560 	vq->used = (void __user *)(unsigned long)a.used_user_addr;
1561 
1562 	return 0;
1563 }
1564 
1565 static long vhost_vring_set_num_addr(struct vhost_dev *d,
1566 				     struct vhost_virtqueue *vq,
1567 				     unsigned int ioctl,
1568 				     void __user *argp)
1569 {
1570 	long r;
1571 
1572 	mutex_lock(&vq->mutex);
1573 
1574 	switch (ioctl) {
1575 	case VHOST_SET_VRING_NUM:
1576 		r = vhost_vring_set_num(d, vq, argp);
1577 		break;
1578 	case VHOST_SET_VRING_ADDR:
1579 		r = vhost_vring_set_addr(d, vq, argp);
1580 		break;
1581 	default:
1582 		BUG();
1583 	}
1584 
1585 	mutex_unlock(&vq->mutex);
1586 
1587 	return r;
1588 }
1589 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1590 {
1591 	struct file *eventfp, *filep = NULL;
1592 	bool pollstart = false, pollstop = false;
1593 	struct eventfd_ctx *ctx = NULL;
1594 	u32 __user *idxp = argp;
1595 	struct vhost_virtqueue *vq;
1596 	struct vhost_vring_state s;
1597 	struct vhost_vring_file f;
1598 	u32 idx;
1599 	long r;
1600 
1601 	r = get_user(idx, idxp);
1602 	if (r < 0)
1603 		return r;
1604 	if (idx >= d->nvqs)
1605 		return -ENOBUFS;
1606 
1607 	idx = array_index_nospec(idx, d->nvqs);
1608 	vq = d->vqs[idx];
1609 
1610 	if (ioctl == VHOST_SET_VRING_NUM ||
1611 	    ioctl == VHOST_SET_VRING_ADDR) {
1612 		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1613 	}
1614 
1615 	mutex_lock(&vq->mutex);
1616 
1617 	switch (ioctl) {
1618 	case VHOST_SET_VRING_BASE:
1619 		/* Moving base with an active backend?
1620 		 * You don't want to do that. */
1621 		if (vq->private_data) {
1622 			r = -EBUSY;
1623 			break;
1624 		}
1625 		if (copy_from_user(&s, argp, sizeof s)) {
1626 			r = -EFAULT;
1627 			break;
1628 		}
1629 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
1630 			vq->last_avail_idx = s.num & 0xffff;
1631 			vq->last_used_idx = (s.num >> 16) & 0xffff;
1632 		} else {
1633 			if (s.num > 0xffff) {
1634 				r = -EINVAL;
1635 				break;
1636 			}
1637 			vq->last_avail_idx = s.num;
1638 		}
1639 		/* Forget the cached index value. */
1640 		vq->avail_idx = vq->last_avail_idx;
1641 		break;
1642 	case VHOST_GET_VRING_BASE:
1643 		s.index = idx;
1644 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
1645 			s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
1646 		else
1647 			s.num = vq->last_avail_idx;
1648 		if (copy_to_user(argp, &s, sizeof s))
1649 			r = -EFAULT;
1650 		break;
1651 	case VHOST_SET_VRING_KICK:
1652 		if (copy_from_user(&f, argp, sizeof f)) {
1653 			r = -EFAULT;
1654 			break;
1655 		}
1656 		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
1657 		if (IS_ERR(eventfp)) {
1658 			r = PTR_ERR(eventfp);
1659 			break;
1660 		}
1661 		if (eventfp != vq->kick) {
1662 			pollstop = (filep = vq->kick) != NULL;
1663 			pollstart = (vq->kick = eventfp) != NULL;
1664 		} else
1665 			filep = eventfp;
1666 		break;
1667 	case VHOST_SET_VRING_CALL:
1668 		if (copy_from_user(&f, argp, sizeof f)) {
1669 			r = -EFAULT;
1670 			break;
1671 		}
1672 		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1673 		if (IS_ERR(ctx)) {
1674 			r = PTR_ERR(ctx);
1675 			break;
1676 		}
1677 
1678 		swap(ctx, vq->call_ctx.ctx);
1679 		break;
1680 	case VHOST_SET_VRING_ERR:
1681 		if (copy_from_user(&f, argp, sizeof f)) {
1682 			r = -EFAULT;
1683 			break;
1684 		}
1685 		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1686 		if (IS_ERR(ctx)) {
1687 			r = PTR_ERR(ctx);
1688 			break;
1689 		}
1690 		swap(ctx, vq->error_ctx);
1691 		break;
1692 	case VHOST_SET_VRING_ENDIAN:
1693 		r = vhost_set_vring_endian(vq, argp);
1694 		break;
1695 	case VHOST_GET_VRING_ENDIAN:
1696 		r = vhost_get_vring_endian(vq, idx, argp);
1697 		break;
1698 	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1699 		if (copy_from_user(&s, argp, sizeof(s))) {
1700 			r = -EFAULT;
1701 			break;
1702 		}
1703 		vq->busyloop_timeout = s.num;
1704 		break;
1705 	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1706 		s.index = idx;
1707 		s.num = vq->busyloop_timeout;
1708 		if (copy_to_user(argp, &s, sizeof(s)))
1709 			r = -EFAULT;
1710 		break;
1711 	default:
1712 		r = -ENOIOCTLCMD;
1713 	}
1714 
1715 	if (pollstop && vq->handle_kick)
1716 		vhost_poll_stop(&vq->poll);
1717 
1718 	if (!IS_ERR_OR_NULL(ctx))
1719 		eventfd_ctx_put(ctx);
1720 	if (filep)
1721 		fput(filep);
1722 
1723 	if (pollstart && vq->handle_kick)
1724 		r = vhost_poll_start(&vq->poll, vq->kick);
1725 
1726 	mutex_unlock(&vq->mutex);
1727 
1728 	if (pollstop && vq->handle_kick)
1729 		vhost_poll_flush(&vq->poll);
1730 	return r;
1731 }
1732 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
1733 
1734 int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1735 {
1736 	struct vhost_iotlb *niotlb, *oiotlb;
1737 	int i;
1738 
1739 	niotlb = iotlb_alloc();
1740 	if (!niotlb)
1741 		return -ENOMEM;
1742 
1743 	oiotlb = d->iotlb;
1744 	d->iotlb = niotlb;
1745 
1746 	for (i = 0; i < d->nvqs; ++i) {
1747 		struct vhost_virtqueue *vq = d->vqs[i];
1748 
1749 		mutex_lock(&vq->mutex);
1750 		vq->iotlb = niotlb;
1751 		__vhost_vq_meta_reset(vq);
1752 		mutex_unlock(&vq->mutex);
1753 	}
1754 
1755 	vhost_iotlb_free(oiotlb);
1756 
1757 	return 0;
1758 }
1759 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1760 
1761 /* Caller must have device mutex */
1762 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1763 {
1764 	struct eventfd_ctx *ctx;
1765 	u64 p;
1766 	long r;
1767 	int i, fd;
1768 
1769 	/* If you are not the owner, you can become one */
1770 	if (ioctl == VHOST_SET_OWNER) {
1771 		r = vhost_dev_set_owner(d);
1772 		goto done;
1773 	}
1774 
1775 	/* You must be the owner to do anything else */
1776 	r = vhost_dev_check_owner(d);
1777 	if (r)
1778 		goto done;
1779 
1780 	switch (ioctl) {
1781 	case VHOST_SET_MEM_TABLE:
1782 		r = vhost_set_memory(d, argp);
1783 		break;
1784 	case VHOST_SET_LOG_BASE:
1785 		if (copy_from_user(&p, argp, sizeof p)) {
1786 			r = -EFAULT;
1787 			break;
1788 		}
1789 		if ((u64)(unsigned long)p != p) {
1790 			r = -EFAULT;
1791 			break;
1792 		}
1793 		for (i = 0; i < d->nvqs; ++i) {
1794 			struct vhost_virtqueue *vq;
1795 			void __user *base = (void __user *)(unsigned long)p;
1796 			vq = d->vqs[i];
1797 			mutex_lock(&vq->mutex);
1798 			/* If ring is inactive, will check when it's enabled. */
1799 			if (vq->private_data && !vq_log_access_ok(vq, base))
1800 				r = -EFAULT;
1801 			else
1802 				vq->log_base = base;
1803 			mutex_unlock(&vq->mutex);
1804 		}
1805 		break;
1806 	case VHOST_SET_LOG_FD:
1807 		r = get_user(fd, (int __user *)argp);
1808 		if (r < 0)
1809 			break;
1810 		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
1811 		if (IS_ERR(ctx)) {
1812 			r = PTR_ERR(ctx);
1813 			break;
1814 		}
1815 		swap(ctx, d->log_ctx);
1816 		for (i = 0; i < d->nvqs; ++i) {
1817 			mutex_lock(&d->vqs[i]->mutex);
1818 			d->vqs[i]->log_ctx = d->log_ctx;
1819 			mutex_unlock(&d->vqs[i]->mutex);
1820 		}
1821 		if (ctx)
1822 			eventfd_ctx_put(ctx);
1823 		break;
1824 	default:
1825 		r = -ENOIOCTLCMD;
1826 		break;
1827 	}
1828 done:
1829 	return r;
1830 }
1831 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1832 
1833 /* TODO: This is really inefficient.  We need something like get_user()
1834  * (instruction directly accesses the data, with an exception table entry
1835  * returning -EFAULT). See Documentation/x86/exception-tables.rst.
1836  */
1837 static int set_bit_to_user(int nr, void __user *addr)
1838 {
1839 	unsigned long log = (unsigned long)addr;
1840 	struct page *page;
1841 	void *base;
1842 	int bit = nr + (log % PAGE_SIZE) * 8;
1843 	int r;
1844 
1845 	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
1846 	if (r < 0)
1847 		return r;
1848 	BUG_ON(r != 1);
1849 	base = kmap_atomic(page);
1850 	set_bit(bit, base);
1851 	kunmap_atomic(base);
1852 	unpin_user_pages_dirty_lock(&page, 1, true);
1853 	return 0;
1854 }
1855 
1856 static int log_write(void __user *log_base,
1857 		     u64 write_address, u64 write_length)
1858 {
1859 	u64 write_page = write_address / VHOST_PAGE_SIZE;
1860 	int r;
1861 
1862 	if (!write_length)
1863 		return 0;
1864 	write_length += write_address % VHOST_PAGE_SIZE;
1865 	for (;;) {
1866 		u64 base = (u64)(unsigned long)log_base;
1867 		u64 log = base + write_page / 8;
1868 		int bit = write_page % 8;
1869 		if ((u64)(unsigned long)log != log)
1870 			return -EFAULT;
1871 		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1872 		if (r < 0)
1873 			return r;
1874 		if (write_length <= VHOST_PAGE_SIZE)
1875 			break;
1876 		write_length -= VHOST_PAGE_SIZE;
1877 		write_page += 1;
1878 	}
1879 	return r;
1880 }
1881 
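/* Illustrative note (not part of the original source): the dirty log is a
 * userspace bitmap with one bit per VHOST_PAGE_SIZE page of guest memory,
 * so a write touching guest physical address gpa marks bit
 * (gpa / VHOST_PAGE_SIZE) counting from log_base. On a little-endian host a
 * hypothetical userspace reader could test a page roughly like this:
 *
 *	int page_is_dirty(const uint8_t *log_base, uint64_t gpa)
 *	{
 *		uint64_t page = gpa / VHOST_PAGE_SIZE;
 *
 *		return !!(log_base[page / 8] & (1u << (page % 8)));
 *	}
 */
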
1882 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1883 {
1884 	struct vhost_iotlb *umem = vq->umem;
1885 	struct vhost_iotlb_map *u;
1886 	u64 start, end, l, min;
1887 	int r;
1888 	bool hit = false;
1889 
1890 	while (len) {
1891 		min = len;
1892 		/* More than one GPA can be mapped into a single HVA, so
1893 		 * iterate over all possible mappings here to be safe.
1894 		 */
1895 		list_for_each_entry(u, &umem->list, link) {
1896 			if (u->addr > hva - 1 + len ||
1897 			    u->addr - 1 + u->size < hva)
1898 				continue;
1899 			start = max(u->addr, hva);
1900 			end = min(u->addr - 1 + u->size, hva - 1 + len);
1901 			l = end - start + 1;
1902 			r = log_write(vq->log_base,
1903 				      u->start + start - u->addr,
1904 				      l);
1905 			if (r < 0)
1906 				return r;
1907 			hit = true;
1908 			min = min(l, min);
1909 		}
1910 
1911 		if (!hit)
1912 			return -EFAULT;
1913 
1914 		len -= min;
1915 		hva += min;
1916 	}
1917 
1918 	return 0;
1919 }
1920 
1921 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1922 {
1923 	struct iovec *iov = vq->log_iov;
1924 	int i, ret;
1925 
1926 	if (!vq->iotlb)
1927 		return log_write(vq->log_base, vq->log_addr + used_offset, len);
1928 
1929 	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1930 			     len, iov, 64, VHOST_ACCESS_WO);
1931 	if (ret < 0)
1932 		return ret;
1933 
1934 	for (i = 0; i < ret; i++) {
1935 		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
1936 				    iov[i].iov_len);
1937 		if (ret)
1938 			return ret;
1939 	}
1940 
1941 	return 0;
1942 }
1943 
1944 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1945 		    unsigned int log_num, u64 len, struct iovec *iov, int count)
1946 {
1947 	int i, r;
1948 
1949 	/* Make sure data written is seen before log. */
1950 	smp_wmb();
1951 
1952 	if (vq->iotlb) {
1953 		for (i = 0; i < count; i++) {
1954 			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1955 					  iov[i].iov_len);
1956 			if (r < 0)
1957 				return r;
1958 		}
1959 		return 0;
1960 	}
1961 
1962 	for (i = 0; i < log_num; ++i) {
1963 		u64 l = min(log[i].len, len);
1964 		r = log_write(vq->log_base, log[i].addr, l);
1965 		if (r < 0)
1966 			return r;
1967 		len -= l;
1968 		if (!len) {
1969 			if (vq->log_ctx)
1970 				eventfd_signal(vq->log_ctx, 1);
1971 			return 0;
1972 		}
1973 	}
1974 	/* Length written exceeds what we have stored. This is a bug. */
1975 	BUG();
1976 	return 0;
1977 }
1978 EXPORT_SYMBOL_GPL(vhost_log_write);
1979 
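/* Illustrative sketch (not part of the original source): a backend that has
 * just copied written_len bytes into the guest buffers described by
 * log[]/log_num (as filled in by vhost_get_vq_desc()) would log them when
 * dirty logging was negotiated. written_len and in_num are hypothetical:
 *
 *	if (unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)))
 *		vhost_log_write(vq, log, log_num, written_len,
 *				vq->iov, in_num);
 */
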
1980 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1981 {
1982 	void __user *used;
1983 	if (vhost_put_used_flags(vq))
1984 		return -EFAULT;
1985 	if (unlikely(vq->log_used)) {
1986 		/* Make sure the flag is seen before log. */
1987 		smp_wmb();
1988 		/* Log used flag write. */
1989 		used = &vq->used->flags;
1990 		log_used(vq, (used - (void __user *)vq->used),
1991 			 sizeof vq->used->flags);
1992 		if (vq->log_ctx)
1993 			eventfd_signal(vq->log_ctx, 1);
1994 	}
1995 	return 0;
1996 }
1997 
1998 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1999 {
2000 	if (vhost_put_avail_event(vq))
2001 		return -EFAULT;
2002 	if (unlikely(vq->log_used)) {
2003 		void __user *used;
2004 		/* Make sure the event is seen before log. */
2005 		smp_wmb();
2006 		/* Log avail event write */
2007 		used = vhost_avail_event(vq);
2008 		log_used(vq, (used - (void __user *)vq->used),
2009 			 sizeof *vhost_avail_event(vq));
2010 		if (vq->log_ctx)
2011 			eventfd_signal(vq->log_ctx, 1);
2012 	}
2013 	return 0;
2014 }
2015 
2016 int vhost_vq_init_access(struct vhost_virtqueue *vq)
2017 {
2018 	__virtio16 last_used_idx;
2019 	int r;
2020 	bool is_le = vq->is_le;
2021 
2022 	if (!vq->private_data)
2023 		return 0;
2024 
2025 	vhost_init_is_le(vq);
2026 
2027 	r = vhost_update_used_flags(vq);
2028 	if (r)
2029 		goto err;
2030 	vq->signalled_used_valid = false;
2031 	if (!vq->iotlb &&
2032 	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2033 		r = -EFAULT;
2034 		goto err;
2035 	}
2036 	r = vhost_get_used_idx(vq, &last_used_idx);
2037 	if (r) {
2038 		vq_err(vq, "Can't access used idx at %p\n",
2039 		       &vq->used->idx);
2040 		goto err;
2041 	}
2042 	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2043 	return 0;
2044 
2045 err:
2046 	vq->is_le = is_le;
2047 	return r;
2048 }
2049 EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2050 
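/* Illustrative sketch (not part of the original source): a backend's
 * SET_BACKEND handler typically installs its private data under the vq
 * mutex and then calls vhost_vq_init_access() to re-read the used index
 * with the freshly negotiated endianness. "backend" is hypothetical:
 *
 *	mutex_lock(&vq->mutex);
 *	vq->private_data = backend;
 *	r = vhost_vq_init_access(vq);
 *	mutex_unlock(&vq->mutex);
 */
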
2051 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2052 			  struct iovec iov[], int iov_size, int access)
2053 {
2054 	const struct vhost_iotlb_map *map;
2055 	struct vhost_dev *dev = vq->dev;
2056 	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
2057 	struct iovec *_iov;
2058 	u64 s = 0, last = addr + len - 1;
2059 	int ret = 0;
2060 
2061 	while ((u64)len > s) {
2062 		u64 size;
2063 		if (unlikely(ret >= iov_size)) {
2064 			ret = -ENOBUFS;
2065 			break;
2066 		}
2067 
2068 		map = vhost_iotlb_itree_first(umem, addr, last);
2069 		if (map == NULL || map->start > addr) {
2070 			if (umem != dev->iotlb) {
2071 				ret = -EFAULT;
2072 				break;
2073 			}
2074 			ret = -EAGAIN;
2075 			break;
2076 		} else if (!(map->perm & access)) {
2077 			ret = -EPERM;
2078 			break;
2079 		}
2080 
2081 		_iov = iov + ret;
2082 		size = map->size - addr + map->start;
2083 		_iov->iov_len = min((u64)len - s, size);
2084 		_iov->iov_base = (void __user *)(unsigned long)
2085 				 (map->addr + addr - map->start);
2086 		s += size;
2087 		addr += size;
2088 		++ret;
2089 	}
2090 
2091 	if (ret == -EAGAIN)
2092 		vhost_iotlb_miss(vq, addr, access);
2093 	return ret;
2094 }
2095 
2096 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
2097  * function returns the next descriptor in the chain,
2098  * or -1U if we're at the end. */
2099 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2100 {
2101 	unsigned int next;
2102 
2103 	/* If this descriptor says it doesn't chain, we're done. */
2104 	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2105 		return -1U;
2106 
2107 	/* Check they're not leading us off end of descriptors. */
2108 	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2109 	return next;
2110 }
2111 
2112 static int get_indirect(struct vhost_virtqueue *vq,
2113 			struct iovec iov[], unsigned int iov_size,
2114 			unsigned int *out_num, unsigned int *in_num,
2115 			struct vhost_log *log, unsigned int *log_num,
2116 			struct vring_desc *indirect)
2117 {
2118 	struct vring_desc desc;
2119 	unsigned int i = 0, count, found = 0;
2120 	u32 len = vhost32_to_cpu(vq, indirect->len);
2121 	struct iov_iter from;
2122 	int ret, access;
2123 
2124 	/* Sanity check */
2125 	if (unlikely(len % sizeof desc)) {
2126 		vq_err(vq, "Invalid length in indirect descriptor: "
2127 		       "len 0x%llx not multiple of 0x%zx\n",
2128 		       (unsigned long long)len,
2129 		       sizeof desc);
2130 		return -EINVAL;
2131 	}
2132 
2133 	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2134 			     UIO_MAXIOV, VHOST_ACCESS_RO);
2135 	if (unlikely(ret < 0)) {
2136 		if (ret != -EAGAIN)
2137 			vq_err(vq, "Translation failure %d in indirect.\n", ret);
2138 		return ret;
2139 	}
2140 	iov_iter_init(&from, READ, vq->indirect, ret, len);
2141 	count = len / sizeof desc;
2142 	/* Buffers are chained via a 16 bit next field, so
2143 	 * we can have at most 2^16 of these. */
2144 	if (unlikely(count > USHRT_MAX + 1)) {
2145 		vq_err(vq, "Indirect buffer length too big: %d\n",
2146 		       indirect->len);
2147 		return -E2BIG;
2148 	}
2149 
2150 	do {
2151 		unsigned iov_count = *in_num + *out_num;
2152 		if (unlikely(++found > count)) {
2153 			vq_err(vq, "Loop detected: last one at %u "
2154 			       "indirect size %u\n",
2155 			       i, count);
2156 			return -EINVAL;
2157 		}
2158 		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2159 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2160 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2161 			return -EINVAL;
2162 		}
2163 		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2164 			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2165 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2166 			return -EINVAL;
2167 		}
2168 
2169 		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2170 			access = VHOST_ACCESS_WO;
2171 		else
2172 			access = VHOST_ACCESS_RO;
2173 
2174 		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2175 				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2176 				     iov_size - iov_count, access);
2177 		if (unlikely(ret < 0)) {
2178 			if (ret != -EAGAIN)
2179 				vq_err(vq, "Translation failure %d indirect idx %d\n",
2180 					ret, i);
2181 			return ret;
2182 		}
2183 		/* If this is an input descriptor, increment that count. */
2184 		if (access == VHOST_ACCESS_WO) {
2185 			*in_num += ret;
2186 			if (unlikely(log && ret)) {
2187 				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2188 				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2189 				++*log_num;
2190 			}
2191 		} else {
2192 			/* If it's an output descriptor, they're all supposed
2193 			 * to come before any input descriptors. */
2194 			if (unlikely(*in_num)) {
2195 				vq_err(vq, "Indirect descriptor "
2196 				       "has out after in: idx %d\n", i);
2197 				return -EINVAL;
2198 			}
2199 			*out_num += ret;
2200 		}
2201 	} while ((i = next_desc(vq, &desc)) != -1);
2202 	return 0;
2203 }
2204 
2205 /* This looks in the virtqueue for the first available buffer, and converts
2206  * it to an iovec for convenient access.  Since descriptors consist of some
2207  * number of output then some number of input descriptors, it's actually two
2208  * iovecs, but we pack them into one and note how many of each there were.
2209  *
2210  * This function returns the descriptor number found, or vq->num (which is
2211  * never a valid descriptor number) if none was found.  A negative code is
2212  * returned on error. */
2213 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2214 		      struct iovec iov[], unsigned int iov_size,
2215 		      unsigned int *out_num, unsigned int *in_num,
2216 		      struct vhost_log *log, unsigned int *log_num)
2217 {
2218 	struct vring_desc desc;
2219 	unsigned int i, head, found = 0;
2220 	u16 last_avail_idx;
2221 	__virtio16 avail_idx;
2222 	__virtio16 ring_head;
2223 	int ret, access;
2224 
2225 	/* Check it isn't doing very strange things with descriptor numbers. */
2226 	last_avail_idx = vq->last_avail_idx;
2227 
2228 	if (vq->avail_idx == vq->last_avail_idx) {
2229 		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2230 			vq_err(vq, "Failed to access avail idx at %p\n",
2231 				&vq->avail->idx);
2232 			return -EFAULT;
2233 		}
2234 		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2235 
2236 		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2237 			vq_err(vq, "Guest moved used index from %u to %u",
2238 				last_avail_idx, vq->avail_idx);
2239 			return -EFAULT;
2240 		}
2241 
2242 		/* If there's nothing new since last we looked, return
2243 		 * invalid.
2244 		 */
2245 		if (vq->avail_idx == last_avail_idx)
2246 			return vq->num;
2247 
2248 		/* Only get avail ring entries after they have been
2249 		 * exposed by guest.
2250 		 */
2251 		smp_rmb();
2252 	}
2253 
2254 	/* Grab the next descriptor number they're advertising, and increment
2255 	 * the index we've seen. */
2256 	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2257 		vq_err(vq, "Failed to read head: idx %d address %p\n",
2258 		       last_avail_idx,
2259 		       &vq->avail->ring[last_avail_idx % vq->num]);
2260 		return -EFAULT;
2261 	}
2262 
2263 	head = vhost16_to_cpu(vq, ring_head);
2264 
2265 	/* If their number is silly, that's an error. */
2266 	if (unlikely(head >= vq->num)) {
2267 		vq_err(vq, "Guest says index %u > %u is available",
2268 		       head, vq->num);
2269 		return -EINVAL;
2270 	}
2271 
2272 	/* When we start there are none of either input nor output. */
2273 	*out_num = *in_num = 0;
2274 	if (unlikely(log))
2275 		*log_num = 0;
2276 
2277 	i = head;
2278 	do {
2279 		unsigned iov_count = *in_num + *out_num;
2280 		if (unlikely(i >= vq->num)) {
2281 			vq_err(vq, "Desc index is %u > %u, head = %u",
2282 			       i, vq->num, head);
2283 			return -EINVAL;
2284 		}
2285 		if (unlikely(++found > vq->num)) {
2286 			vq_err(vq, "Loop detected: last one at %u "
2287 			       "vq size %u head %u\n",
2288 			       i, vq->num, head);
2289 			return -EINVAL;
2290 		}
2291 		ret = vhost_get_desc(vq, &desc, i);
2292 		if (unlikely(ret)) {
2293 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2294 			       i, vq->desc + i);
2295 			return -EFAULT;
2296 		}
2297 		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2298 			ret = get_indirect(vq, iov, iov_size,
2299 					   out_num, in_num,
2300 					   log, log_num, &desc);
2301 			if (unlikely(ret < 0)) {
2302 				if (ret != -EAGAIN)
2303 					vq_err(vq, "Failure detected "
2304 						"in indirect descriptor at idx %d\n", i);
2305 				return ret;
2306 			}
2307 			continue;
2308 		}
2309 
2310 		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2311 			access = VHOST_ACCESS_WO;
2312 		else
2313 			access = VHOST_ACCESS_RO;
2314 		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2315 				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2316 				     iov_size - iov_count, access);
2317 		if (unlikely(ret < 0)) {
2318 			if (ret != -EAGAIN)
2319 				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2320 					ret, i);
2321 			return ret;
2322 		}
2323 		if (access == VHOST_ACCESS_WO) {
2324 			/* If this is an input descriptor,
2325 			 * increment that count. */
2326 			*in_num += ret;
2327 			if (unlikely(log && ret)) {
2328 				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2329 				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2330 				++*log_num;
2331 			}
2332 		} else {
2333 			/* If it's an output descriptor, they're all supposed
2334 			 * to come before any input descriptors. */
2335 			if (unlikely(*in_num)) {
2336 				vq_err(vq, "Descriptor has out after in: "
2337 				       "idx %d\n", i);
2338 				return -EINVAL;
2339 			}
2340 			*out_num += ret;
2341 		}
2342 	} while ((i = next_desc(vq, &desc)) != -1);
2343 
2344 	/* On success, increment avail index. */
2345 	vq->last_avail_idx++;
2346 
2347 	/* Assume notifications from the guest are disabled at this point;
2348 	 * if they aren't, we would need to update the avail_event index. */
2349 	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2350 	return head;
2351 }
2352 EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
2353 
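/* Illustrative sketch (not part of the original source): a typical
 * handle_kick work function drains the ring with vhost_get_vq_desc() and
 * completes each buffer with vhost_add_used_and_signal(). Everything except
 * the vhost_*() calls (dev, example_process(), len) is hypothetical:
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;		// error, already reported via vq_err()
 *		if (head == vq->num)
 *			break;		// nothing available right now
 *		len = example_process(vq->iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */
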
2354 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2355 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2356 {
2357 	vq->last_avail_idx -= n;
2358 }
2359 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2360 
2361 /* After we've used one of their buffers, we tell them about it.  We'll then
2362  * want to notify the guest, using eventfd. */
2363 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2364 {
2365 	struct vring_used_elem heads = {
2366 		cpu_to_vhost32(vq, head),
2367 		cpu_to_vhost32(vq, len)
2368 	};
2369 
2370 	return vhost_add_used_n(vq, &heads, 1);
2371 }
2372 EXPORT_SYMBOL_GPL(vhost_add_used);
2373 
2374 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2375 			    struct vring_used_elem *heads,
2376 			    unsigned count)
2377 {
2378 	vring_used_elem_t __user *used;
2379 	u16 old, new;
2380 	int start;
2381 
2382 	start = vq->last_used_idx & (vq->num - 1);
2383 	used = vq->used->ring + start;
2384 	if (vhost_put_used(vq, heads, start, count)) {
2385 		vq_err(vq, "Failed to write used");
2386 		return -EFAULT;
2387 	}
2388 	if (unlikely(vq->log_used)) {
2389 		/* Make sure data is seen before log. */
2390 		smp_wmb();
2391 		/* Log used ring entry write. */
2392 		log_used(vq, ((void __user *)used - (void __user *)vq->used),
2393 			 count * sizeof *used);
2394 	}
2395 	old = vq->last_used_idx;
2396 	new = (vq->last_used_idx += count);
2397 	/* If the driver never bothers to signal in a very long while,
2398 	 * used index might wrap around. If that happens, invalidate
2399 	 * signalled_used index we stored. TODO: make sure driver
2400 	 * signals at least once in 2^16 and remove this. */
2401 	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2402 		vq->signalled_used_valid = false;
2403 	return 0;
2404 }
2405 
2406 /* After we've used one of their buffers, we tell them about it.  We'll then
2407  * want to notify the guest, using eventfd. */
2408 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2409 		     unsigned count)
2410 {
2411 	int start, n, r;
2412 
2413 	start = vq->last_used_idx & (vq->num - 1);
2414 	n = vq->num - start;
2415 	if (n < count) {
2416 		r = __vhost_add_used_n(vq, heads, n);
2417 		if (r < 0)
2418 			return r;
2419 		heads += n;
2420 		count -= n;
2421 	}
2422 	r = __vhost_add_used_n(vq, heads, count);
2423 
2424 	/* Make sure buffer is written before we update index. */
2425 	smp_wmb();
2426 	if (vhost_put_used_idx(vq)) {
2427 		vq_err(vq, "Failed to increment used idx");
2428 		return -EFAULT;
2429 	}
2430 	if (unlikely(vq->log_used)) {
2431 		/* Make sure used idx is seen before log. */
2432 		smp_wmb();
2433 		/* Log used index update. */
2434 		log_used(vq, offsetof(struct vring_used, idx),
2435 			 sizeof vq->used->idx);
2436 		if (vq->log_ctx)
2437 			eventfd_signal(vq->log_ctx, 1);
2438 	}
2439 	return r;
2440 }
2441 EXPORT_SYMBOL_GPL(vhost_add_used_n);
2442 
2443 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2444 {
2445 	__u16 old, new;
2446 	__virtio16 event;
2447 	bool v;
2448 	/* Flush out used index updates. This is paired
2449 	 * with the barrier that the Guest executes when enabling
2450 	 * interrupts. */
2451 	smp_mb();
2452 
2453 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2454 	    unlikely(vq->avail_idx == vq->last_avail_idx))
2455 		return true;
2456 
2457 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2458 		__virtio16 flags;
2459 		if (vhost_get_avail_flags(vq, &flags)) {
2460 			vq_err(vq, "Failed to get flags");
2461 			return true;
2462 		}
2463 		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2464 	}
2465 	old = vq->signalled_used;
2466 	v = vq->signalled_used_valid;
2467 	new = vq->signalled_used = vq->last_used_idx;
2468 	vq->signalled_used_valid = true;
2469 
2470 	if (unlikely(!v))
2471 		return true;
2472 
2473 	if (vhost_get_used_event(vq, &event)) {
2474 		vq_err(vq, "Failed to get used event idx");
2475 		return true;
2476 	}
2477 	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2478 }
2479 
2480 /* This actually signals the guest, using eventfd. */
2481 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2482 {
2483 	/* Signal the Guest to tell them we used something up. */
2484 	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2485 		eventfd_signal(vq->call_ctx.ctx, 1);
2486 }
2487 EXPORT_SYMBOL_GPL(vhost_signal);
2488 
2489 /* And here's the combo meal deal.  Supersize me! */
2490 void vhost_add_used_and_signal(struct vhost_dev *dev,
2491 			       struct vhost_virtqueue *vq,
2492 			       unsigned int head, int len)
2493 {
2494 	vhost_add_used(vq, head, len);
2495 	vhost_signal(dev, vq);
2496 }
2497 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2498 
2499 /* multi-buffer version of vhost_add_used_and_signal */
2500 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2501 				 struct vhost_virtqueue *vq,
2502 				 struct vring_used_elem *heads, unsigned count)
2503 {
2504 	vhost_add_used_n(vq, heads, count);
2505 	vhost_signal(dev, vq);
2506 }
2507 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2508 
2509 /* return true if we're sure that the available ring is empty */
2510 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2511 {
2512 	__virtio16 avail_idx;
2513 	int r;
2514 
2515 	if (vq->avail_idx != vq->last_avail_idx)
2516 		return false;
2517 
2518 	r = vhost_get_avail_idx(vq, &avail_idx);
2519 	if (unlikely(r))
2520 		return false;
2521 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2522 
2523 	return vq->avail_idx == vq->last_avail_idx;
2524 }
2525 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2526 
2527 /* OK, now we need to know about added descriptors. */
2528 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2529 {
2530 	__virtio16 avail_idx;
2531 	int r;
2532 
2533 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2534 		return false;
2535 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2536 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2537 		r = vhost_update_used_flags(vq);
2538 		if (r) {
2539 			vq_err(vq, "Failed to enable notification at %p: %d\n",
2540 			       &vq->used->flags, r);
2541 			return false;
2542 		}
2543 	} else {
2544 		r = vhost_update_avail_event(vq, vq->avail_idx);
2545 		if (r) {
2546 			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2547 			       vhost_avail_event(vq), r);
2548 			return false;
2549 		}
2550 	}
2551 	/* They could have slipped one in as we were doing that: make
2552 	 * sure it's written, then check again. */
2553 	smp_mb();
2554 	r = vhost_get_avail_idx(vq, &avail_idx);
2555 	if (r) {
2556 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2557 		       &vq->avail->idx, r);
2558 		return false;
2559 	}
2560 
2561 	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2562 }
2563 EXPORT_SYMBOL_GPL(vhost_enable_notify);
2564 
2565 /* We don't need to be notified again. */
2566 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2567 {
2568 	int r;
2569 
2570 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2571 		return;
2572 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2573 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2574 		r = vhost_update_used_flags(vq);
2575 		if (r)
2576 			vq_err(vq, "Failed to disable notification at %p: %d\n",
2577 			       &vq->used->flags, r);
2578 	}
2579 }
2580 EXPORT_SYMBOL_GPL(vhost_disable_notify);
2581 
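/* Illustrative sketch (not part of the original source): the usual way to
 * avoid a missed-wakeup race is to disable notifications while draining the
 * ring, then re-enable and re-check before sleeping; vhost_enable_notify()
 * returns true when new buffers slipped in meanwhile. Only the vhost_*()
 * helpers are real; the loop is hypothetical:
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		// ... drain all available descriptors ...
 *
 *		if (!vhost_enable_notify(dev, vq))
 *			break;				// safe to sleep
 *		vhost_disable_notify(dev, vq);		// more work arrived
 *	}
 */
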
2582 /* Create a new message. */
2583 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2584 {
2585 	/* Make sure all padding within the structure is initialized. */
2586 	struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
2587 	if (!node)
2588 		return NULL;
2589 
2590 	node->vq = vq;
2591 	node->msg.type = type;
2592 	return node;
2593 }
2594 EXPORT_SYMBOL_GPL(vhost_new_msg);
2595 
2596 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2597 		       struct vhost_msg_node *node)
2598 {
2599 	spin_lock(&dev->iotlb_lock);
2600 	list_add_tail(&node->node, head);
2601 	spin_unlock(&dev->iotlb_lock);
2602 
2603 	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2604 }
2605 EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2606 
2607 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2608 					 struct list_head *head)
2609 {
2610 	struct vhost_msg_node *node = NULL;
2611 
2612 	spin_lock(&dev->iotlb_lock);
2613 	if (!list_empty(head)) {
2614 		node = list_first_entry(head, struct vhost_msg_node,
2615 					node);
2616 		list_del(&node->node);
2617 	}
2618 	spin_unlock(&dev->iotlb_lock);
2619 
2620 	return node;
2621 }
2622 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2623 
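/* Illustrative sketch (not part of the original source): the IOTLB miss
 * path builds on these helpers. The kernel queues a miss message on
 * dev->read_list for userspace to read() from the vhost fd, and userspace
 * replies with an update written back to the same fd. Assuming the legacy
 * VHOST_IOTLB_MSG layout, a miss is queued roughly like this:
 *
 *	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
 *	if (node) {
 *		node->msg.iotlb.iova = iova;
 *		node->msg.iotlb.perm = access;
 *		node->msg.iotlb.type = VHOST_IOTLB_MISS;
 *		vhost_enqueue_msg(vq->dev, &vq->dev->read_list, node);
 *	}
 */
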
2624 void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2625 {
2626 	struct vhost_virtqueue *vq;
2627 	int i;
2628 
2629 	mutex_lock(&dev->mutex);
2630 	for (i = 0; i < dev->nvqs; ++i) {
2631 		vq = dev->vqs[i];
2632 		mutex_lock(&vq->mutex);
2633 		vq->acked_backend_features = features;
2634 		mutex_unlock(&vq->mutex);
2635 	}
2636 	mutex_unlock(&dev->mutex);
2637 }
2638 EXPORT_SYMBOL_GPL(vhost_set_backend_features);
2639 
2640 static int __init vhost_init(void)
2641 {
2642 	return 0;
2643 }
2644 
2645 static void __exit vhost_exit(void)
2646 {
2647 }
2648 
2649 module_init(vhost_init);
2650 module_exit(vhost_exit);
2651 
2652 MODULE_VERSION("0.0.1");
2653 MODULE_LICENSE("GPL v2");
2654 MODULE_AUTHOR("Michael S. Tsirkin");
2655 MODULE_DESCRIPTION("Host kernel accelerator for virtio");
2656