1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2009 Red Hat, Inc.
3  * Copyright (C) 2006 Rusty Russell IBM Corporation
4  *
5  * Author: Michael S. Tsirkin <mst@redhat.com>
6  *
7  * Inspiration, some code, and most witty comments come from
8  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
9  *
10  * Generic code for virtio server in host kernel.
11  */
12 
13 #include <linux/eventfd.h>
14 #include <linux/vhost.h>
15 #include <linux/uio.h>
16 #include <linux/mm.h>
17 #include <linux/miscdevice.h>
18 #include <linux/mutex.h>
19 #include <linux/poll.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/kthread.h>
25 #include <linux/cgroup.h>
26 #include <linux/module.h>
27 #include <linux/sort.h>
28 #include <linux/sched/mm.h>
29 #include <linux/sched/signal.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/nospec.h>
32 #include <linux/kcov.h>
33 
34 #include "vhost.h"
35 
36 static ushort max_mem_regions = 64;
37 module_param(max_mem_regions, ushort, 0444);
38 MODULE_PARM_DESC(max_mem_regions,
39 	"Maximum number of memory regions in memory map. (default: 64)");
40 static int max_iotlb_entries = 2048;
41 module_param(max_iotlb_entries, int, 0444);
42 MODULE_PARM_DESC(max_iotlb_entries,
43 	"Maximum number of iotlb entries. (default: 2048)");
44 
45 enum {
46 	VHOST_MEMORY_F_LOG = 0x1,
47 };
48 
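/* With VIRTIO_RING_F_EVENT_IDX, the used_event index is stored right after
 * the available ring and the avail_event index right after the used ring;
 * the two macros below compute those userspace addresses from vq->num.
 */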
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
51 
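/* Legacy (pre virtio 1.0) devices use the guest's native endianness. With
 * CONFIG_VHOST_CROSS_ENDIAN_LEGACY, userspace may declare the guest ring
 * endianness explicitly through VHOST_SET_VRING_ENDIAN; the helpers below
 * track that choice in vq->user_be and fold it into vq->is_le.
 */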
52 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
54 {
55 	vq->user_be = !virtio_legacy_is_little_endian();
56 }
57 
58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
59 {
60 	vq->user_be = true;
61 }
62 
63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
64 {
65 	vq->user_be = false;
66 }
67 
68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
69 {
70 	struct vhost_vring_state s;
71 
72 	if (vq->private_data)
73 		return -EBUSY;
74 
75 	if (copy_from_user(&s, argp, sizeof(s)))
76 		return -EFAULT;
77 
78 	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
79 	    s.num != VHOST_VRING_BIG_ENDIAN)
80 		return -EINVAL;
81 
82 	if (s.num == VHOST_VRING_BIG_ENDIAN)
83 		vhost_enable_cross_endian_big(vq);
84 	else
85 		vhost_enable_cross_endian_little(vq);
86 
87 	return 0;
88 }
89 
90 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
91 				   int __user *argp)
92 {
93 	struct vhost_vring_state s = {
94 		.index = idx,
95 		.num = vq->user_be
96 	};
97 
98 	if (copy_to_user(argp, &s, sizeof(s)))
99 		return -EFAULT;
100 
101 	return 0;
102 }
103 
104 static void vhost_init_is_le(struct vhost_virtqueue *vq)
105 {
106 	/* Note for legacy virtio: user_be is initialized at reset time
107 	 * according to the host endianness. If userspace does not set an
108 	 * explicit endianness, the default behavior is native endian, as
109 	 * expected by legacy virtio.
110 	 */
111 	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
112 }
113 #else
114 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
115 {
116 }
117 
118 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
119 {
120 	return -ENOIOCTLCMD;
121 }
122 
123 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
124 				   int __user *argp)
125 {
126 	return -ENOIOCTLCMD;
127 }
128 
129 static void vhost_init_is_le(struct vhost_virtqueue *vq)
130 {
131 	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
132 		|| virtio_legacy_is_little_endian();
133 }
134 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
135 
136 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
137 {
138 	vhost_init_is_le(vq);
139 }
140 
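/* Flushing is implemented by queueing a vhost_flush_struct work item and
 * waiting for its completion: once it has run, every work item queued
 * before it has run as well.
 */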
141 struct vhost_flush_struct {
142 	struct vhost_work work;
143 	struct completion wait_event;
144 };
145 
146 static void vhost_flush_work(struct vhost_work *work)
147 {
148 	struct vhost_flush_struct *s;
149 
150 	s = container_of(work, struct vhost_flush_struct, work);
151 	complete(&s->wait_event);
152 }
153 
154 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
155 			    poll_table *pt)
156 {
157 	struct vhost_poll *poll;
158 
159 	poll = container_of(pt, struct vhost_poll, table);
160 	poll->wqh = wqh;
161 	add_wait_queue(wqh, &poll->wait);
162 }
163 
164 static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
165 			     void *key)
166 {
167 	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
168 	struct vhost_work *work = &poll->work;
169 
170 	if (!(key_to_poll(key) & poll->mask))
171 		return 0;
172 
173 	if (!poll->dev->use_worker)
174 		work->fn(work);
175 	else
176 		vhost_poll_queue(poll);
177 
178 	return 0;
179 }
180 
181 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
182 {
183 	clear_bit(VHOST_WORK_QUEUED, &work->flags);
184 	work->fn = fn;
185 }
186 EXPORT_SYMBOL_GPL(vhost_work_init);
187 
188 /* Init poll structure */
189 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
190 		     __poll_t mask, struct vhost_dev *dev)
191 {
192 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
193 	init_poll_funcptr(&poll->table, vhost_poll_func);
194 	poll->mask = mask;
195 	poll->dev = dev;
196 	poll->wqh = NULL;
197 
198 	vhost_work_init(&poll->work, fn);
199 }
200 EXPORT_SYMBOL_GPL(vhost_poll_init);
201 
202 /* Start polling a file. We add ourselves to the file's wait queue. The caller
203  * must keep a reference to the file until after vhost_poll_stop is called. */
204 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
205 {
206 	__poll_t mask;
207 
208 	if (poll->wqh)
209 		return 0;
210 
211 	mask = vfs_poll(file, &poll->table);
212 	if (mask)
213 		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214 	if (mask & EPOLLERR) {
215 		vhost_poll_stop(poll);
216 		return -EINVAL;
217 	}
218 
219 	return 0;
220 }
221 EXPORT_SYMBOL_GPL(vhost_poll_start);
222 
223 /* Stop polling a file. After this function returns, it becomes safe to drop the
224  * file reference. You must also flush afterwards. */
225 void vhost_poll_stop(struct vhost_poll *poll)
226 {
227 	if (poll->wqh) {
228 		remove_wait_queue(poll->wqh, &poll->wait);
229 		poll->wqh = NULL;
230 	}
231 }
232 EXPORT_SYMBOL_GPL(vhost_poll_stop);
233 
234 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
235 {
236 	struct vhost_flush_struct flush;
237 
238 	if (dev->worker) {
239 		init_completion(&flush.wait_event);
240 		vhost_work_init(&flush.work, vhost_flush_work);
241 
242 		vhost_work_queue(dev, &flush.work);
243 		wait_for_completion(&flush.wait_event);
244 	}
245 }
246 EXPORT_SYMBOL_GPL(vhost_work_flush);
247 
248 /* Flush any work that has been scheduled. When calling this, don't hold any
249  * locks that are also used by the callback. */
250 void vhost_poll_flush(struct vhost_poll *poll)
251 {
252 	vhost_work_flush(poll->dev, &poll->work);
253 }
254 EXPORT_SYMBOL_GPL(vhost_poll_flush);
255 
256 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
257 {
258 	if (!dev->worker)
259 		return;
260 
261 	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
262 		/* We can only add the work to the list after we're
263 		 * sure it was not in the list.
264 		 * test_and_set_bit() implies a memory barrier.
265 		 */
266 		llist_add(&work->node, &dev->work_list);
267 		wake_up_process(dev->worker);
268 	}
269 }
270 EXPORT_SYMBOL_GPL(vhost_work_queue);
271 
272 /* A lockless hint for busy polling code to exit the loop */
273 bool vhost_has_work(struct vhost_dev *dev)
274 {
275 	return !llist_empty(&dev->work_list);
276 }
277 EXPORT_SYMBOL_GPL(vhost_has_work);
278 
279 void vhost_poll_queue(struct vhost_poll *poll)
280 {
281 	vhost_work_queue(poll->dev, &poll->work);
282 }
283 EXPORT_SYMBOL_GPL(vhost_poll_queue);
284 
285 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
286 {
287 	int j;
288 
289 	for (j = 0; j < VHOST_NUM_ADDRS; j++)
290 		vq->meta_iotlb[j] = NULL;
291 }
292 
293 static void vhost_vq_meta_reset(struct vhost_dev *d)
294 {
295 	int i;
296 
297 	for (i = 0; i < d->nvqs; ++i)
298 		__vhost_vq_meta_reset(d->vqs[i]);
299 }
300 
301 static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
302 {
303 	call_ctx->ctx = NULL;
304 	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
305 }
306 
307 bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
308 {
309 	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
310 }
311 EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
312 
313 static void vhost_vq_reset(struct vhost_dev *dev,
314 			   struct vhost_virtqueue *vq)
315 {
316 	vq->num = 1;
317 	vq->desc = NULL;
318 	vq->avail = NULL;
319 	vq->used = NULL;
320 	vq->last_avail_idx = 0;
321 	vq->avail_idx = 0;
322 	vq->last_used_idx = 0;
323 	vq->signalled_used = 0;
324 	vq->signalled_used_valid = false;
325 	vq->used_flags = 0;
326 	vq->log_used = false;
327 	vq->log_addr = -1ull;
328 	vq->private_data = NULL;
329 	vq->acked_features = 0;
330 	vq->acked_backend_features = 0;
331 	vq->log_base = NULL;
332 	vq->error_ctx = NULL;
333 	vq->kick = NULL;
334 	vq->log_ctx = NULL;
335 	vhost_disable_cross_endian(vq);
336 	vhost_reset_is_le(vq);
337 	vq->busyloop_timeout = 0;
338 	vq->umem = NULL;
339 	vq->iotlb = NULL;
340 	vhost_vring_call_reset(&vq->call_ctx);
341 	__vhost_vq_meta_reset(vq);
342 }
343 
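/* Main loop of the per-device worker kthread: adopt the owner's mm, drain
 * the lockless work_list, run each work item (wrapped in a kcov remote
 * handle for coverage), and sleep when there is nothing left to do.
 */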
344 static int vhost_worker(void *data)
345 {
346 	struct vhost_dev *dev = data;
347 	struct vhost_work *work, *work_next;
348 	struct llist_node *node;
349 
350 	kthread_use_mm(dev->mm);
351 
352 	for (;;) {
353 		/* mb paired w/ kthread_stop */
354 		set_current_state(TASK_INTERRUPTIBLE);
355 
356 		if (kthread_should_stop()) {
357 			__set_current_state(TASK_RUNNING);
358 			break;
359 		}
360 
361 		node = llist_del_all(&dev->work_list);
362 		if (!node)
363 			schedule();
364 
365 		node = llist_reverse_order(node);
366 		/* make sure flag is seen after deletion */
367 		smp_wmb();
368 		llist_for_each_entry_safe(work, work_next, node, node) {
369 			clear_bit(VHOST_WORK_QUEUED, &work->flags);
370 			__set_current_state(TASK_RUNNING);
371 			kcov_remote_start_common(dev->kcov_handle);
372 			work->fn(work);
373 			kcov_remote_stop();
374 			if (need_resched())
375 				schedule();
376 		}
377 	}
378 	kthread_unuse_mm(dev->mm);
379 	return 0;
380 }
381 
382 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
383 {
384 	kfree(vq->indirect);
385 	vq->indirect = NULL;
386 	kfree(vq->log);
387 	vq->log = NULL;
388 	kfree(vq->heads);
389 	vq->heads = NULL;
390 }
391 
392 /* Helper to allocate iovec buffers for all vqs. */
393 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
394 {
395 	struct vhost_virtqueue *vq;
396 	int i;
397 
398 	for (i = 0; i < dev->nvqs; ++i) {
399 		vq = dev->vqs[i];
400 		vq->indirect = kmalloc_array(UIO_MAXIOV,
401 					     sizeof(*vq->indirect),
402 					     GFP_KERNEL);
403 		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
404 					GFP_KERNEL);
405 		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
406 					  GFP_KERNEL);
407 		if (!vq->indirect || !vq->log || !vq->heads)
408 			goto err_nomem;
409 	}
410 	return 0;
411 
412 err_nomem:
413 	for (; i >= 0; --i)
414 		vhost_vq_free_iovecs(dev->vqs[i]);
415 	return -ENOMEM;
416 }
417 
418 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
419 {
420 	int i;
421 
422 	for (i = 0; i < dev->nvqs; ++i)
423 		vhost_vq_free_iovecs(dev->vqs[i]);
424 }
425 
426 bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
427 			  int pkts, int total_len)
428 {
429 	struct vhost_dev *dev = vq->dev;
430 
431 	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
432 	    pkts >= dev->weight) {
433 		vhost_poll_queue(&vq->poll);
434 		return true;
435 	}
436 
437 	return false;
438 }
439 EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
440 
441 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
442 				   unsigned int num)
443 {
444 	size_t event __maybe_unused =
445 	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
446 
447 	return sizeof(*vq->avail) +
448 	       sizeof(*vq->avail->ring) * num + event;
449 }
450 
451 static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
452 				  unsigned int num)
453 {
454 	size_t event __maybe_unused =
455 	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
456 
457 	return sizeof(*vq->used) +
458 	       sizeof(*vq->used->ring) * num + event;
459 }
460 
461 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
462 				  unsigned int num)
463 {
464 	return sizeof(*vq->desc) * num;
465 }
466 
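/* Initialize the generic vhost state for a device. As a rough, illustrative
 * sketch (the names below are placeholders, not taken from any particular
 * backend), a vhost backend is expected to do something like:
 *
 *	vhost_dev_init(&n->dev, vqs, nvqs, UIO_MAXIOV, weight, byte_weight,
 *		       true, NULL);
 *	...
 *	// later, from its VHOST_SET_OWNER ioctl path:
 *	vhost_dev_set_owner(&n->dev);
 *
 * after which the per-vq handle_kick callbacks run via vhost_poll/vhost_work.
 */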
467 void vhost_dev_init(struct vhost_dev *dev,
468 		    struct vhost_virtqueue **vqs, int nvqs,
469 		    int iov_limit, int weight, int byte_weight,
470 		    bool use_worker,
471 		    int (*msg_handler)(struct vhost_dev *dev,
472 				       struct vhost_iotlb_msg *msg))
473 {
474 	struct vhost_virtqueue *vq;
475 	int i;
476 
477 	dev->vqs = vqs;
478 	dev->nvqs = nvqs;
479 	mutex_init(&dev->mutex);
480 	dev->log_ctx = NULL;
481 	dev->umem = NULL;
482 	dev->iotlb = NULL;
483 	dev->mm = NULL;
484 	dev->worker = NULL;
485 	dev->iov_limit = iov_limit;
486 	dev->weight = weight;
487 	dev->byte_weight = byte_weight;
488 	dev->use_worker = use_worker;
489 	dev->msg_handler = msg_handler;
490 	init_llist_head(&dev->work_list);
491 	init_waitqueue_head(&dev->wait);
492 	INIT_LIST_HEAD(&dev->read_list);
493 	INIT_LIST_HEAD(&dev->pending_list);
494 	spin_lock_init(&dev->iotlb_lock);
495 
496 
497 	for (i = 0; i < dev->nvqs; ++i) {
498 		vq = dev->vqs[i];
499 		vq->log = NULL;
500 		vq->indirect = NULL;
501 		vq->heads = NULL;
502 		vq->dev = dev;
503 		mutex_init(&vq->mutex);
504 		vhost_vq_reset(dev, vq);
505 		if (vq->handle_kick)
506 			vhost_poll_init(&vq->poll, vq->handle_kick,
507 					EPOLLIN, dev);
508 	}
509 }
510 EXPORT_SYMBOL_GPL(vhost_dev_init);
511 
512 /* Caller should have device mutex */
513 long vhost_dev_check_owner(struct vhost_dev *dev)
514 {
515 	/* Are you the owner? If not, I don't think you mean to do that */
516 	return dev->mm == current->mm ? 0 : -EPERM;
517 }
518 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
519 
520 struct vhost_attach_cgroups_struct {
521 	struct vhost_work work;
522 	struct task_struct *owner;
523 	int ret;
524 };
525 
526 static void vhost_attach_cgroups_work(struct vhost_work *work)
527 {
528 	struct vhost_attach_cgroups_struct *s;
529 
530 	s = container_of(work, struct vhost_attach_cgroups_struct, work);
531 	s->ret = cgroup_attach_task_all(s->owner, current);
532 }
533 
534 static int vhost_attach_cgroups(struct vhost_dev *dev)
535 {
536 	struct vhost_attach_cgroups_struct attach;
537 
538 	attach.owner = current;
539 	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
540 	vhost_work_queue(dev, &attach.work);
541 	vhost_work_flush(dev, &attach.work);
542 	return attach.ret;
543 }
544 
545 /* Caller should have device mutex */
546 bool vhost_dev_has_owner(struct vhost_dev *dev)
547 {
548 	return dev->mm;
549 }
550 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
551 
552 static void vhost_attach_mm(struct vhost_dev *dev)
553 {
554 	/* No owner, become one */
555 	if (dev->use_worker) {
556 		dev->mm = get_task_mm(current);
557 	} else {
558 		/* vDPA devices do not use a worker thread, so there is
559 		 * no need to hold the address space for the mm. This helps
560 		 * to avoid a deadlock in the case of mmap(), which may
561 		 * hold the refcnt of the file and depend on the release
562 		 * method to remove the vma.
563 		 */
564 		dev->mm = current->mm;
565 		mmgrab(dev->mm);
566 	}
567 }
568 
569 static void vhost_detach_mm(struct vhost_dev *dev)
570 {
571 	if (!dev->mm)
572 		return;
573 
574 	if (dev->use_worker)
575 		mmput(dev->mm);
576 	else
577 		mmdrop(dev->mm);
578 
579 	dev->mm = NULL;
580 }
581 
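/* Becoming the owner attaches the caller's mm, spawns the "vhost-<pid>"
 * worker kthread (when use_worker is set), moves that thread into the
 * caller's cgroups and allocates the per-vq iovec arrays.
 */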
582 /* Caller should have device mutex */
583 long vhost_dev_set_owner(struct vhost_dev *dev)
584 {
585 	struct task_struct *worker;
586 	int err;
587 
588 	/* Is there an owner already? */
589 	if (vhost_dev_has_owner(dev)) {
590 		err = -EBUSY;
591 		goto err_mm;
592 	}
593 
594 	vhost_attach_mm(dev);
595 
596 	dev->kcov_handle = kcov_common_handle();
597 	if (dev->use_worker) {
598 		worker = kthread_create(vhost_worker, dev,
599 					"vhost-%d", current->pid);
600 		if (IS_ERR(worker)) {
601 			err = PTR_ERR(worker);
602 			goto err_worker;
603 		}
604 
605 		dev->worker = worker;
606 		wake_up_process(worker); /* avoid contributing to loadavg */
607 
608 		err = vhost_attach_cgroups(dev);
609 		if (err)
610 			goto err_cgroup;
611 	}
612 
613 	err = vhost_dev_alloc_iovecs(dev);
614 	if (err)
615 		goto err_cgroup;
616 
617 	return 0;
618 err_cgroup:
619 	if (dev->worker) {
620 		kthread_stop(dev->worker);
621 		dev->worker = NULL;
622 	}
623 err_worker:
624 	vhost_detach_mm(dev);
625 	dev->kcov_handle = 0;
626 err_mm:
627 	return err;
628 }
629 EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
630 
631 static struct vhost_iotlb *iotlb_alloc(void)
632 {
633 	return vhost_iotlb_alloc(max_iotlb_entries,
634 				 VHOST_IOTLB_FLAG_RETIRE);
635 }
636 
637 struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
638 {
639 	return iotlb_alloc();
640 }
641 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
642 
643 /* Caller should have device mutex */
644 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
645 {
646 	int i;
647 
648 	vhost_dev_cleanup(dev);
649 
650 	dev->umem = umem;
651 	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
652 	 * VQs aren't running.
653 	 */
654 	for (i = 0; i < dev->nvqs; ++i)
655 		dev->vqs[i]->umem = umem;
656 }
657 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
658 
659 void vhost_dev_stop(struct vhost_dev *dev)
660 {
661 	int i;
662 
663 	for (i = 0; i < dev->nvqs; ++i) {
664 		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
665 			vhost_poll_stop(&dev->vqs[i]->poll);
666 			vhost_poll_flush(&dev->vqs[i]->poll);
667 		}
668 	}
669 }
670 EXPORT_SYMBOL_GPL(vhost_dev_stop);
671 
672 void vhost_clear_msg(struct vhost_dev *dev)
673 {
674 	struct vhost_msg_node *node, *n;
675 
676 	spin_lock(&dev->iotlb_lock);
677 
678 	list_for_each_entry_safe(node, n, &dev->read_list, node) {
679 		list_del(&node->node);
680 		kfree(node);
681 	}
682 
683 	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
684 		list_del(&node->node);
685 		kfree(node);
686 	}
687 
688 	spin_unlock(&dev->iotlb_lock);
689 }
690 EXPORT_SYMBOL_GPL(vhost_clear_msg);
691 
692 void vhost_dev_cleanup(struct vhost_dev *dev)
693 {
694 	int i;
695 
696 	for (i = 0; i < dev->nvqs; ++i) {
697 		if (dev->vqs[i]->error_ctx)
698 			eventfd_ctx_put(dev->vqs[i]->error_ctx);
699 		if (dev->vqs[i]->kick)
700 			fput(dev->vqs[i]->kick);
701 		if (dev->vqs[i]->call_ctx.ctx)
702 			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
703 		vhost_vq_reset(dev, dev->vqs[i]);
704 	}
705 	vhost_dev_free_iovecs(dev);
706 	if (dev->log_ctx)
707 		eventfd_ctx_put(dev->log_ctx);
708 	dev->log_ctx = NULL;
709 	/* No one will access memory at this point */
710 	vhost_iotlb_free(dev->umem);
711 	dev->umem = NULL;
712 	vhost_iotlb_free(dev->iotlb);
713 	dev->iotlb = NULL;
714 	vhost_clear_msg(dev);
715 	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
716 	WARN_ON(!llist_empty(&dev->work_list));
717 	if (dev->worker) {
718 		kthread_stop(dev->worker);
719 		dev->worker = NULL;
720 		dev->kcov_handle = 0;
721 	}
722 	vhost_detach_mm(dev);
723 }
724 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
725 
726 static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
727 {
728 	u64 a = addr / VHOST_PAGE_SIZE / 8;
729 
730 	/* Make sure 64 bit math will not overflow. */
731 	if (a > ULONG_MAX - (unsigned long)log_base ||
732 	    a + (unsigned long)log_base > ULONG_MAX)
733 		return false;
734 
735 	return access_ok(log_base + a,
736 			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
737 }
738 
739 /* Make sure 64 bit math will not overflow. */
740 static bool vhost_overflow(u64 uaddr, u64 size)
741 {
742 	if (uaddr > ULONG_MAX || size > ULONG_MAX)
743 		return true;
744 
745 	if (!size)
746 		return false;
747 
748 	return uaddr > ULONG_MAX - size + 1;
749 }
750 
751 /* Caller should have vq mutex and device mutex. */
752 static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
753 				int log_all)
754 {
755 	struct vhost_iotlb_map *map;
756 
757 	if (!umem)
758 		return false;
759 
760 	list_for_each_entry(map, &umem->list, link) {
761 		unsigned long a = map->addr;
762 
763 		if (vhost_overflow(map->addr, map->size))
764 			return false;
765 
766 
767 		if (!access_ok((void __user *)a, map->size))
768 			return false;
769 		else if (log_all && !log_access_ok(log_base,
770 						   map->start,
771 						   map->size))
772 			return false;
773 	}
774 	return true;
775 }
776 
777 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
778 					       u64 addr, unsigned int size,
779 					       int type)
780 {
781 	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
782 
783 	if (!map)
784 		return NULL;
785 
786 	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
787 }
788 
789 /* Can we switch to this memory table? */
790 /* Caller should have device mutex but not vq mutex */
791 static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
792 			     int log_all)
793 {
794 	int i;
795 
796 	for (i = 0; i < d->nvqs; ++i) {
797 		bool ok;
798 		bool log;
799 
800 		mutex_lock(&d->vqs[i]->mutex);
801 		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
802 		/* If the ring is inactive, we will check when it is enabled. */
803 		if (d->vqs[i]->private_data)
804 			ok = vq_memory_access_ok(d->vqs[i]->log_base,
805 						 umem, log);
806 		else
807 			ok = true;
808 		mutex_unlock(&d->vqs[i]->mutex);
809 		if (!ok)
810 			return false;
811 	}
812 	return true;
813 }
814 
815 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
816 			  struct iovec iov[], int iov_size, int access);
817 
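/* Copy helpers for the vrings. Without an IOTLB the ring addresses are
 * ordinary userspace pointers; with an IOTLB they are GIOVAs that must be
 * translated first, using the cached meta_iotlb entry when possible and
 * translate_desc() otherwise.
 */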
818 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
819 			      const void *from, unsigned size)
820 {
821 	int ret;
822 
823 	if (!vq->iotlb)
824 		return __copy_to_user(to, from, size);
825 	else {
826 		/* This function should be called after IOTLB
827 		 * prefetch, which means we are sure the whole vq
828 		 * can be accessed through the IOTLB. So -EAGAIN should
829 		 * not happen in this case.
830 		 */
831 		struct iov_iter t;
832 		void __user *uaddr = vhost_vq_meta_fetch(vq,
833 				     (u64)(uintptr_t)to, size,
834 				     VHOST_ADDR_USED);
835 
836 		if (uaddr)
837 			return __copy_to_user(uaddr, from, size);
838 
839 		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
840 				     ARRAY_SIZE(vq->iotlb_iov),
841 				     VHOST_ACCESS_WO);
842 		if (ret < 0)
843 			goto out;
844 		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
845 		ret = copy_to_iter(from, size, &t);
846 		if (ret == size)
847 			ret = 0;
848 	}
849 out:
850 	return ret;
851 }
852 
853 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
854 				void __user *from, unsigned size)
855 {
856 	int ret;
857 
858 	if (!vq->iotlb)
859 		return __copy_from_user(to, from, size);
860 	else {
861 		/* This function should be called after IOTLB
862 		 * prefetch, which means we are sure the vq
863 		 * can be accessed through the IOTLB. So -EAGAIN should
864 		 * not happen in this case.
865 		 */
866 		void __user *uaddr = vhost_vq_meta_fetch(vq,
867 				     (u64)(uintptr_t)from, size,
868 				     VHOST_ADDR_DESC);
869 		struct iov_iter f;
870 
871 		if (uaddr)
872 			return __copy_from_user(to, uaddr, size);
873 
874 		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
875 				     ARRAY_SIZE(vq->iotlb_iov),
876 				     VHOST_ACCESS_RO);
877 		if (ret < 0) {
878 			vq_err(vq, "IOTLB translation failure: uaddr "
879 			       "%p size 0x%llx\n", from,
880 			       (unsigned long long) size);
881 			goto out;
882 		}
883 		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
884 		ret = copy_from_iter(to, size, &f);
885 		if (ret == size)
886 			ret = 0;
887 	}
888 
889 out:
890 	return ret;
891 }
892 
893 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
894 					  void __user *addr, unsigned int size,
895 					  int type)
896 {
897 	int ret;
898 
899 	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
900 			     ARRAY_SIZE(vq->iotlb_iov),
901 			     VHOST_ACCESS_RO);
902 	if (ret < 0) {
903 		vq_err(vq, "IOTLB translation failure: uaddr "
904 			"%p size 0x%llx\n", addr,
905 			(unsigned long long) size);
906 		return NULL;
907 	}
908 
909 	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
910 		vq_err(vq, "Non atomic userspace memory access: uaddr "
911 			"%p size 0x%llx\n", addr,
912 			(unsigned long long) size);
913 		return NULL;
914 	}
915 
916 	return vq->iotlb_iov[0].iov_base;
917 }
918 
919 /* This function should be called after IOTLB
920  * prefetch, which means we are sure the vq
921  * can be accessed through the IOTLB. So -EAGAIN should
922  * not happen in this case.
923  */
924 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
925 					    void __user *addr, unsigned int size,
926 					    int type)
927 {
928 	void __user *uaddr = vhost_vq_meta_fetch(vq,
929 			     (u64)(uintptr_t)addr, size, type);
930 	if (uaddr)
931 		return uaddr;
932 
933 	return __vhost_get_user_slow(vq, addr, size, type);
934 }
935 
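/* Write one ring field: plain __put_user() when there is no IOTLB,
 * otherwise translate the used-ring address via __vhost_get_user() first.
 */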
936 #define vhost_put_user(vq, x, ptr)		\
937 ({ \
938 	int ret; \
939 	if (!vq->iotlb) { \
940 		ret = __put_user(x, ptr); \
941 	} else { \
942 		__typeof__(ptr) to = \
943 			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
944 					  sizeof(*ptr), VHOST_ADDR_USED); \
945 		if (to != NULL) \
946 			ret = __put_user(x, to); \
947 		else \
948 			ret = -EFAULT;	\
949 	} \
950 	ret; \
951 })
952 
953 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
954 {
955 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
956 			      vhost_avail_event(vq));
957 }
958 
959 static inline int vhost_put_used(struct vhost_virtqueue *vq,
960 				 struct vring_used_elem *head, int idx,
961 				 int count)
962 {
963 	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
964 				  count * sizeof(*head));
965 }
966 
967 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
968 
969 {
970 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
971 			      &vq->used->flags);
972 }
973 
974 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
975 
976 {
977 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
978 			      &vq->used->idx);
979 }
980 
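/* Read one ring field, translating the address through the IOTLB metadata
 * cache of the given type when an IOTLB is in use.
 */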
981 #define vhost_get_user(vq, x, ptr, type)		\
982 ({ \
983 	int ret; \
984 	if (!vq->iotlb) { \
985 		ret = __get_user(x, ptr); \
986 	} else { \
987 		__typeof__(ptr) from = \
988 			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
989 							   sizeof(*ptr), \
990 							   type); \
991 		if (from != NULL) \
992 			ret = __get_user(x, from); \
993 		else \
994 			ret = -EFAULT; \
995 	} \
996 	ret; \
997 })
998 
999 #define vhost_get_avail(vq, x, ptr) \
1000 	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
1001 
1002 #define vhost_get_used(vq, x, ptr) \
1003 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
1004 
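/* Take (and later release) every vq mutex in index order; used around
 * IOTLB updates so the per-vq metadata caches can be reset safely.
 */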
1005 static void vhost_dev_lock_vqs(struct vhost_dev *d)
1006 {
1007 	int i = 0;
1008 	for (i = 0; i < d->nvqs; ++i)
1009 		mutex_lock_nested(&d->vqs[i]->mutex, i);
1010 }
1011 
1012 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1013 {
1014 	int i = 0;
1015 	for (i = 0; i < d->nvqs; ++i)
1016 		mutex_unlock(&d->vqs[i]->mutex);
1017 }
1018 
1019 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1020 				      __virtio16 *idx)
1021 {
1022 	return vhost_get_avail(vq, *idx, &vq->avail->idx);
1023 }
1024 
1025 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1026 				       __virtio16 *head, int idx)
1027 {
1028 	return vhost_get_avail(vq, *head,
1029 			       &vq->avail->ring[idx & (vq->num - 1)]);
1030 }
1031 
1032 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1033 					__virtio16 *flags)
1034 {
1035 	return vhost_get_avail(vq, *flags, &vq->avail->flags);
1036 }
1037 
1038 static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1039 				       __virtio16 *event)
1040 {
1041 	return vhost_get_avail(vq, *event, vhost_used_event(vq));
1042 }
1043 
1044 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1045 				     __virtio16 *idx)
1046 {
1047 	return vhost_get_used(vq, *idx, &vq->used->idx);
1048 }
1049 
1050 static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1051 				 struct vring_desc *desc, int idx)
1052 {
1053 	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1054 }
1055 
1056 static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1057 				  struct vhost_iotlb_msg *msg)
1058 {
1059 	struct vhost_msg_node *node, *n;
1060 
1061 	spin_lock(&d->iotlb_lock);
1062 
1063 	list_for_each_entry_safe(node, n, &d->pending_list, node) {
1064 		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1065 		if (msg->iova <= vq_msg->iova &&
1066 		    msg->iova + msg->size - 1 >= vq_msg->iova &&
1067 		    vq_msg->type == VHOST_IOTLB_MISS) {
1068 			vhost_poll_queue(&node->vq->poll);
1069 			list_del(&node->node);
1070 			kfree(node);
1071 		}
1072 	}
1073 
1074 	spin_unlock(&d->iotlb_lock);
1075 }
1076 
1077 static bool umem_access_ok(u64 uaddr, u64 size, int access)
1078 {
1079 	unsigned long a = uaddr;
1080 
1081 	/* Make sure 64 bit math will not overflow. */
1082 	if (vhost_overflow(uaddr, size))
1083 		return false;
1084 
1085 	if ((access & VHOST_ACCESS_RO) &&
1086 	    !access_ok((void __user *)a, size))
1087 		return false;
1088 	if ((access & VHOST_ACCESS_WO) &&
1089 	    !access_ok((void __user *)a, size))
1090 		return false;
1091 	return true;
1092 }
1093 
1094 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1095 				   struct vhost_iotlb_msg *msg)
1096 {
1097 	int ret = 0;
1098 
1099 	mutex_lock(&dev->mutex);
1100 	vhost_dev_lock_vqs(dev);
1101 	switch (msg->type) {
1102 	case VHOST_IOTLB_UPDATE:
1103 		if (!dev->iotlb) {
1104 			ret = -EFAULT;
1105 			break;
1106 		}
1107 		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1108 			ret = -EFAULT;
1109 			break;
1110 		}
1111 		vhost_vq_meta_reset(dev);
1112 		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
1113 					  msg->iova + msg->size - 1,
1114 					  msg->uaddr, msg->perm)) {
1115 			ret = -ENOMEM;
1116 			break;
1117 		}
1118 		vhost_iotlb_notify_vq(dev, msg);
1119 		break;
1120 	case VHOST_IOTLB_INVALIDATE:
1121 		if (!dev->iotlb) {
1122 			ret = -EFAULT;
1123 			break;
1124 		}
1125 		vhost_vq_meta_reset(dev);
1126 		vhost_iotlb_del_range(dev->iotlb, msg->iova,
1127 				      msg->iova + msg->size - 1);
1128 		break;
1129 	default:
1130 		ret = -EINVAL;
1131 		break;
1132 	}
1133 
1134 	vhost_dev_unlock_vqs(dev);
1135 	mutex_unlock(&dev->mutex);
1136 
1137 	return ret;
1138 }
1139 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1140 			     struct iov_iter *from)
1141 {
1142 	struct vhost_iotlb_msg msg;
1143 	size_t offset;
1144 	int type, ret;
1145 
1146 	ret = copy_from_iter(&type, sizeof(type), from);
1147 	if (ret != sizeof(type)) {
1148 		ret = -EINVAL;
1149 		goto done;
1150 	}
1151 
1152 	switch (type) {
1153 	case VHOST_IOTLB_MSG:
1154 		/* There may be a hole after the type field for the V1
1155 		 * message format, so skip it here.
1156 		 */
1157 		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1158 		break;
1159 	case VHOST_IOTLB_MSG_V2:
1160 		offset = sizeof(__u32);
1161 		break;
1162 	default:
1163 		ret = -EINVAL;
1164 		goto done;
1165 	}
1166 
1167 	iov_iter_advance(from, offset);
1168 	ret = copy_from_iter(&msg, sizeof(msg), from);
1169 	if (ret != sizeof(msg)) {
1170 		ret = -EINVAL;
1171 		goto done;
1172 	}
1173 
1174 	if (dev->msg_handler)
1175 		ret = dev->msg_handler(dev, &msg);
1176 	else
1177 		ret = vhost_process_iotlb_msg(dev, &msg);
1178 	if (ret) {
1179 		ret = -EFAULT;
1180 		goto done;
1181 	}
1182 
1183 	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1184 	      sizeof(struct vhost_msg_v2);
1185 done:
1186 	return ret;
1187 }
1188 EXPORT_SYMBOL(vhost_chr_write_iter);
1189 
1190 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1191 			    poll_table *wait)
1192 {
1193 	__poll_t mask = 0;
1194 
1195 	poll_wait(file, &dev->wait, wait);
1196 
1197 	if (!list_empty(&dev->read_list))
1198 		mask |= EPOLLIN | EPOLLRDNORM;
1199 
1200 	return mask;
1201 }
1202 EXPORT_SYMBOL(vhost_chr_poll);
1203 
1204 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1205 			    int noblock)
1206 {
1207 	DEFINE_WAIT(wait);
1208 	struct vhost_msg_node *node;
1209 	ssize_t ret = 0;
1210 	unsigned size = sizeof(struct vhost_msg);
1211 
1212 	if (iov_iter_count(to) < size)
1213 		return 0;
1214 
1215 	while (1) {
1216 		if (!noblock)
1217 			prepare_to_wait(&dev->wait, &wait,
1218 					TASK_INTERRUPTIBLE);
1219 
1220 		node = vhost_dequeue_msg(dev, &dev->read_list);
1221 		if (node)
1222 			break;
1223 		if (noblock) {
1224 			ret = -EAGAIN;
1225 			break;
1226 		}
1227 		if (signal_pending(current)) {
1228 			ret = -ERESTARTSYS;
1229 			break;
1230 		}
1231 		if (!dev->iotlb) {
1232 			ret = -EBADFD;
1233 			break;
1234 		}
1235 
1236 		schedule();
1237 	}
1238 
1239 	if (!noblock)
1240 		finish_wait(&dev->wait, &wait);
1241 
1242 	if (node) {
1243 		struct vhost_iotlb_msg *msg;
1244 		void *start = &node->msg;
1245 
1246 		switch (node->msg.type) {
1247 		case VHOST_IOTLB_MSG:
1248 			size = sizeof(node->msg);
1249 			msg = &node->msg.iotlb;
1250 			break;
1251 		case VHOST_IOTLB_MSG_V2:
1252 			size = sizeof(node->msg_v2);
1253 			msg = &node->msg_v2.iotlb;
1254 			break;
1255 		default:
1256 			BUG();
1257 			break;
1258 		}
1259 
1260 		ret = copy_to_iter(start, size, to);
1261 		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1262 			kfree(node);
1263 			return ret;
1264 		}
1265 		vhost_enqueue_msg(dev, &dev->pending_list, node);
1266 	}
1267 
1268 	return ret;
1269 }
1270 EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1271 
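/* Queue a VHOST_IOTLB_MISS message on read_list so that userspace, reading
 * the vhost char device, can service the translation fault for this GIOVA.
 */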
1272 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1273 {
1274 	struct vhost_dev *dev = vq->dev;
1275 	struct vhost_msg_node *node;
1276 	struct vhost_iotlb_msg *msg;
1277 	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1278 
1279 	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1280 	if (!node)
1281 		return -ENOMEM;
1282 
1283 	if (v2) {
1284 		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1285 		msg = &node->msg_v2.iotlb;
1286 	} else {
1287 		msg = &node->msg.iotlb;
1288 	}
1289 
1290 	msg->type = VHOST_IOTLB_MISS;
1291 	msg->iova = iova;
1292 	msg->perm = access;
1293 
1294 	vhost_enqueue_msg(dev, &dev->read_list, node);
1295 
1296 	return 0;
1297 }
1298 
1299 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1300 			 vring_desc_t __user *desc,
1301 			 vring_avail_t __user *avail,
1302 			 vring_used_t __user *used)
1303 
1304 {
1305 	/* If an IOTLB device is present, the vring addresses are
1306 	 * GIOVAs. Access validation occurs at prefetch time. */
1307 	if (vq->iotlb)
1308 		return true;
1309 
1310 	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1311 	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
1312 	       access_ok(used, vhost_get_used_size(vq, num));
1313 }
1314 
1315 static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1316 				 const struct vhost_iotlb_map *map,
1317 				 int type)
1318 {
1319 	int access = (type == VHOST_ADDR_USED) ?
1320 		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1321 
1322 	if (likely(map->perm & access))
1323 		vq->meta_iotlb[type] = map;
1324 }
1325 
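/* Check that [addr, addr + len) is fully covered by the IOTLB with the
 * required access: queue an IOTLB miss and fail on a hole, and cache a
 * mapping that spans the whole range in meta_iotlb.
 */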
1326 static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1327 			    int access, u64 addr, u64 len, int type)
1328 {
1329 	const struct vhost_iotlb_map *map;
1330 	struct vhost_iotlb *umem = vq->iotlb;
1331 	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1332 
1333 	if (vhost_vq_meta_fetch(vq, addr, len, type))
1334 		return true;
1335 
1336 	while (len > s) {
1337 		map = vhost_iotlb_itree_first(umem, addr, last);
1338 		if (map == NULL || map->start > addr) {
1339 			vhost_iotlb_miss(vq, addr, access);
1340 			return false;
1341 		} else if (!(map->perm & access)) {
1342 			/* Report the possible access violation by
1343 			 * requesting another translation from userspace.
1344 			 */
1345 			return false;
1346 		}
1347 
1348 		size = map->size - addr + map->start;
1349 
1350 		if (orig_addr == addr && size >= len)
1351 			vhost_vq_meta_update(vq, map, type);
1352 
1353 		s += size;
1354 		addr += size;
1355 	}
1356 
1357 	return true;
1358 }
1359 
1360 int vq_meta_prefetch(struct vhost_virtqueue *vq)
1361 {
1362 	unsigned int num = vq->num;
1363 
1364 	if (!vq->iotlb)
1365 		return 1;
1366 
1367 	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
1368 			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1369 	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
1370 			       vhost_get_avail_size(vq, num),
1371 			       VHOST_ADDR_AVAIL) &&
1372 	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
1373 			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1374 }
1375 EXPORT_SYMBOL_GPL(vq_meta_prefetch);
1376 
1377 /* Can we log writes? */
1378 /* Caller should have device mutex but not vq mutex */
1379 bool vhost_log_access_ok(struct vhost_dev *dev)
1380 {
1381 	return memory_access_ok(dev, dev->umem, 1);
1382 }
1383 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1384 
1385 static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
1386 				  void __user *log_base,
1387 				  bool log_used,
1388 				  u64 log_addr)
1389 {
1390 	/* If an IOTLB device is present, log_addr is a GIOVA that
1391 	 * will never be logged by log_used(). */
1392 	if (vq->iotlb)
1393 		return true;
1394 
1395 	return !log_used || log_access_ok(log_base, log_addr,
1396 					  vhost_get_used_size(vq, vq->num));
1397 }
1398 
1399 /* Verify access for write logging. */
1400 /* Caller should have vq mutex and device mutex */
1401 static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1402 			     void __user *log_base)
1403 {
1404 	return vq_memory_access_ok(log_base, vq->umem,
1405 				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1406 		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
1407 }
1408 
1409 /* Can we start vq? */
1410 /* Caller should have vq mutex and device mutex */
1411 bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1412 {
1413 	if (!vq_log_access_ok(vq, vq->log_base))
1414 		return false;
1415 
1416 	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1417 }
1418 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1419 
1420 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1421 {
1422 	struct vhost_memory mem, *newmem;
1423 	struct vhost_memory_region *region;
1424 	struct vhost_iotlb *newumem, *oldumem;
1425 	unsigned long size = offsetof(struct vhost_memory, regions);
1426 	int i;
1427 
1428 	if (copy_from_user(&mem, m, size))
1429 		return -EFAULT;
1430 	if (mem.padding)
1431 		return -EOPNOTSUPP;
1432 	if (mem.nregions > max_mem_regions)
1433 		return -E2BIG;
1434 	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1435 			GFP_KERNEL);
1436 	if (!newmem)
1437 		return -ENOMEM;
1438 
1439 	memcpy(newmem, &mem, size);
1440 	if (copy_from_user(newmem->regions, m->regions,
1441 			   flex_array_size(newmem, regions, mem.nregions))) {
1442 		kvfree(newmem);
1443 		return -EFAULT;
1444 	}
1445 
1446 	newumem = iotlb_alloc();
1447 	if (!newumem) {
1448 		kvfree(newmem);
1449 		return -ENOMEM;
1450 	}
1451 
1452 	for (region = newmem->regions;
1453 	     region < newmem->regions + mem.nregions;
1454 	     region++) {
1455 		if (vhost_iotlb_add_range(newumem,
1456 					  region->guest_phys_addr,
1457 					  region->guest_phys_addr +
1458 					  region->memory_size - 1,
1459 					  region->userspace_addr,
1460 					  VHOST_MAP_RW))
1461 			goto err;
1462 	}
1463 
1464 	if (!memory_access_ok(d, newumem, 0))
1465 		goto err;
1466 
1467 	oldumem = d->umem;
1468 	d->umem = newumem;
1469 
1470 	/* All memory accesses are done under some VQ mutex. */
1471 	for (i = 0; i < d->nvqs; ++i) {
1472 		mutex_lock(&d->vqs[i]->mutex);
1473 		d->vqs[i]->umem = newumem;
1474 		mutex_unlock(&d->vqs[i]->mutex);
1475 	}
1476 
1477 	kvfree(newmem);
1478 	vhost_iotlb_free(oldumem);
1479 	return 0;
1480 
1481 err:
1482 	vhost_iotlb_free(newumem);
1483 	kvfree(newmem);
1484 	return -EFAULT;
1485 }
1486 
1487 static long vhost_vring_set_num(struct vhost_dev *d,
1488 				struct vhost_virtqueue *vq,
1489 				void __user *argp)
1490 {
1491 	struct vhost_vring_state s;
1492 
1493 	/* Resizing ring with an active backend?
1494 	 * You don't want to do that. */
1495 	if (vq->private_data)
1496 		return -EBUSY;
1497 
1498 	if (copy_from_user(&s, argp, sizeof s))
1499 		return -EFAULT;
1500 
1501 	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1502 		return -EINVAL;
1503 	vq->num = s.num;
1504 
1505 	return 0;
1506 }
1507 
1508 static long vhost_vring_set_addr(struct vhost_dev *d,
1509 				 struct vhost_virtqueue *vq,
1510 				 void __user *argp)
1511 {
1512 	struct vhost_vring_addr a;
1513 
1514 	if (copy_from_user(&a, argp, sizeof a))
1515 		return -EFAULT;
1516 	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1517 		return -EOPNOTSUPP;
1518 
1519 	/* For 32-bit, verify that the top 32 bits of the user
1520 	   data are set to zero. */
1521 	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1522 	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1523 	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1524 		return -EFAULT;
1525 
1526 	/* Make sure it's safe to cast pointers to vring types. */
1527 	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1528 	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1529 	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1530 	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1531 	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1532 		return -EINVAL;
1533 
1534 	/* We only verify access here if backend is configured.
1535 	 * If it is not, we don't as size might not have been setup.
1536 	 * We will verify when backend is configured. */
1537 	if (vq->private_data) {
1538 		if (!vq_access_ok(vq, vq->num,
1539 			(void __user *)(unsigned long)a.desc_user_addr,
1540 			(void __user *)(unsigned long)a.avail_user_addr,
1541 			(void __user *)(unsigned long)a.used_user_addr))
1542 			return -EINVAL;
1543 
1544 		/* Also validate log access for used ring if enabled. */
1545 		if (!vq_log_used_access_ok(vq, vq->log_base,
1546 				a.flags & (0x1 << VHOST_VRING_F_LOG),
1547 				a.log_guest_addr))
1548 			return -EINVAL;
1549 	}
1550 
1551 	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1552 	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1553 	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1554 	vq->log_addr = a.log_guest_addr;
1555 	vq->used = (void __user *)(unsigned long)a.used_user_addr;
1556 
1557 	return 0;
1558 }
1559 
1560 static long vhost_vring_set_num_addr(struct vhost_dev *d,
1561 				     struct vhost_virtqueue *vq,
1562 				     unsigned int ioctl,
1563 				     void __user *argp)
1564 {
1565 	long r;
1566 
1567 	mutex_lock(&vq->mutex);
1568 
1569 	switch (ioctl) {
1570 	case VHOST_SET_VRING_NUM:
1571 		r = vhost_vring_set_num(d, vq, argp);
1572 		break;
1573 	case VHOST_SET_VRING_ADDR:
1574 		r = vhost_vring_set_addr(d, vq, argp);
1575 		break;
1576 	default:
1577 		BUG();
1578 	}
1579 
1580 	mutex_unlock(&vq->mutex);
1581 
1582 	return r;
1583 }
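/* Per-vring ioctl handler: validates the ring index (hardened with
 * array_index_nospec) and dispatches the VHOST_*_VRING_* requests under
 * the vq mutex.
 */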
1584 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1585 {
1586 	struct file *eventfp, *filep = NULL;
1587 	bool pollstart = false, pollstop = false;
1588 	struct eventfd_ctx *ctx = NULL;
1589 	u32 __user *idxp = argp;
1590 	struct vhost_virtqueue *vq;
1591 	struct vhost_vring_state s;
1592 	struct vhost_vring_file f;
1593 	u32 idx;
1594 	long r;
1595 
1596 	r = get_user(idx, idxp);
1597 	if (r < 0)
1598 		return r;
1599 	if (idx >= d->nvqs)
1600 		return -ENOBUFS;
1601 
1602 	idx = array_index_nospec(idx, d->nvqs);
1603 	vq = d->vqs[idx];
1604 
1605 	if (ioctl == VHOST_SET_VRING_NUM ||
1606 	    ioctl == VHOST_SET_VRING_ADDR) {
1607 		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1608 	}
1609 
1610 	mutex_lock(&vq->mutex);
1611 
1612 	switch (ioctl) {
1613 	case VHOST_SET_VRING_BASE:
1614 		/* Moving base with an active backend?
1615 		 * You don't want to do that. */
1616 		if (vq->private_data) {
1617 			r = -EBUSY;
1618 			break;
1619 		}
1620 		if (copy_from_user(&s, argp, sizeof s)) {
1621 			r = -EFAULT;
1622 			break;
1623 		}
1624 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
1625 			vq->last_avail_idx = s.num & 0xffff;
1626 			vq->last_used_idx = (s.num >> 16) & 0xffff;
1627 		} else {
1628 			if (s.num > 0xffff) {
1629 				r = -EINVAL;
1630 				break;
1631 			}
1632 			vq->last_avail_idx = s.num;
1633 		}
1634 		/* Forget the cached index value. */
1635 		vq->avail_idx = vq->last_avail_idx;
1636 		break;
1637 	case VHOST_GET_VRING_BASE:
1638 		s.index = idx;
1639 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
1640 			s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
1641 		else
1642 			s.num = vq->last_avail_idx;
1643 		if (copy_to_user(argp, &s, sizeof s))
1644 			r = -EFAULT;
1645 		break;
1646 	case VHOST_SET_VRING_KICK:
1647 		if (copy_from_user(&f, argp, sizeof f)) {
1648 			r = -EFAULT;
1649 			break;
1650 		}
1651 		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
1652 		if (IS_ERR(eventfp)) {
1653 			r = PTR_ERR(eventfp);
1654 			break;
1655 		}
1656 		if (eventfp != vq->kick) {
1657 			pollstop = (filep = vq->kick) != NULL;
1658 			pollstart = (vq->kick = eventfp) != NULL;
1659 		} else
1660 			filep = eventfp;
1661 		break;
1662 	case VHOST_SET_VRING_CALL:
1663 		if (copy_from_user(&f, argp, sizeof f)) {
1664 			r = -EFAULT;
1665 			break;
1666 		}
1667 		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1668 		if (IS_ERR(ctx)) {
1669 			r = PTR_ERR(ctx);
1670 			break;
1671 		}
1672 
1673 		swap(ctx, vq->call_ctx.ctx);
1674 		break;
1675 	case VHOST_SET_VRING_ERR:
1676 		if (copy_from_user(&f, argp, sizeof f)) {
1677 			r = -EFAULT;
1678 			break;
1679 		}
1680 		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1681 		if (IS_ERR(ctx)) {
1682 			r = PTR_ERR(ctx);
1683 			break;
1684 		}
1685 		swap(ctx, vq->error_ctx);
1686 		break;
1687 	case VHOST_SET_VRING_ENDIAN:
1688 		r = vhost_set_vring_endian(vq, argp);
1689 		break;
1690 	case VHOST_GET_VRING_ENDIAN:
1691 		r = vhost_get_vring_endian(vq, idx, argp);
1692 		break;
1693 	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1694 		if (copy_from_user(&s, argp, sizeof(s))) {
1695 			r = -EFAULT;
1696 			break;
1697 		}
1698 		vq->busyloop_timeout = s.num;
1699 		break;
1700 	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1701 		s.index = idx;
1702 		s.num = vq->busyloop_timeout;
1703 		if (copy_to_user(argp, &s, sizeof(s)))
1704 			r = -EFAULT;
1705 		break;
1706 	default:
1707 		r = -ENOIOCTLCMD;
1708 	}
1709 
1710 	if (pollstop && vq->handle_kick)
1711 		vhost_poll_stop(&vq->poll);
1712 
1713 	if (!IS_ERR_OR_NULL(ctx))
1714 		eventfd_ctx_put(ctx);
1715 	if (filep)
1716 		fput(filep);
1717 
1718 	if (pollstart && vq->handle_kick)
1719 		r = vhost_poll_start(&vq->poll, vq->kick);
1720 
1721 	mutex_unlock(&vq->mutex);
1722 
1723 	if (pollstop && vq->handle_kick)
1724 		vhost_poll_flush(&vq->poll);
1725 	return r;
1726 }
1727 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
1728 
1729 int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1730 {
1731 	struct vhost_iotlb *niotlb, *oiotlb;
1732 	int i;
1733 
1734 	niotlb = iotlb_alloc();
1735 	if (!niotlb)
1736 		return -ENOMEM;
1737 
1738 	oiotlb = d->iotlb;
1739 	d->iotlb = niotlb;
1740 
1741 	for (i = 0; i < d->nvqs; ++i) {
1742 		struct vhost_virtqueue *vq = d->vqs[i];
1743 
1744 		mutex_lock(&vq->mutex);
1745 		vq->iotlb = niotlb;
1746 		__vhost_vq_meta_reset(vq);
1747 		mutex_unlock(&vq->mutex);
1748 	}
1749 
1750 	vhost_iotlb_free(oiotlb);
1751 
1752 	return 0;
1753 }
1754 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1755 
1756 /* Caller must have device mutex */
1757 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1758 {
1759 	struct eventfd_ctx *ctx;
1760 	u64 p;
1761 	long r;
1762 	int i, fd;
1763 
1764 	/* If you are not the owner, you can become one */
1765 	if (ioctl == VHOST_SET_OWNER) {
1766 		r = vhost_dev_set_owner(d);
1767 		goto done;
1768 	}
1769 
1770 	/* You must be the owner to do anything else */
1771 	r = vhost_dev_check_owner(d);
1772 	if (r)
1773 		goto done;
1774 
1775 	switch (ioctl) {
1776 	case VHOST_SET_MEM_TABLE:
1777 		r = vhost_set_memory(d, argp);
1778 		break;
1779 	case VHOST_SET_LOG_BASE:
1780 		if (copy_from_user(&p, argp, sizeof p)) {
1781 			r = -EFAULT;
1782 			break;
1783 		}
1784 		if ((u64)(unsigned long)p != p) {
1785 			r = -EFAULT;
1786 			break;
1787 		}
1788 		for (i = 0; i < d->nvqs; ++i) {
1789 			struct vhost_virtqueue *vq;
1790 			void __user *base = (void __user *)(unsigned long)p;
1791 			vq = d->vqs[i];
1792 			mutex_lock(&vq->mutex);
1793 			/* If the ring is inactive, we will check when it is enabled. */
1794 			if (vq->private_data && !vq_log_access_ok(vq, base))
1795 				r = -EFAULT;
1796 			else
1797 				vq->log_base = base;
1798 			mutex_unlock(&vq->mutex);
1799 		}
1800 		break;
1801 	case VHOST_SET_LOG_FD:
1802 		r = get_user(fd, (int __user *)argp);
1803 		if (r < 0)
1804 			break;
1805 		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
1806 		if (IS_ERR(ctx)) {
1807 			r = PTR_ERR(ctx);
1808 			break;
1809 		}
1810 		swap(ctx, d->log_ctx);
1811 		for (i = 0; i < d->nvqs; ++i) {
1812 			mutex_lock(&d->vqs[i]->mutex);
1813 			d->vqs[i]->log_ctx = d->log_ctx;
1814 			mutex_unlock(&d->vqs[i]->mutex);
1815 		}
1816 		if (ctx)
1817 			eventfd_ctx_put(ctx);
1818 		break;
1819 	default:
1820 		r = -ENOIOCTLCMD;
1821 		break;
1822 	}
1823 done:
1824 	return r;
1825 }
1826 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1827 
1828 /* TODO: This is really inefficient.  We need something like get_user()
1829  * (instruction directly accesses the data, with an exception table entry
1830  * returning -EFAULT). See Documentation/x86/exception-tables.rst.
1831  */
1832 static int set_bit_to_user(int nr, void __user *addr)
1833 {
1834 	unsigned long log = (unsigned long)addr;
1835 	struct page *page;
1836 	void *base;
1837 	int bit = nr + (log % PAGE_SIZE) * 8;
1838 	int r;
1839 
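	/* The log is a userspace bitmap: pin the page containing the target
	 * byte, set the requested bit through a temporary kernel mapping,
	 * then mark the page dirty and unpin it.
	 */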
1840 	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
1841 	if (r < 0)
1842 		return r;
1843 	BUG_ON(r != 1);
1844 	base = kmap_atomic(page);
1845 	set_bit(bit, base);
1846 	kunmap_atomic(base);
1847 	unpin_user_pages_dirty_lock(&page, 1, true);
1848 	return 0;
1849 }
1850 
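/* Mark [write_address, write_address + write_length) dirty in the userspace
 * log bitmap at log_base: one bit per VHOST_PAGE_SIZE page, i.e. bit
 * (page % 8) of byte (page / 8), where page = address / VHOST_PAGE_SIZE.
 */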
1851 static int log_write(void __user *log_base,
1852 		     u64 write_address, u64 write_length)
1853 {
1854 	u64 write_page = write_address / VHOST_PAGE_SIZE;
1855 	int r;
1856 
1857 	if (!write_length)
1858 		return 0;
1859 	write_length += write_address % VHOST_PAGE_SIZE;
1860 	for (;;) {
1861 		u64 base = (u64)(unsigned long)log_base;
1862 		u64 log = base + write_page / 8;
1863 		int bit = write_page % 8;
1864 		if ((u64)(unsigned long)log != log)
1865 			return -EFAULT;
1866 		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1867 		if (r < 0)
1868 			return r;
1869 		if (write_length <= VHOST_PAGE_SIZE)
1870 			break;
1871 		write_length -= VHOST_PAGE_SIZE;
1872 		write_page += 1;
1873 	}
1874 	return r;
1875 }
1876 
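/* Like log_write(), but takes a host virtual address. The umem mappings are
 * walked to translate each HVA range back to the guest physical addresses
 * that must be logged; a range not covered by any mapping is an -EFAULT.
 */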
1877 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1878 {
1879 	struct vhost_iotlb *umem = vq->umem;
1880 	struct vhost_iotlb_map *u;
1881 	u64 start, end, l, min;
1882 	int r;
1883 	bool hit = false;
1884 
1885 	while (len) {
1886 		min = len;
1887 		/* More than one GPA can be mapped into a single HVA, so
1888 		 * iterate over all possible umems here to be safe.
1889 		 */
1890 		list_for_each_entry(u, &umem->list, link) {
1891 			if (u->addr > hva - 1 + len ||
1892 			    u->addr - 1 + u->size < hva)
1893 				continue;
1894 			start = max(u->addr, hva);
1895 			end = min(u->addr - 1 + u->size, hva - 1 + len);
1896 			l = end - start + 1;
1897 			r = log_write(vq->log_base,
1898 				      u->start + start - u->addr,
1899 				      l);
1900 			if (r < 0)
1901 				return r;
1902 			hit = true;
1903 			min = min(l, min);
1904 		}
1905 
1906 		if (!hit)
1907 			return -EFAULT;
1908 
1909 		len -= min;
1910 		hva += min;
1911 	}
1912 
1913 	return 0;
1914 }
1915 
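/* Log a write of 'len' bytes at 'used_offset' within the used ring. Without
 * an IOTLB the used ring's guest physical address (vq->log_addr) is known
 * directly; with an IOTLB the used-ring address must first be translated
 * into host addresses and each resulting chunk logged via log_write_hva().
 */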
1916 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1917 {
1918 	struct iovec *iov = vq->log_iov;
1919 	int i, ret;
1920 
1921 	if (!vq->iotlb)
1922 		return log_write(vq->log_base, vq->log_addr + used_offset, len);
1923 
1924 	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1925 			     len, iov, 64, VHOST_ACCESS_WO);
1926 	if (ret < 0)
1927 		return ret;
1928 
1929 	for (i = 0; i < ret; i++) {
1930 		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
1931 				    iov[i].iov_len);
1932 		if (ret)
1933 			return ret;
1934 	}
1935 
1936 	return 0;
1937 }
1938 
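/* Called by backends after writing descriptor data into guest memory while
 * dirty logging is enabled. With an IOTLB, 'iov'/'count' describe the host
 * virtual addresses that were written; otherwise 'log'/'log_num' describe
 * the guest physical ranges recorded by vhost_get_vq_desc(), and 'len' is
 * the number of bytes actually written.
 */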
1939 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1940 		    unsigned int log_num, u64 len, struct iovec *iov, int count)
1941 {
1942 	int i, r;
1943 
1944 	/* Make sure data written is seen before log. */
1945 	smp_wmb();
1946 
1947 	if (vq->iotlb) {
1948 		for (i = 0; i < count; i++) {
1949 			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1950 					  iov[i].iov_len);
1951 			if (r < 0)
1952 				return r;
1953 		}
1954 		return 0;
1955 	}
1956 
1957 	for (i = 0; i < log_num; ++i) {
1958 		u64 l = min(log[i].len, len);
1959 		r = log_write(vq->log_base, log[i].addr, l);
1960 		if (r < 0)
1961 			return r;
1962 		len -= l;
1963 		if (!len) {
1964 			if (vq->log_ctx)
1965 				eventfd_signal(vq->log_ctx, 1);
1966 			return 0;
1967 		}
1968 	}
1969 	/* Length written exceeds what we have stored. This is a bug. */
1970 	BUG();
1971 	return 0;
1972 }
1973 EXPORT_SYMBOL_GPL(vhost_log_write);
1974 
1975 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1976 {
1977 	void __user *used;
1978 	if (vhost_put_used_flags(vq))
1979 		return -EFAULT;
1980 	if (unlikely(vq->log_used)) {
1981 		/* Make sure the flag is seen before log. */
1982 		smp_wmb();
1983 		/* Log used flag write. */
1984 		used = &vq->used->flags;
1985 		log_used(vq, (used - (void __user *)vq->used),
1986 			 sizeof vq->used->flags);
1987 		if (vq->log_ctx)
1988 			eventfd_signal(vq->log_ctx, 1);
1989 	}
1990 	return 0;
1991 }
1992 
1993 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1994 {
1995 	if (vhost_put_avail_event(vq))
1996 		return -EFAULT;
1997 	if (unlikely(vq->log_used)) {
1998 		void __user *used;
1999 		/* Make sure the event is seen before log. */
2000 		smp_wmb();
2001 		/* Log avail event write */
2002 		used = vhost_avail_event(vq);
2003 		log_used(vq, (used - (void __user *)vq->used),
2004 			 sizeof *vhost_avail_event(vq));
2005 		if (vq->log_ctx)
2006 			eventfd_signal(vq->log_ctx, 1);
2007 	}
2008 	return 0;
2009 }
2010 
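/* Typically called when a backend is attached and the virtqueue starts: fix
 * the ring endianness, push the current used flags to the guest-visible
 * ring, and snapshot used->idx so last_used_idx matches what is already in
 * guest memory. On failure the previous endianness setting is restored.
 */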
2011 int vhost_vq_init_access(struct vhost_virtqueue *vq)
2012 {
2013 	__virtio16 last_used_idx;
2014 	int r;
2015 	bool is_le = vq->is_le;
2016 
2017 	if (!vq->private_data)
2018 		return 0;
2019 
2020 	vhost_init_is_le(vq);
2021 
2022 	r = vhost_update_used_flags(vq);
2023 	if (r)
2024 		goto err;
2025 	vq->signalled_used_valid = false;
2026 	if (!vq->iotlb &&
2027 	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2028 		r = -EFAULT;
2029 		goto err;
2030 	}
2031 	r = vhost_get_used_idx(vq, &last_used_idx);
2032 	if (r) {
2033 		vq_err(vq, "Can't access used idx at %p\n",
2034 		       &vq->used->idx);
2035 		goto err;
2036 	}
2037 	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2038 	return 0;
2039 
2040 err:
2041 	vq->is_le = is_le;
2042 	return r;
2043 }
2044 EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2045 
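/* Translate a guest (or IO virtual) address range into host-userspace
 * iovecs, using the device IOTLB when one is attached and the memory table
 * otherwise. Returns the number of iovecs used, -ENOBUFS if iov_size is too
 * small, -EPERM on an access-type mismatch, -EFAULT if the range is not
 * mapped, and -EAGAIN after queueing an IOTLB miss for userspace to service.
 */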
2046 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2047 			  struct iovec iov[], int iov_size, int access)
2048 {
2049 	const struct vhost_iotlb_map *map;
2050 	struct vhost_dev *dev = vq->dev;
2051 	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
2052 	struct iovec *_iov;
2053 	u64 s = 0, last = addr + len - 1;
2054 	int ret = 0;
2055 
2056 	while ((u64)len > s) {
2057 		u64 size;
2058 		if (unlikely(ret >= iov_size)) {
2059 			ret = -ENOBUFS;
2060 			break;
2061 		}
2062 
2063 		map = vhost_iotlb_itree_first(umem, addr, last);
2064 		if (map == NULL || map->start > addr) {
2065 			if (umem != dev->iotlb) {
2066 				ret = -EFAULT;
2067 				break;
2068 			}
2069 			ret = -EAGAIN;
2070 			break;
2071 		} else if (!(map->perm & access)) {
2072 			ret = -EPERM;
2073 			break;
2074 		}
2075 
2076 		_iov = iov + ret;
2077 		size = map->size - addr + map->start;
2078 		_iov->iov_len = min((u64)len - s, size);
2079 		_iov->iov_base = (void __user *)(unsigned long)
2080 				 (map->addr + addr - map->start);
2081 		s += size;
2082 		addr += size;
2083 		++ret;
2084 	}
2085 
2086 	if (ret == -EAGAIN)
2087 		vhost_iotlb_miss(vq, addr, access);
2088 	return ret;
2089 }
2090 
2091 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
2092  * function returns the next descriptor in the chain,
2093  * or -1U if we're at the end. */
2094 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2095 {
2096 	unsigned int next;
2097 
2098 	/* If this descriptor says it doesn't chain, we're done. */
2099 	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2100 		return -1U;
2101 
2102 	/* Check they're not leading us off the end of the descriptors. */
2103 	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2104 	return next;
2105 }
2106 
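/* Handle a VRING_DESC_F_INDIRECT descriptor: translate and copy in the
 * indirect table it points to, then walk that table, translating each
 * descriptor into iovecs and updating the in/out (and optional log) counts
 * exactly as for direct descriptors. Nested indirection is rejected.
 */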
2107 static int get_indirect(struct vhost_virtqueue *vq,
2108 			struct iovec iov[], unsigned int iov_size,
2109 			unsigned int *out_num, unsigned int *in_num,
2110 			struct vhost_log *log, unsigned int *log_num,
2111 			struct vring_desc *indirect)
2112 {
2113 	struct vring_desc desc;
2114 	unsigned int i = 0, count, found = 0;
2115 	u32 len = vhost32_to_cpu(vq, indirect->len);
2116 	struct iov_iter from;
2117 	int ret, access;
2118 
2119 	/* Sanity check */
2120 	if (unlikely(len % sizeof desc)) {
2121 		vq_err(vq, "Invalid length in indirect descriptor: "
2122 		       "len 0x%llx not multiple of 0x%zx\n",
2123 		       (unsigned long long)len,
2124 		       sizeof desc);
2125 		return -EINVAL;
2126 	}
2127 
2128 	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2129 			     UIO_MAXIOV, VHOST_ACCESS_RO);
2130 	if (unlikely(ret < 0)) {
2131 		if (ret != -EAGAIN)
2132 			vq_err(vq, "Translation failure %d in indirect.\n", ret);
2133 		return ret;
2134 	}
2135 	iov_iter_init(&from, READ, vq->indirect, ret, len);
2136 	count = len / sizeof desc;
2137 	/* Buffers are chained via a 16 bit next field, so
2138 	 * we can have at most 2^16 of these. */
2139 	if (unlikely(count > USHRT_MAX + 1)) {
2140 		vq_err(vq, "Indirect buffer length too big: %d\n",
2141 		       indirect->len);
2142 		return -E2BIG;
2143 	}
2144 
2145 	do {
2146 		unsigned iov_count = *in_num + *out_num;
2147 		if (unlikely(++found > count)) {
2148 			vq_err(vq, "Loop detected: last one at %u "
2149 			       "indirect size %u\n",
2150 			       i, count);
2151 			return -EINVAL;
2152 		}
2153 		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2154 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2155 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2156 			return -EINVAL;
2157 		}
2158 		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2159 			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2160 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2161 			return -EINVAL;
2162 		}
2163 
2164 		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2165 			access = VHOST_ACCESS_WO;
2166 		else
2167 			access = VHOST_ACCESS_RO;
2168 
2169 		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2170 				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2171 				     iov_size - iov_count, access);
2172 		if (unlikely(ret < 0)) {
2173 			if (ret != -EAGAIN)
2174 				vq_err(vq, "Translation failure %d indirect idx %d\n",
2175 					ret, i);
2176 			return ret;
2177 		}
2178 		/* If this is an input descriptor, increment that count. */
2179 		if (access == VHOST_ACCESS_WO) {
2180 			*in_num += ret;
2181 			if (unlikely(log && ret)) {
2182 				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2183 				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2184 				++*log_num;
2185 			}
2186 		} else {
2187 			/* If it's an output descriptor, they're all supposed
2188 			 * to come before any input descriptors. */
2189 			if (unlikely(*in_num)) {
2190 				vq_err(vq, "Indirect descriptor "
2191 				       "has out after in: idx %d\n", i);
2192 				return -EINVAL;
2193 			}
2194 			*out_num += ret;
2195 		}
2196 	} while ((i = next_desc(vq, &desc)) != -1);
2197 	return 0;
2198 }
2199 
2200 /* This looks in the virtqueue for the first available buffer, and converts
2201  * it to an iovec for convenient access.  Since descriptors consist of some
2202  * number of output then some number of input descriptors, it's actually two
2203  * iovecs, but we pack them into one and note how many of each there were.
2204  *
2205  * This function returns the descriptor number found, or vq->num (which is
2206  * never a valid descriptor number) if none was found.  A negative code is
2207  * returned on error. */
2208 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2209 		      struct iovec iov[], unsigned int iov_size,
2210 		      unsigned int *out_num, unsigned int *in_num,
2211 		      struct vhost_log *log, unsigned int *log_num)
2212 {
2213 	struct vring_desc desc;
2214 	unsigned int i, head, found = 0;
2215 	u16 last_avail_idx;
2216 	__virtio16 avail_idx;
2217 	__virtio16 ring_head;
2218 	int ret, access;
2219 
2220 	/* Check it isn't doing very strange things with descriptor numbers. */
2221 	last_avail_idx = vq->last_avail_idx;
2222 
2223 	if (vq->avail_idx == vq->last_avail_idx) {
2224 		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2225 			vq_err(vq, "Failed to access avail idx at %p\n",
2226 				&vq->avail->idx);
2227 			return -EFAULT;
2228 		}
2229 		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2230 
2231 		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2232 			vq_err(vq, "Guest moved avail index from %u to %u",
2233 				last_avail_idx, vq->avail_idx);
2234 			return -EFAULT;
2235 		}
2236 
2237 		/* If there's nothing new since last we looked, return
2238 		 * invalid.
2239 		 */
2240 		if (vq->avail_idx == last_avail_idx)
2241 			return vq->num;
2242 
2243 		/* Only get avail ring entries after they have been
2244 		 * exposed by guest.
2245 		 */
2246 		smp_rmb();
2247 	}
2248 
2249 	/* Grab the next descriptor number they're advertising, and increment
2250 	 * the index we've seen. */
2251 	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2252 		vq_err(vq, "Failed to read head: idx %d address %p\n",
2253 		       last_avail_idx,
2254 		       &vq->avail->ring[last_avail_idx % vq->num]);
2255 		return -EFAULT;
2256 	}
2257 
2258 	head = vhost16_to_cpu(vq, ring_head);
2259 
2260 	/* If their number is silly, that's an error. */
2261 	if (unlikely(head >= vq->num)) {
2262 		vq_err(vq, "Guest says index %u > %u is available",
2263 		       head, vq->num);
2264 		return -EINVAL;
2265 	}
2266 
2267 	/* When we start there are no input or output descriptors. */
2268 	*out_num = *in_num = 0;
2269 	if (unlikely(log))
2270 		*log_num = 0;
2271 
2272 	i = head;
2273 	do {
2274 		unsigned iov_count = *in_num + *out_num;
2275 		if (unlikely(i >= vq->num)) {
2276 			vq_err(vq, "Desc index is %u > %u, head = %u",
2277 			       i, vq->num, head);
2278 			return -EINVAL;
2279 		}
2280 		if (unlikely(++found > vq->num)) {
2281 			vq_err(vq, "Loop detected: last one at %u "
2282 			       "vq size %u head %u\n",
2283 			       i, vq->num, head);
2284 			return -EINVAL;
2285 		}
2286 		ret = vhost_get_desc(vq, &desc, i);
2287 		if (unlikely(ret)) {
2288 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2289 			       i, vq->desc + i);
2290 			return -EFAULT;
2291 		}
2292 		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2293 			ret = get_indirect(vq, iov, iov_size,
2294 					   out_num, in_num,
2295 					   log, log_num, &desc);
2296 			if (unlikely(ret < 0)) {
2297 				if (ret != -EAGAIN)
2298 					vq_err(vq, "Failure detected "
2299 						"in indirect descriptor at idx %d\n", i);
2300 				return ret;
2301 			}
2302 			continue;
2303 		}
2304 
2305 		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2306 			access = VHOST_ACCESS_WO;
2307 		else
2308 			access = VHOST_ACCESS_RO;
2309 		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2310 				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2311 				     iov_size - iov_count, access);
2312 		if (unlikely(ret < 0)) {
2313 			if (ret != -EAGAIN)
2314 				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2315 					ret, i);
2316 			return ret;
2317 		}
2318 		if (access == VHOST_ACCESS_WO) {
2319 			/* If this is an input descriptor,
2320 			 * increment that count. */
2321 			*in_num += ret;
2322 			if (unlikely(log && ret)) {
2323 				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2324 				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2325 				++*log_num;
2326 			}
2327 		} else {
2328 			/* If it's an output descriptor, they're all supposed
2329 			 * to come before any input descriptors. */
2330 			if (unlikely(*in_num)) {
2331 				vq_err(vq, "Descriptor has out after in: "
2332 				       "idx %d\n", i);
2333 				return -EINVAL;
2334 			}
2335 			*out_num += ret;
2336 		}
2337 	} while ((i = next_desc(vq, &desc)) != -1);
2338 
2339 	/* On success, increment avail index. */
2340 	vq->last_avail_idx++;
2341 
2342 	/* Assume notifications from the guest are disabled at this point;
2343 	 * if they aren't, we would need to update the avail_event index. */
2344 	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2345 	return head;
2346 }
2347 EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
2348 
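/* A minimal usage sketch (not from this file; the surrounding backend names
 * are illustrative): a handle_kick worker would typically loop along the
 * lines of
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;		// error already logged by vq_err
 *		if (head == vq->num)
 *			break;		// nothing available right now
 *		// ...consume the out/in iovecs, producing 'len' bytes...
 *		vhost_add_used_and_signal(&net->dev, vq, head, len);
 *	}
 *
 * with vhost_discard_vq_desc() used to back out descriptors on a transient
 * failure so they are seen again on the next kick.
 */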
2349 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2350 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2351 {
2352 	vq->last_avail_idx -= n;
2353 }
2354 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2355 
2356 /* After we've used one of their buffers, we tell them about it.  We'll then
2357  * want to notify the guest, using eventfd. */
2358 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2359 {
2360 	struct vring_used_elem heads = {
2361 		cpu_to_vhost32(vq, head),
2362 		cpu_to_vhost32(vq, len)
2363 	};
2364 
2365 	return vhost_add_used_n(vq, &heads, 1);
2366 }
2367 EXPORT_SYMBOL_GPL(vhost_add_used);
2368 
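/* Copy 'count' used elements into the used ring starting at last_used_idx
 * (mod vq->num) and advance last_used_idx, logging the ring write if dirty
 * logging is enabled. The caller guarantees the run does not wrap past the
 * end of the ring; vhost_add_used_n() splits the request at the boundary.
 */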
2369 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2370 			    struct vring_used_elem *heads,
2371 			    unsigned count)
2372 {
2373 	vring_used_elem_t __user *used;
2374 	u16 old, new;
2375 	int start;
2376 
2377 	start = vq->last_used_idx & (vq->num - 1);
2378 	used = vq->used->ring + start;
2379 	if (vhost_put_used(vq, heads, start, count)) {
2380 		vq_err(vq, "Failed to write used");
2381 		return -EFAULT;
2382 	}
2383 	if (unlikely(vq->log_used)) {
2384 		/* Make sure data is seen before log. */
2385 		smp_wmb();
2386 		/* Log used ring entry write. */
2387 		log_used(vq, ((void __user *)used - (void __user *)vq->used),
2388 			 count * sizeof *used);
2389 	}
2390 	old = vq->last_used_idx;
2391 	new = (vq->last_used_idx += count);
2392 	/* If the driver never bothers to signal in a very long while,
2393 	 * used index might wrap around. If that happens, invalidate
2394 	 * signalled_used index we stored. TODO: make sure driver
2395 	 * signals at least once in 2^16 and remove this. */
2396 	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2397 		vq->signalled_used_valid = false;
2398 	return 0;
2399 }
2400 
2401 /* After we've used one of their buffers, we tell them about it.  We'll then
2402  * want to notify the guest, using eventfd. */
2403 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2404 		     unsigned count)
2405 {
2406 	int start, n, r;
2407 
2408 	start = vq->last_used_idx & (vq->num - 1);
2409 	n = vq->num - start;
2410 	if (n < count) {
2411 		r = __vhost_add_used_n(vq, heads, n);
2412 		if (r < 0)
2413 			return r;
2414 		heads += n;
2415 		count -= n;
2416 	}
2417 	r = __vhost_add_used_n(vq, heads, count);
2418 
2419 	/* Make sure buffer is written before we update index. */
2420 	smp_wmb();
2421 	if (vhost_put_used_idx(vq)) {
2422 		vq_err(vq, "Failed to increment used idx");
2423 		return -EFAULT;
2424 	}
2425 	if (unlikely(vq->log_used)) {
2426 		/* Make sure used idx is seen before log. */
2427 		smp_wmb();
2428 		/* Log used index update. */
2429 		log_used(vq, offsetof(struct vring_used, idx),
2430 			 sizeof vq->used->idx);
2431 		if (vq->log_ctx)
2432 			eventfd_signal(vq->log_ctx, 1);
2433 	}
2434 	return r;
2435 }
2436 EXPORT_SYMBOL_GPL(vhost_add_used_n);
2437 
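/* Decide whether the guest needs an interrupt for the used entries just
 * added. Without VIRTIO_RING_F_EVENT_IDX this honours the guest's
 * VRING_AVAIL_F_NO_INTERRUPT flag; with it, the used_event index is read
 * and vring_need_event() checks whether it falls within the range of
 * entries consumed since the last signal. Any failure to read guest state
 * errs on the side of signalling.
 */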
2438 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2439 {
2440 	__u16 old, new;
2441 	__virtio16 event;
2442 	bool v;
2443 	/* Flush out used index updates. This is paired
2444 	 * with the barrier that the Guest executes when enabling
2445 	 * interrupts. */
2446 	smp_mb();
2447 
2448 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2449 	    unlikely(vq->avail_idx == vq->last_avail_idx))
2450 		return true;
2451 
2452 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2453 		__virtio16 flags;
2454 		if (vhost_get_avail_flags(vq, &flags)) {
2455 			vq_err(vq, "Failed to get flags");
2456 			return true;
2457 		}
2458 		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2459 	}
2460 	old = vq->signalled_used;
2461 	v = vq->signalled_used_valid;
2462 	new = vq->signalled_used = vq->last_used_idx;
2463 	vq->signalled_used_valid = true;
2464 
2465 	if (unlikely(!v))
2466 		return true;
2467 
2468 	if (vhost_get_used_event(vq, &event)) {
2469 		vq_err(vq, "Failed to get used event idx");
2470 		return true;
2471 	}
2472 	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2473 }
2474 
2475 /* This actually signals the guest, using eventfd. */
2476 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2477 {
2478 	/* Signal the Guest to tell them we used something up. */
2479 	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2480 		eventfd_signal(vq->call_ctx.ctx, 1);
2481 }
2482 EXPORT_SYMBOL_GPL(vhost_signal);
2483 
2484 /* And here's the combo meal deal.  Supersize me! */
2485 void vhost_add_used_and_signal(struct vhost_dev *dev,
2486 			       struct vhost_virtqueue *vq,
2487 			       unsigned int head, int len)
2488 {
2489 	vhost_add_used(vq, head, len);
2490 	vhost_signal(dev, vq);
2491 }
2492 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2493 
2494 /* multi-buffer version of vhost_add_used_and_signal */
2495 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2496 				 struct vhost_virtqueue *vq,
2497 				 struct vring_used_elem *heads, unsigned count)
2498 {
2499 	vhost_add_used_n(vq, heads, count);
2500 	vhost_signal(dev, vq);
2501 }
2502 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2503 
2504 /* Return true if we're sure that the available ring is empty. */
2505 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2506 {
2507 	__virtio16 avail_idx;
2508 	int r;
2509 
2510 	if (vq->avail_idx != vq->last_avail_idx)
2511 		return false;
2512 
2513 	r = vhost_get_avail_idx(vq, &avail_idx);
2514 	if (unlikely(r))
2515 		return false;
2516 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2517 
2518 	return vq->avail_idx == vq->last_avail_idx;
2519 }
2520 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2521 
2522 /* OK, now we need to know about added descriptors. */
2523 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2524 {
2525 	__virtio16 avail_idx;
2526 	int r;
2527 
2528 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2529 		return false;
2530 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2531 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2532 		r = vhost_update_used_flags(vq);
2533 		if (r) {
2534 			vq_err(vq, "Failed to enable notification at %p: %d\n",
2535 			       &vq->used->flags, r);
2536 			return false;
2537 		}
2538 	} else {
2539 		r = vhost_update_avail_event(vq, vq->avail_idx);
2540 		if (r) {
2541 			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2542 			       vhost_avail_event(vq), r);
2543 			return false;
2544 		}
2545 	}
2546 	/* They could have slipped one in as we were doing that: make
2547 	 * sure it's written, then check again. */
2548 	smp_mb();
2549 	r = vhost_get_avail_idx(vq, &avail_idx);
2550 	if (r) {
2551 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2552 		       &vq->avail->idx, r);
2553 		return false;
2554 	}
2555 
2556 	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2557 }
2558 EXPORT_SYMBOL_GPL(vhost_enable_notify);
2559 
2560 /* We don't need to be notified again. */
2561 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2562 {
2563 	int r;
2564 
2565 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2566 		return;
2567 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2568 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2569 		r = vhost_update_used_flags(vq);
2570 		if (r)
2571 			vq_err(vq, "Failed to disable notification at %p: %d\n",
2572 			       &vq->used->flags, r);
2573 	}
2574 }
2575 EXPORT_SYMBOL_GPL(vhost_disable_notify);
2576 
2577 /* Create a new message. */
2578 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2579 {
2580 	/* Make sure all padding within the structure is initialized. */
2581 	struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
2582 	if (!node)
2583 		return NULL;
2584 
2585 	node->vq = vq;
2586 	node->msg.type = type;
2587 	return node;
2588 }
2589 EXPORT_SYMBOL_GPL(vhost_new_msg);
2590 
2591 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2592 		       struct vhost_msg_node *node)
2593 {
2594 	spin_lock(&dev->iotlb_lock);
2595 	list_add_tail(&node->node, head);
2596 	spin_unlock(&dev->iotlb_lock);
2597 
2598 	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2599 }
2600 EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2601 
2602 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2603 					 struct list_head *head)
2604 {
2605 	struct vhost_msg_node *node = NULL;
2606 
2607 	spin_lock(&dev->iotlb_lock);
2608 	if (!list_empty(head)) {
2609 		node = list_first_entry(head, struct vhost_msg_node,
2610 					node);
2611 		list_del(&node->node);
2612 	}
2613 	spin_unlock(&dev->iotlb_lock);
2614 
2615 	return node;
2616 }
2617 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2618 
2619 void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2620 {
2621 	struct vhost_virtqueue *vq;
2622 	int i;
2623 
2624 	mutex_lock(&dev->mutex);
2625 	for (i = 0; i < dev->nvqs; ++i) {
2626 		vq = dev->vqs[i];
2627 		mutex_lock(&vq->mutex);
2628 		vq->acked_backend_features = features;
2629 		mutex_unlock(&vq->mutex);
2630 	}
2631 	mutex_unlock(&dev->mutex);
2632 }
2633 EXPORT_SYMBOL_GPL(vhost_set_backend_features);
2634 
2635 static int __init vhost_init(void)
2636 {
2637 	return 0;
2638 }
2639 
2640 static void __exit vhost_exit(void)
2641 {
2642 }
2643 
2644 module_init(vhost_init);
2645 module_exit(vhost_exit);
2646 
2647 MODULE_VERSION("0.0.1");
2648 MODULE_LICENSE("GPL v2");
2649 MODULE_AUTHOR("Michael S. Tsirkin");
2650 MODULE_DESCRIPTION("Host kernel accelerator for virtio");
2651