// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

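/*
 * Virtqueue kick handler, run from the vhost worker: relay the kick
 * from userspace to the parent vDPA device via its kick_vq op.
 */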
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

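/*
 * Virtqueue interrupt callback, installed through set_vq_cb(): signal
 * the vring call eventfd so the interrupt reaches the guest.
 */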
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

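/*
 * Config-change interrupt callback: signal the eventfd registered via
 * VHOST_VDPA_SET_CONFIG_CALL.
 */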
static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

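/*
 * Set up irq offloading for a virtqueue: if the parent device exposes
 * a per-vq irq, register the call eventfd as an irq bypass producer so
 * a consumer (e.g. KVM) can deliver the interrupt to the guest without
 * bouncing through the host eventfd path.  Failure is non-fatal; the
 * eventfd path keeps working.
 */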
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

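/*
 * VHOST_VDPA_SET_STATUS: write the virtio device status.  Status bits
 * may only be added, never removed, except by writing 0 (reset); irq
 * offloading is set up or torn down when DRIVER_OK toggles.
 */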
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

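/*
 * Vring ioctls: validate the vq index, let the core vhost code update
 * its software state, then mirror the change to the parent device.
 * VHOST_GET_VRING_BASE first syncs last_avail_idx from the device
 * state; VHOST_VDPA_SET_VRING_ENABLE is handled here entirely.
 */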
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

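/*
 * Drop all IOTLB mappings that intersect [start, last]: mark writable
 * pages dirty, unpin them, subtract them from the mm's pinned-page
 * accounting and free the IOTLB entries.
 */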
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

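/*
 * Record an IOVA->PA mapping in the vhost IOTLB and push it to
 * whichever translation the device uses: its own dma_map op, a
 * whole-table set_map op (deferred while an IOTLB batch is open),
 * or the platform IOMMU domain.
 */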
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

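/*
 * VHOST_IOTLB_UPDATE: validate the request against the device IOVA
 * range, pin the user pages (accounted against RLIMIT_MEMLOCK) and
 * map each physically contiguous run with one vhost_vdpa_map() call.
 */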
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk of pinned pages */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

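/*
 * If the parent device does not handle DMA translation itself (no
 * set_map or dma_map op), allocate an IOMMU domain on the DMA device's
 * bus and attach it so the maps can be programmed into the platform
 * IOMMU.
 */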
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
		   DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

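/*
 * Chardev open: a vhost-vdpa device has at most one opener.  Reset the
 * device, set up the vhost device with one kick handler per virtqueue,
 * then allocate the IOTLB and, if needed, the IOMMU domain.
 */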
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
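/*
 * Page fault handler for mmapped doorbells: resolve the notification
 * area of the vq selected by vm_pgoff and map its page, uncached, into
 * userspace.
 */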
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/*
	 * To be safe and easily modelled by userspace, we only support
	 * a doorbell which sits on a page boundary and does not share
	 * the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

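/*
 * vDPA bus probe: create and register a vhost-vdpa character device
 * for a newly added vDPA device.
 */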
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

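/*
 * vDPA bus remove: delete the chardev; if it is still open, wait for
 * release() to complete (marking it opened blocks new opens) before
 * dropping the last device reference.
 */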
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");