1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2018-2020 Intel Corporation.
4  * Copyright (C) 2020 Red Hat, Inc.
5  *
6  * Author: Tiwei Bie <tiwei.bie@intel.com>
7  *         Jason Wang <jasowang@redhat.com>
8  *
9  * Thanks to Michael S. Tsirkin for the valuable comments and
10  * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
11  * their support.
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/cdev.h>
17 #include <linux/device.h>
18 #include <linux/mm.h>
19 #include <linux/slab.h>
20 #include <linux/iommu.h>
21 #include <linux/uuid.h>
22 #include <linux/vdpa.h>
23 #include <linux/nospec.h>
24 #include <linux/vhost.h>
25 
26 #include "vhost.h"
27 
28 enum {
29 	VHOST_VDPA_BACKEND_FEATURES =
30 	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
31 	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
32 	(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
33 };
34 
35 #define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
36 
37 #define VHOST_VDPA_IOTLB_BUCKETS 16
38 
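/*
 * Each address space (AS) owns one vhost IOTLB and is identified by an
 * ASID; the entries are kept in the 'as' hash table of struct vhost_vdpa
 * below, hashed over VHOST_VDPA_IOTLB_BUCKETS buckets.
 */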
39 struct vhost_vdpa_as {
40 	struct hlist_node hash_link;
41 	struct vhost_iotlb iotlb;
42 	u32 id;
43 };
44 
45 struct vhost_vdpa {
46 	struct vhost_dev vdev;
47 	struct iommu_domain *domain;
48 	struct vhost_virtqueue *vqs;
49 	struct completion completion;
50 	struct vdpa_device *vdpa;
51 	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
52 	struct device dev;
53 	struct cdev cdev;
54 	atomic_t opened;
55 	u32 nvqs;
56 	int virtio_id;
57 	int minor;
58 	struct eventfd_ctx *config_ctx;
59 	int in_batch;
60 	struct vdpa_iova_range range;
61 	u32 batch_asid;
62 };
63 
64 static DEFINE_IDA(vhost_vdpa_ida);
65 
66 static dev_t vhost_vdpa_major;
67 
68 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
69 				   struct vhost_iotlb *iotlb, u64 start,
70 				   u64 last, u32 asid);
71 
72 static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
73 {
74 	struct vhost_vdpa_as *as = container_of(iotlb, struct
75 						vhost_vdpa_as, iotlb);
76 	return as->id;
77 }
78 
79 static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
80 {
81 	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
82 	struct vhost_vdpa_as *as;
83 
84 	hlist_for_each_entry(as, head, hash_link)
85 		if (as->id == asid)
86 			return as;
87 
88 	return NULL;
89 }
90 
91 static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
92 {
93 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
94 
95 	if (!as)
96 		return NULL;
97 
98 	return &as->iotlb;
99 }
100 
101 static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
102 {
103 	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
104 	struct vhost_vdpa_as *as;
105 
106 	if (asid_to_as(v, asid))
107 		return NULL;
108 
109 	if (asid >= v->vdpa->nas)
110 		return NULL;
111 
112 	as = kmalloc(sizeof(*as), GFP_KERNEL);
113 	if (!as)
114 		return NULL;
115 
116 	vhost_iotlb_init(&as->iotlb, 0, 0);
117 	as->id = asid;
118 	hlist_add_head(&as->hash_link, head);
119 
120 	return as;
121 }
122 
123 static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
124 						      u32 asid)
125 {
126 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
127 
128 	if (as)
129 		return as;
130 
131 	return vhost_vdpa_alloc_as(v, asid);
132 }
133 
134 static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
135 {
136 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
137 
138 	if (!as)
139 		return -EINVAL;
140 
141 	hlist_del(&as->hash_link);
142 	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
143 	kfree(as);
144 
145 	return 0;
146 }
147 
148 static void handle_vq_kick(struct vhost_work *work)
149 {
150 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
151 						  poll.work);
152 	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
153 	const struct vdpa_config_ops *ops = v->vdpa->config;
154 
155 	ops->kick_vq(v->vdpa, vq - v->vqs);
156 }
157 
158 static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
159 {
160 	struct vhost_virtqueue *vq = private;
161 	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
162 
163 	if (call_ctx)
164 		eventfd_signal(call_ctx, 1);
165 
166 	return IRQ_HANDLED;
167 }
168 
169 static irqreturn_t vhost_vdpa_config_cb(void *private)
170 {
171 	struct vhost_vdpa *v = private;
172 	struct eventfd_ctx *config_ctx = v->config_ctx;
173 
174 	if (config_ctx)
175 		eventfd_signal(config_ctx, 1);
176 
177 	return IRQ_HANDLED;
178 }
179 
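/*
 * Try to let the device interrupt bypass vhost: if the parent exposes a
 * per-vq irq, register the vq's call eventfd as an irq bypass producer so
 * the interrupt can be injected without waking up the vhost thread.
 */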
180 static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
181 {
182 	struct vhost_virtqueue *vq = &v->vqs[qid];
183 	const struct vdpa_config_ops *ops = v->vdpa->config;
184 	struct vdpa_device *vdpa = v->vdpa;
185 	int ret, irq;
186 
187 	if (!ops->get_vq_irq)
188 		return;
189 
190 	irq = ops->get_vq_irq(vdpa, qid);
191 	if (irq < 0)
192 		return;
193 
194 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
195 	if (!vq->call_ctx.ctx)
196 		return;
197 
198 	vq->call_ctx.producer.token = vq->call_ctx.ctx;
199 	vq->call_ctx.producer.irq = irq;
200 	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
201 	if (unlikely(ret))
202 		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret =  %d\n",
203 			 qid, vq->call_ctx.producer.token, ret);
204 }
205 
206 static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
207 {
208 	struct vhost_virtqueue *vq = &v->vqs[qid];
209 
210 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
211 }
212 
213 static int vhost_vdpa_reset(struct vhost_vdpa *v)
214 {
215 	struct vdpa_device *vdpa = v->vdpa;
216 
217 	v->in_batch = 0;
218 
219 	return vdpa_reset(vdpa);
220 }
221 
222 static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
223 {
224 	struct vdpa_device *vdpa = v->vdpa;
225 	const struct vdpa_config_ops *ops = vdpa->config;
226 	u32 device_id;
227 
228 	device_id = ops->get_device_id(vdpa);
229 
230 	if (copy_to_user(argp, &device_id, sizeof(device_id)))
231 		return -EFAULT;
232 
233 	return 0;
234 }
235 
236 static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
237 {
238 	struct vdpa_device *vdpa = v->vdpa;
239 	const struct vdpa_config_ops *ops = vdpa->config;
240 	u8 status;
241 
242 	status = ops->get_status(vdpa);
243 
244 	if (copy_to_user(statusp, &status, sizeof(status)))
245 		return -EFAULT;
246 
247 	return 0;
248 }
249 
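/*
 * Status bits may only be added, never removed, except by writing 0 which
 * resets the device.  VQ irq bypass producers are torn down when DRIVER_OK
 * is dropped and set up again once it becomes set.
 */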
250 static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
251 {
252 	struct vdpa_device *vdpa = v->vdpa;
253 	const struct vdpa_config_ops *ops = vdpa->config;
254 	u8 status, status_old;
255 	u32 nvqs = v->nvqs;
256 	int ret;
257 	u16 i;
258 
259 	if (copy_from_user(&status, statusp, sizeof(status)))
260 		return -EFAULT;
261 
262 	status_old = ops->get_status(vdpa);
263 
264 	/*
265 	 * Userspace shouldn't remove status bits unless it resets
266 	 * the status to 0.
267 	 */
268 	if (status != 0 && (status_old & ~status) != 0)
269 		return -EINVAL;
270 
271 	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
272 		for (i = 0; i < nvqs; i++)
273 			vhost_vdpa_unsetup_vq_irq(v, i);
274 
275 	if (status == 0) {
276 		ret = vdpa_reset(vdpa);
277 		if (ret)
278 			return ret;
279 	} else
280 		vdpa_set_status(vdpa, status);
281 
282 	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
283 		for (i = 0; i < nvqs; i++)
284 			vhost_vdpa_setup_vq_irq(v, i);
285 
286 	return 0;
287 }
288 
289 static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
290 				      struct vhost_vdpa_config *c)
291 {
292 	struct vdpa_device *vdpa = v->vdpa;
293 	size_t size = vdpa->config->get_config_size(vdpa);
294 
295 	if (c->len == 0 || c->off > size)
296 		return -EINVAL;
297 
298 	if (c->len > size - c->off)
299 		return -E2BIG;
300 
301 	return 0;
302 }
303 
304 static long vhost_vdpa_get_config(struct vhost_vdpa *v,
305 				  struct vhost_vdpa_config __user *c)
306 {
307 	struct vdpa_device *vdpa = v->vdpa;
308 	struct vhost_vdpa_config config;
309 	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
310 	u8 *buf;
311 
312 	if (copy_from_user(&config, c, size))
313 		return -EFAULT;
314 	if (vhost_vdpa_config_validate(v, &config))
315 		return -EINVAL;
316 	buf = kvzalloc(config.len, GFP_KERNEL);
317 	if (!buf)
318 		return -ENOMEM;
319 
320 	vdpa_get_config(vdpa, config.off, buf, config.len);
321 
322 	if (copy_to_user(c->buf, buf, config.len)) {
323 		kvfree(buf);
324 		return -EFAULT;
325 	}
326 
327 	kvfree(buf);
328 	return 0;
329 }
330 
331 static long vhost_vdpa_set_config(struct vhost_vdpa *v,
332 				  struct vhost_vdpa_config __user *c)
333 {
334 	struct vdpa_device *vdpa = v->vdpa;
335 	struct vhost_vdpa_config config;
336 	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
337 	u8 *buf;
338 
339 	if (copy_from_user(&config, c, size))
340 		return -EFAULT;
341 	if (vhost_vdpa_config_validate(v, &config))
342 		return -EINVAL;
343 
344 	buf = vmemdup_user(c->buf, config.len);
345 	if (IS_ERR(buf))
346 		return PTR_ERR(buf);
347 
348 	vdpa_set_config(vdpa, config.off, buf, config.len);
349 
350 	kvfree(buf);
351 	return 0;
352 }
353 
354 static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
355 {
356 	struct vdpa_device *vdpa = v->vdpa;
357 	const struct vdpa_config_ops *ops = vdpa->config;
358 
359 	return ops->suspend;
360 }
361 
362 static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
363 {
364 	struct vdpa_device *vdpa = v->vdpa;
365 	const struct vdpa_config_ops *ops = vdpa->config;
366 	u64 features;
367 
368 	features = ops->get_device_features(vdpa);
369 
370 	if (copy_to_user(featurep, &features, sizeof(features)))
371 		return -EFAULT;
372 
373 	return 0;
374 }
375 
376 static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
377 {
378 	struct vdpa_device *vdpa = v->vdpa;
379 	const struct vdpa_config_ops *ops = vdpa->config;
380 	struct vhost_dev *d = &v->vdev;
381 	u64 actual_features;
382 	u64 features;
383 	int i;
384 
385 	/*
386 	 * It's not allowed to change the features after they have
387 	 * been negotiated.
388 	 */
389 	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
390 		return -EBUSY;
391 
392 	if (copy_from_user(&features, featurep, sizeof(features)))
393 		return -EFAULT;
394 
395 	if (vdpa_set_features(vdpa, features))
396 		return -EINVAL;
397 
398 	/* let the vqs know what has been configured */
399 	actual_features = ops->get_driver_features(vdpa);
400 	for (i = 0; i < d->nvqs; ++i) {
401 		struct vhost_virtqueue *vq = d->vqs[i];
402 
403 		mutex_lock(&vq->mutex);
404 		vq->acked_features = actual_features;
405 		mutex_unlock(&vq->mutex);
406 	}
407 
408 	return 0;
409 }
410 
411 static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
412 {
413 	struct vdpa_device *vdpa = v->vdpa;
414 	const struct vdpa_config_ops *ops = vdpa->config;
415 	u16 num;
416 
417 	num = ops->get_vq_num_max(vdpa);
418 
419 	if (copy_to_user(argp, &num, sizeof(num)))
420 		return -EFAULT;
421 
422 	return 0;
423 }
424 
425 static void vhost_vdpa_config_put(struct vhost_vdpa *v)
426 {
427 	if (v->config_ctx) {
428 		eventfd_ctx_put(v->config_ctx);
429 		v->config_ctx = NULL;
430 	}
431 }
432 
433 static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
434 {
435 	struct vdpa_callback cb;
436 	int fd;
437 	struct eventfd_ctx *ctx;
438 
439 	cb.callback = vhost_vdpa_config_cb;
440 	cb.private = v;
441 	if (copy_from_user(&fd, argp, sizeof(fd)))
442 		return  -EFAULT;
443 
444 	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
445 	swap(ctx, v->config_ctx);
446 
447 	if (!IS_ERR_OR_NULL(ctx))
448 		eventfd_ctx_put(ctx);
449 
450 	if (IS_ERR(v->config_ctx)) {
451 		long ret = PTR_ERR(v->config_ctx);
452 
453 		v->config_ctx = NULL;
454 		return ret;
455 	}
456 
457 	v->vdpa->config->set_config_cb(v->vdpa, &cb);
458 
459 	return 0;
460 }
461 
462 static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
463 {
464 	struct vhost_vdpa_iova_range range = {
465 		.first = v->range.first,
466 		.last = v->range.last,
467 	};
468 
469 	if (copy_to_user(argp, &range, sizeof(range)))
470 		return -EFAULT;
471 	return 0;
472 }
473 
474 static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
475 {
476 	struct vdpa_device *vdpa = v->vdpa;
477 	const struct vdpa_config_ops *ops = vdpa->config;
478 	u32 size;
479 
480 	size = ops->get_config_size(vdpa);
481 
482 	if (copy_to_user(argp, &size, sizeof(size)))
483 		return -EFAULT;
484 
485 	return 0;
486 }
487 
488 static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
489 {
490 	struct vdpa_device *vdpa = v->vdpa;
491 
492 	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
493 		return -EFAULT;
494 
495 	return 0;
496 }
497 
498 /* After a successful return of this ioctl the device must not process more
499  * virtqueue descriptors. The device can answer reads or writes of config
500  * fields as if it were not suspended. In particular, writing to "queue_enable"
501  * with a value of 1 will not make the device start processing buffers.
502  */
503 static long vhost_vdpa_suspend(struct vhost_vdpa *v)
504 {
505 	struct vdpa_device *vdpa = v->vdpa;
506 	const struct vdpa_config_ops *ops = vdpa->config;
507 
508 	if (!ops->suspend)
509 		return -EOPNOTSUPP;
510 
511 	return ops->suspend(vdpa);
512 }
513 
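/*
 * Per-virtqueue ioctls: vdpa-specific commands are handled directly here,
 * the rest go through vhost_vring_ioctl() and the resulting vring state is
 * then propagated to the parent device via the vdpa config ops.
 */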
514 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
515 				   void __user *argp)
516 {
517 	struct vdpa_device *vdpa = v->vdpa;
518 	const struct vdpa_config_ops *ops = vdpa->config;
519 	struct vdpa_vq_state vq_state;
520 	struct vdpa_callback cb;
521 	struct vhost_virtqueue *vq;
522 	struct vhost_vring_state s;
523 	u32 idx;
524 	long r;
525 
526 	r = get_user(idx, (u32 __user *)argp);
527 	if (r < 0)
528 		return r;
529 
530 	if (idx >= v->nvqs)
531 		return -ENOBUFS;
532 
533 	idx = array_index_nospec(idx, v->nvqs);
534 	vq = &v->vqs[idx];
535 
536 	switch (cmd) {
537 	case VHOST_VDPA_SET_VRING_ENABLE:
538 		if (copy_from_user(&s, argp, sizeof(s)))
539 			return -EFAULT;
540 		ops->set_vq_ready(vdpa, idx, s.num);
541 		return 0;
542 	case VHOST_VDPA_GET_VRING_GROUP:
543 		if (!ops->get_vq_group)
544 			return -EOPNOTSUPP;
545 		s.index = idx;
546 		s.num = ops->get_vq_group(vdpa, idx);
547 		if (s.num >= vdpa->ngroups)
548 			return -EIO;
549 		else if (copy_to_user(argp, &s, sizeof(s)))
550 			return -EFAULT;
551 		return 0;
552 	case VHOST_VDPA_SET_GROUP_ASID:
553 		if (copy_from_user(&s, argp, sizeof(s)))
554 			return -EFAULT;
555 		if (s.num >= vdpa->nas)
556 			return -EINVAL;
557 		if (!ops->set_group_asid)
558 			return -EOPNOTSUPP;
559 		return ops->set_group_asid(vdpa, idx, s.num);
560 	case VHOST_GET_VRING_BASE:
561 		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
562 		if (r)
563 			return r;
564 
565 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
566 			vq->last_avail_idx = vq_state.packed.last_avail_idx |
567 					     (vq_state.packed.last_avail_counter << 15);
568 			vq->last_used_idx = vq_state.packed.last_used_idx |
569 					    (vq_state.packed.last_used_counter << 15);
570 		} else {
571 			vq->last_avail_idx = vq_state.split.avail_index;
572 		}
573 		break;
574 	}
575 
576 	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
577 	if (r)
578 		return r;
579 
580 	switch (cmd) {
581 	case VHOST_SET_VRING_ADDR:
582 		if (ops->set_vq_address(vdpa, idx,
583 					(u64)(uintptr_t)vq->desc,
584 					(u64)(uintptr_t)vq->avail,
585 					(u64)(uintptr_t)vq->used))
586 			r = -EINVAL;
587 		break;
588 
589 	case VHOST_SET_VRING_BASE:
590 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
591 			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
592 			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
593 			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
594 			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
595 		} else {
596 			vq_state.split.avail_index = vq->last_avail_idx;
597 		}
598 		r = ops->set_vq_state(vdpa, idx, &vq_state);
599 		break;
600 
601 	case VHOST_SET_VRING_CALL:
602 		if (vq->call_ctx.ctx) {
603 			cb.callback = vhost_vdpa_virtqueue_cb;
604 			cb.private = vq;
605 		} else {
606 			cb.callback = NULL;
607 			cb.private = NULL;
608 		}
609 		ops->set_vq_cb(vdpa, idx, &cb);
610 		vhost_vdpa_setup_vq_irq(v, idx);
611 		break;
612 
613 	case VHOST_SET_VRING_NUM:
614 		ops->set_vq_num(vdpa, idx, vq->num);
615 		break;
616 	}
617 
618 	return r;
619 }
620 
621 static long vhost_vdpa_unlocked_ioctl(struct file *filep,
622 				      unsigned int cmd, unsigned long arg)
623 {
624 	struct vhost_vdpa *v = filep->private_data;
625 	struct vhost_dev *d = &v->vdev;
626 	void __user *argp = (void __user *)arg;
627 	u64 __user *featurep = argp;
628 	u64 features;
629 	long r = 0;
630 
631 	if (cmd == VHOST_SET_BACKEND_FEATURES) {
632 		if (copy_from_user(&features, featurep, sizeof(features)))
633 			return -EFAULT;
634 		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
635 				 BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
636 			return -EOPNOTSUPP;
637 		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
638 		     !vhost_vdpa_can_suspend(v))
639 			return -EOPNOTSUPP;
640 		vhost_set_backend_features(&v->vdev, features);
641 		return 0;
642 	}
643 
644 	mutex_lock(&d->mutex);
645 
646 	switch (cmd) {
647 	case VHOST_VDPA_GET_DEVICE_ID:
648 		r = vhost_vdpa_get_device_id(v, argp);
649 		break;
650 	case VHOST_VDPA_GET_STATUS:
651 		r = vhost_vdpa_get_status(v, argp);
652 		break;
653 	case VHOST_VDPA_SET_STATUS:
654 		r = vhost_vdpa_set_status(v, argp);
655 		break;
656 	case VHOST_VDPA_GET_CONFIG:
657 		r = vhost_vdpa_get_config(v, argp);
658 		break;
659 	case VHOST_VDPA_SET_CONFIG:
660 		r = vhost_vdpa_set_config(v, argp);
661 		break;
662 	case VHOST_GET_FEATURES:
663 		r = vhost_vdpa_get_features(v, argp);
664 		break;
665 	case VHOST_SET_FEATURES:
666 		r = vhost_vdpa_set_features(v, argp);
667 		break;
668 	case VHOST_VDPA_GET_VRING_NUM:
669 		r = vhost_vdpa_get_vring_num(v, argp);
670 		break;
671 	case VHOST_VDPA_GET_GROUP_NUM:
672 		if (copy_to_user(argp, &v->vdpa->ngroups,
673 				 sizeof(v->vdpa->ngroups)))
674 			r = -EFAULT;
675 		break;
676 	case VHOST_VDPA_GET_AS_NUM:
677 		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
678 			r = -EFAULT;
679 		break;
680 	case VHOST_SET_LOG_BASE:
681 	case VHOST_SET_LOG_FD:
682 		r = -ENOIOCTLCMD;
683 		break;
684 	case VHOST_VDPA_SET_CONFIG_CALL:
685 		r = vhost_vdpa_set_config_call(v, argp);
686 		break;
687 	case VHOST_GET_BACKEND_FEATURES:
688 		features = VHOST_VDPA_BACKEND_FEATURES;
689 		if (vhost_vdpa_can_suspend(v))
690 			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
691 		if (copy_to_user(featurep, &features, sizeof(features)))
692 			r = -EFAULT;
693 		break;
694 	case VHOST_VDPA_GET_IOVA_RANGE:
695 		r = vhost_vdpa_get_iova_range(v, argp);
696 		break;
697 	case VHOST_VDPA_GET_CONFIG_SIZE:
698 		r = vhost_vdpa_get_config_size(v, argp);
699 		break;
700 	case VHOST_VDPA_GET_VQS_COUNT:
701 		r = vhost_vdpa_get_vqs_count(v, argp);
702 		break;
703 	case VHOST_VDPA_SUSPEND:
704 		r = vhost_vdpa_suspend(v);
705 		break;
706 	default:
707 		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
708 		if (r == -ENOIOCTLCMD)
709 			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
710 		break;
711 	}
712 
713 	mutex_unlock(&d->mutex);
714 	return r;
715 }
716 static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
717 				     struct vhost_iotlb_map *map, u32 asid)
718 {
719 	struct vdpa_device *vdpa = v->vdpa;
720 	const struct vdpa_config_ops *ops = vdpa->config;
721 	if (ops->dma_map) {
722 		ops->dma_unmap(vdpa, asid, map->start, map->size);
723 	} else if (ops->set_map == NULL) {
724 		iommu_unmap(v->domain, map->start, map->size);
725 	}
726 }
727 
728 static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
729 				u64 start, u64 last, u32 asid)
730 {
731 	struct vhost_dev *dev = &v->vdev;
732 	struct vhost_iotlb_map *map;
733 	struct page *page;
734 	unsigned long pfn, pinned;
735 
736 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
737 		pinned = PFN_DOWN(map->size);
738 		for (pfn = PFN_DOWN(map->addr);
739 		     pinned > 0; pfn++, pinned--) {
740 			page = pfn_to_page(pfn);
741 			if (map->perm & VHOST_ACCESS_WO)
742 				set_page_dirty_lock(page);
743 			unpin_user_page(page);
744 		}
745 		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
746 		vhost_vdpa_general_unmap(v, map, asid);
747 		vhost_iotlb_map_free(iotlb, map);
748 	}
749 }
750 
751 static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
752 				u64 start, u64 last, u32 asid)
753 {
754 	struct vhost_iotlb_map *map;
755 	struct vdpa_map_file *map_file;
756 
757 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
758 		map_file = (struct vdpa_map_file *)map->opaque;
759 		fput(map_file->file);
760 		kfree(map_file);
761 		vhost_vdpa_general_unmap(v, map, asid);
762 		vhost_iotlb_map_free(iotlb, map);
763 	}
764 }
765 
766 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
767 				   struct vhost_iotlb *iotlb, u64 start,
768 				   u64 last, u32 asid)
769 {
770 	struct vdpa_device *vdpa = v->vdpa;
771 
772 	if (vdpa->use_va)
773 		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
774 
775 	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
776 }
777 
778 static int perm_to_iommu_flags(u32 perm)
779 {
780 	int flags = 0;
781 
782 	switch (perm) {
783 	case VHOST_ACCESS_WO:
784 		flags |= IOMMU_WRITE;
785 		break;
786 	case VHOST_ACCESS_RO:
787 		flags |= IOMMU_READ;
788 		break;
789 	case VHOST_ACCESS_RW:
790 		flags |= (IOMMU_WRITE | IOMMU_READ);
791 		break;
792 	default:
793 		WARN(1, "invalid vhost IOTLB permission\n");
794 		break;
795 	}
796 
797 	return flags | IOMMU_CACHE;
798 }
799 
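/*
 * Record the mapping in the IOTLB, then install it through whichever
 * mechanism the parent provides: dma_map(), set_map() (deferred while an
 * IOTLB batch is in flight), or the platform IOMMU domain.
 */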
800 static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
801 			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
802 {
803 	struct vhost_dev *dev = &v->vdev;
804 	struct vdpa_device *vdpa = v->vdpa;
805 	const struct vdpa_config_ops *ops = vdpa->config;
806 	u32 asid = iotlb_to_asid(iotlb);
807 	int r = 0;
808 
809 	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
810 				      pa, perm, opaque);
811 	if (r)
812 		return r;
813 
814 	if (ops->dma_map) {
815 		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
816 	} else if (ops->set_map) {
817 		if (!v->in_batch)
818 			r = ops->set_map(vdpa, asid, iotlb);
819 	} else {
820 		r = iommu_map(v->domain, iova, pa, size,
821 			      perm_to_iommu_flags(perm));
822 	}
823 	if (r) {
824 		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
825 		return r;
826 	}
827 
828 	if (!vdpa->use_va)
829 		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
830 
831 	return 0;
832 }
833 
834 static void vhost_vdpa_unmap(struct vhost_vdpa *v,
835 			     struct vhost_iotlb *iotlb,
836 			     u64 iova, u64 size)
837 {
838 	struct vdpa_device *vdpa = v->vdpa;
839 	const struct vdpa_config_ops *ops = vdpa->config;
840 	u32 asid = iotlb_to_asid(iotlb);
841 
842 	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
843 
844 	if (ops->set_map) {
845 		if (!v->in_batch)
846 			ops->set_map(vdpa, asid, iotlb);
847 	}
848 
849 }
850 
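/*
 * VA mapping path for parents with use_va set: walk the userspace range
 * VMA by VMA and map shareable file-backed regions, holding a reference on
 * the backing file for each mapping.
 */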
851 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
852 			     struct vhost_iotlb *iotlb,
853 			     u64 iova, u64 size, u64 uaddr, u32 perm)
854 {
855 	struct vhost_dev *dev = &v->vdev;
856 	u64 offset, map_size, map_iova = iova;
857 	struct vdpa_map_file *map_file;
858 	struct vm_area_struct *vma;
859 	int ret = 0;
860 
861 	mmap_read_lock(dev->mm);
862 
863 	while (size) {
864 		vma = find_vma(dev->mm, uaddr);
865 		if (!vma) {
866 			ret = -EINVAL;
867 			break;
868 		}
869 		map_size = min(size, vma->vm_end - uaddr);
870 		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
871 			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
872 			goto next;
873 
874 		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
875 		if (!map_file) {
876 			ret = -ENOMEM;
877 			break;
878 		}
879 		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
880 		map_file->offset = offset;
881 		map_file->file = get_file(vma->vm_file);
882 		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
883 				     perm, map_file);
884 		if (ret) {
885 			fput(map_file->file);
886 			kfree(map_file);
887 			break;
888 		}
889 next:
890 		size -= map_size;
891 		uaddr += map_size;
892 		map_iova += map_size;
893 	}
894 	if (ret)
895 		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
896 
897 	mmap_read_unlock(dev->mm);
898 
899 	return ret;
900 }
901 
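/*
 * PA mapping path: pin the userspace range with pin_user_pages() (bounded
 * by RLIMIT_MEMLOCK) and map each physically contiguous run of pages as a
 * single IOTLB entry.
 */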
902 static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
903 			     struct vhost_iotlb *iotlb,
904 			     u64 iova, u64 size, u64 uaddr, u32 perm)
905 {
906 	struct vhost_dev *dev = &v->vdev;
907 	struct page **page_list;
908 	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
909 	unsigned int gup_flags = FOLL_LONGTERM;
910 	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
911 	unsigned long lock_limit, sz2pin, nchunks, i;
912 	u64 start = iova;
913 	long pinned;
914 	int ret = 0;
915 
916 	/* Limit the use of memory for bookkeeping */
917 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
918 	if (!page_list)
919 		return -ENOMEM;
920 
921 	if (perm & VHOST_ACCESS_WO)
922 		gup_flags |= FOLL_WRITE;
923 
924 	npages = PFN_UP(size + (iova & ~PAGE_MASK));
925 	if (!npages) {
926 		ret = -EINVAL;
927 		goto free;
928 	}
929 
930 	mmap_read_lock(dev->mm);
931 
932 	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
933 	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
934 		ret = -ENOMEM;
935 		goto unlock;
936 	}
937 
938 	cur_base = uaddr & PAGE_MASK;
939 	iova &= PAGE_MASK;
940 	nchunks = 0;
941 
942 	while (npages) {
943 		sz2pin = min_t(unsigned long, npages, list_size);
944 		pinned = pin_user_pages(cur_base, sz2pin,
945 					gup_flags, page_list, NULL);
946 		if (sz2pin != pinned) {
947 			if (pinned < 0) {
948 				ret = pinned;
949 			} else {
950 				unpin_user_pages(page_list, pinned);
951 				ret = -ENOMEM;
952 			}
953 			goto out;
954 		}
955 		nchunks++;
956 
957 		if (!last_pfn)
958 			map_pfn = page_to_pfn(page_list[0]);
959 
960 		for (i = 0; i < pinned; i++) {
961 			unsigned long this_pfn = page_to_pfn(page_list[i]);
962 			u64 csize;
963 
964 			if (last_pfn && (this_pfn != last_pfn + 1)) {
965 				/* Map the contiguous chunk of pinned memory */
966 				csize = PFN_PHYS(last_pfn - map_pfn + 1);
967 				ret = vhost_vdpa_map(v, iotlb, iova, csize,
968 						     PFN_PHYS(map_pfn),
969 						     perm, NULL);
970 				if (ret) {
971 					/*
972 					 * Unpin the pages that are left unmapped
973 					 * from this point on in the current
974 					 * page_list. The remaining outstanding
975 					 * ones which may stride across several
976 					 * chunks will be covered in the common
977 					 * error path subsequently.
978 					 */
979 					unpin_user_pages(&page_list[i],
980 							 pinned - i);
981 					goto out;
982 				}
983 
984 				map_pfn = this_pfn;
985 				iova += csize;
986 				nchunks = 0;
987 			}
988 
989 			last_pfn = this_pfn;
990 		}
991 
992 		cur_base += PFN_PHYS(pinned);
993 		npages -= pinned;
994 	}
995 
996 	/* Map the remaining chunk of pinned memory */
997 	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
998 			     PFN_PHYS(map_pfn), perm, NULL);
999 out:
1000 	if (ret) {
1001 		if (nchunks) {
1002 			unsigned long pfn;
1003 
1004 			/*
1005 			 * Unpin the outstanding pages that should have been
1006 			 * mapped but were not, due to a vhost_vdpa_map() or
1007 			 * pin_user_pages() failure.
1008 			 *
1009 			 * Pages already mapped are accounted in vhost_vdpa_map(),
1010 			 * so the corresponding unpinning is handled by
1011 			 * vhost_vdpa_unmap().
1012 			 */
1013 			WARN_ON(!last_pfn);
1014 			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
1015 				unpin_user_page(pfn_to_page(pfn));
1016 		}
1017 		vhost_vdpa_unmap(v, iotlb, start, size);
1018 	}
1019 unlock:
1020 	mmap_read_unlock(dev->mm);
1021 free:
1022 	free_page((unsigned long)page_list);
1023 	return ret;
1024 
1025 }
1026 
1027 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
1028 					   struct vhost_iotlb *iotlb,
1029 					   struct vhost_iotlb_msg *msg)
1030 {
1031 	struct vdpa_device *vdpa = v->vdpa;
1032 
1033 	if (msg->iova < v->range.first || !msg->size ||
1034 	    msg->iova > U64_MAX - msg->size + 1 ||
1035 	    msg->iova + msg->size - 1 > v->range.last)
1036 		return -EINVAL;
1037 
1038 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
1039 				    msg->iova + msg->size - 1))
1040 		return -EEXIST;
1041 
1042 	if (vdpa->use_va)
1043 		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
1044 					 msg->uaddr, msg->perm);
1045 
1046 	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
1047 				 msg->perm);
1048 }
1049 
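/*
 * Entry point for VHOST_IOTLB messages from userspace: pick (or create) the
 * IOTLB for the target ASID, then apply UPDATE/INVALIDATE or handle
 * BATCH_BEGIN/BATCH_END, flushing via set_map() at the end of a batch.
 */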
1050 static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
1051 					struct vhost_iotlb_msg *msg)
1052 {
1053 	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
1054 	struct vdpa_device *vdpa = v->vdpa;
1055 	const struct vdpa_config_ops *ops = vdpa->config;
1056 	struct vhost_iotlb *iotlb = NULL;
1057 	struct vhost_vdpa_as *as = NULL;
1058 	int r = 0;
1059 
1060 	mutex_lock(&dev->mutex);
1061 
1062 	r = vhost_dev_check_owner(dev);
1063 	if (r)
1064 		goto unlock;
1065 
1066 	if (msg->type == VHOST_IOTLB_UPDATE ||
1067 	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
1068 		as = vhost_vdpa_find_alloc_as(v, asid);
1069 		if (!as) {
1070 			dev_err(&v->dev, "can't find and alloc asid %d\n",
1071 				asid);
1072 			r = -EINVAL;
1073 			goto unlock;
1074 		}
1075 		iotlb = &as->iotlb;
1076 	} else
1077 		iotlb = asid_to_iotlb(v, asid);
1078 
1079 	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
1080 		if (v->in_batch && v->batch_asid != asid) {
1081 			dev_info(&v->dev, "batch id %d asid %d\n",
1082 				 v->batch_asid, asid);
1083 		}
1084 		if (!iotlb)
1085 			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
1086 		r = -EINVAL;
1087 		goto unlock;
1088 	}
1089 
1090 	switch (msg->type) {
1091 	case VHOST_IOTLB_UPDATE:
1092 		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
1093 		break;
1094 	case VHOST_IOTLB_INVALIDATE:
1095 		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
1096 		break;
1097 	case VHOST_IOTLB_BATCH_BEGIN:
1098 		v->batch_asid = asid;
1099 		v->in_batch = true;
1100 		break;
1101 	case VHOST_IOTLB_BATCH_END:
1102 		if (v->in_batch && ops->set_map)
1103 			ops->set_map(vdpa, asid, iotlb);
1104 		v->in_batch = false;
1105 		break;
1106 	default:
1107 		r = -EINVAL;
1108 		break;
1109 	}
1110 unlock:
1111 	mutex_unlock(&dev->mutex);
1112 
1113 	return r;
1114 }
1115 
1116 static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
1117 					 struct iov_iter *from)
1118 {
1119 	struct file *file = iocb->ki_filp;
1120 	struct vhost_vdpa *v = file->private_data;
1121 	struct vhost_dev *dev = &v->vdev;
1122 
1123 	return vhost_chr_write_iter(dev, from);
1124 }
1125 
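/*
 * For parents without dma_map()/set_map(), DMA isolation is provided by the
 * platform IOMMU: allocate a domain on the DMA device's bus and attach it.
 */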
1126 static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
1127 {
1128 	struct vdpa_device *vdpa = v->vdpa;
1129 	const struct vdpa_config_ops *ops = vdpa->config;
1130 	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
1131 	struct bus_type *bus;
1132 	int ret;
1133 
1134 	/* Device wants to do DMA by itself */
1135 	if (ops->set_map || ops->dma_map)
1136 		return 0;
1137 
1138 	bus = dma_dev->bus;
1139 	if (!bus)
1140 		return -EFAULT;
1141 
1142 	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
1143 		return -ENOTSUPP;
1144 
1145 	v->domain = iommu_domain_alloc(bus);
1146 	if (!v->domain)
1147 		return -EIO;
1148 
1149 	ret = iommu_attach_device(v->domain, dma_dev);
1150 	if (ret)
1151 		goto err_attach;
1152 
1153 	return 0;
1154 
1155 err_attach:
1156 	iommu_domain_free(v->domain);
1157 	v->domain = NULL;
1158 	return ret;
1159 }
1160 
1161 static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
1162 {
1163 	struct vdpa_device *vdpa = v->vdpa;
1164 	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
1165 
1166 	if (v->domain) {
1167 		iommu_detach_device(v->domain, dma_dev);
1168 		iommu_domain_free(v->domain);
1169 	}
1170 
1171 	v->domain = NULL;
1172 }
1173 
1174 static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
1175 {
1176 	struct vdpa_iova_range *range = &v->range;
1177 	struct vdpa_device *vdpa = v->vdpa;
1178 	const struct vdpa_config_ops *ops = vdpa->config;
1179 
1180 	if (ops->get_iova_range) {
1181 		*range = ops->get_iova_range(vdpa);
1182 	} else if (v->domain && v->domain->geometry.force_aperture) {
1183 		range->first = v->domain->geometry.aperture_start;
1184 		range->last = v->domain->geometry.aperture_end;
1185 	} else {
1186 		range->first = 0;
1187 		range->last = ULLONG_MAX;
1188 	}
1189 }
1190 
1191 static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
1192 {
1193 	struct vhost_vdpa_as *as;
1194 	u32 asid;
1195 
1196 	for (asid = 0; asid < v->vdpa->nas; asid++) {
1197 		as = asid_to_as(v, asid);
1198 		if (as)
1199 			vhost_vdpa_remove_as(v, asid);
1200 	}
1201 
1202 	vhost_vdpa_free_domain(v);
1203 	vhost_dev_cleanup(&v->vdev);
1204 	kfree(v->vdev.vqs);
1205 }
1206 
1207 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
1208 {
1209 	struct vhost_vdpa *v;
1210 	struct vhost_dev *dev;
1211 	struct vhost_virtqueue **vqs;
1212 	int r, opened;
1213 	u32 i, nvqs;
1214 
1215 	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
1216 
1217 	opened = atomic_cmpxchg(&v->opened, 0, 1);
1218 	if (opened)
1219 		return -EBUSY;
1220 
1221 	nvqs = v->nvqs;
1222 	r = vhost_vdpa_reset(v);
1223 	if (r)
1224 		goto err;
1225 
1226 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1227 	if (!vqs) {
1228 		r = -ENOMEM;
1229 		goto err;
1230 	}
1231 
1232 	dev = &v->vdev;
1233 	for (i = 0; i < nvqs; i++) {
1234 		vqs[i] = &v->vqs[i];
1235 		vqs[i]->handle_kick = handle_vq_kick;
1236 	}
1237 	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
1238 		       vhost_vdpa_process_iotlb_msg);
1239 
1240 	r = vhost_vdpa_alloc_domain(v);
1241 	if (r)
1242 		goto err_alloc_domain;
1243 
1244 	vhost_vdpa_set_iova_range(v);
1245 
1246 	filep->private_data = v;
1247 
1248 	return 0;
1249 
1250 err_alloc_domain:
1251 	vhost_vdpa_cleanup(v);
1252 err:
1253 	atomic_dec(&v->opened);
1254 	return r;
1255 }
1256 
1257 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
1258 {
1259 	u32 i;
1260 
1261 	for (i = 0; i < v->nvqs; i++)
1262 		vhost_vdpa_unsetup_vq_irq(v, i);
1263 }
1264 
1265 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
1266 {
1267 	struct vhost_vdpa *v = filep->private_data;
1268 	struct vhost_dev *d = &v->vdev;
1269 
1270 	mutex_lock(&d->mutex);
1271 	filep->private_data = NULL;
1272 	vhost_vdpa_clean_irq(v);
1273 	vhost_vdpa_reset(v);
1274 	vhost_dev_stop(&v->vdev);
1275 	vhost_vdpa_config_put(v);
1276 	vhost_vdpa_cleanup(v);
1277 	mutex_unlock(&d->mutex);
1278 
1279 	atomic_dec(&v->opened);
1280 	complete(&v->completion);
1281 
1282 	return 0;
1283 }
1284 
1285 #ifdef CONFIG_MMU
1286 static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
1287 {
1288 	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
1289 	struct vdpa_device *vdpa = v->vdpa;
1290 	const struct vdpa_config_ops *ops = vdpa->config;
1291 	struct vdpa_notification_area notify;
1292 	struct vm_area_struct *vma = vmf->vma;
1293 	u16 index = vma->vm_pgoff;
1294 
1295 	notify = ops->get_vq_notification(vdpa, index);
1296 
1297 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1298 	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
1299 			    PFN_DOWN(notify.addr), PAGE_SIZE,
1300 			    vma->vm_page_prot))
1301 		return VM_FAULT_SIGBUS;
1302 
1303 	return VM_FAULT_NOPAGE;
1304 }
1305 
1306 static const struct vm_operations_struct vhost_vdpa_vm_ops = {
1307 	.fault = vhost_vdpa_fault,
1308 };
1309 
1310 static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
1311 {
1312 	struct vhost_vdpa *v = vma->vm_file->private_data;
1313 	struct vdpa_device *vdpa = v->vdpa;
1314 	const struct vdpa_config_ops *ops = vdpa->config;
1315 	struct vdpa_notification_area notify;
1316 	unsigned long index = vma->vm_pgoff;
1317 
1318 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1319 		return -EINVAL;
1320 	if ((vma->vm_flags & VM_SHARED) == 0)
1321 		return -EINVAL;
1322 	if (vma->vm_flags & VM_READ)
1323 		return -EINVAL;
1324 	if (index > 65535)
1325 		return -EINVAL;
1326 	if (!ops->get_vq_notification)
1327 		return -ENOTSUPP;
1328 
1329 	/* To be safe and easily modelled by userspace, we only
1330 	 * support a doorbell that sits on a page boundary and
1331 	 * does not share its page with other registers.
1332 	 */
1333 	notify = ops->get_vq_notification(vdpa, index);
1334 	if (notify.addr & (PAGE_SIZE - 1))
1335 		return -EINVAL;
1336 	if (vma->vm_end - vma->vm_start != notify.size)
1337 		return -ENOTSUPP;
1338 
1339 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1340 	vma->vm_ops = &vhost_vdpa_vm_ops;
1341 	return 0;
1342 }
1343 #endif /* CONFIG_MMU */
1344 
1345 static const struct file_operations vhost_vdpa_fops = {
1346 	.owner		= THIS_MODULE,
1347 	.open		= vhost_vdpa_open,
1348 	.release	= vhost_vdpa_release,
1349 	.write_iter	= vhost_vdpa_chr_write_iter,
1350 	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
1351 #ifdef CONFIG_MMU
1352 	.mmap		= vhost_vdpa_mmap,
1353 #endif /* CONFIG_MMU */
1354 	.compat_ioctl	= compat_ptr_ioctl,
1355 };
1356 
1357 static void vhost_vdpa_release_dev(struct device *device)
1358 {
1359 	struct vhost_vdpa *v =
1360 	       container_of(device, struct vhost_vdpa, dev);
1361 
1362 	ida_simple_remove(&vhost_vdpa_ida, v->minor);
1363 	kfree(v->vqs);
1364 	kfree(v);
1365 }
1366 
1367 static int vhost_vdpa_probe(struct vdpa_device *vdpa)
1368 {
1369 	const struct vdpa_config_ops *ops = vdpa->config;
1370 	struct vhost_vdpa *v;
1371 	int minor;
1372 	int i, r;
1373 
1374 	/* We can't support a platform IOMMU device with more than
1375 	 * one group or address space.
1376 	 */
1377 	if (!ops->set_map && !ops->dma_map &&
1378 	    (vdpa->ngroups > 1 || vdpa->nas > 1))
1379 		return -EOPNOTSUPP;
1380 
1381 	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1382 	if (!v)
1383 		return -ENOMEM;
1384 
1385 	minor = ida_simple_get(&vhost_vdpa_ida, 0,
1386 			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
1387 	if (minor < 0) {
1388 		kfree(v);
1389 		return minor;
1390 	}
1391 
1392 	atomic_set(&v->opened, 0);
1393 	v->minor = minor;
1394 	v->vdpa = vdpa;
1395 	v->nvqs = vdpa->nvqs;
1396 	v->virtio_id = ops->get_device_id(vdpa);
1397 
1398 	device_initialize(&v->dev);
1399 	v->dev.release = vhost_vdpa_release_dev;
1400 	v->dev.parent = &vdpa->dev;
1401 	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
1402 	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
1403 			       GFP_KERNEL);
1404 	if (!v->vqs) {
1405 		r = -ENOMEM;
1406 		goto err;
1407 	}
1408 
1409 	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
1410 	if (r)
1411 		goto err;
1412 
1413 	cdev_init(&v->cdev, &vhost_vdpa_fops);
1414 	v->cdev.owner = THIS_MODULE;
1415 
1416 	r = cdev_device_add(&v->cdev, &v->dev);
1417 	if (r)
1418 		goto err;
1419 
1420 	init_completion(&v->completion);
1421 	vdpa_set_drvdata(vdpa, v);
1422 
1423 	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
1424 		INIT_HLIST_HEAD(&v->as[i]);
1425 
1426 	return 0;
1427 
1428 err:
1429 	put_device(&v->dev);
1430 	return r;
1431 }
1432 
1433 static void vhost_vdpa_remove(struct vdpa_device *vdpa)
1434 {
1435 	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
1436 	int opened;
1437 
1438 	cdev_device_del(&v->cdev, &v->dev);
1439 
1440 	do {
1441 		opened = atomic_cmpxchg(&v->opened, 0, 1);
1442 		if (!opened)
1443 			break;
1444 		wait_for_completion(&v->completion);
1445 	} while (1);
1446 
1447 	put_device(&v->dev);
1448 }
1449 
1450 static struct vdpa_driver vhost_vdpa_driver = {
1451 	.driver = {
1452 		.name	= "vhost_vdpa",
1453 	},
1454 	.probe	= vhost_vdpa_probe,
1455 	.remove	= vhost_vdpa_remove,
1456 };
1457 
1458 static int __init vhost_vdpa_init(void)
1459 {
1460 	int r;
1461 
1462 	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
1463 				"vhost-vdpa");
1464 	if (r)
1465 		goto err_alloc_chrdev;
1466 
1467 	r = vdpa_register_driver(&vhost_vdpa_driver);
1468 	if (r)
1469 		goto err_vdpa_register_driver;
1470 
1471 	return 0;
1472 
1473 err_vdpa_register_driver:
1474 	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1475 err_alloc_chrdev:
1476 	return r;
1477 }
1478 module_init(vhost_vdpa_init);
1479 
1480 static void __exit vhost_vdpa_exit(void)
1481 {
1482 	vdpa_unregister_driver(&vhost_vdpa_driver);
1483 	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1484 }
1485 module_exit(vhost_vdpa_exit);
1486 
1487 MODULE_VERSION("0.0.1");
1488 MODULE_LICENSE("GPL v2");
1489 MODULE_AUTHOR("Intel Corporation");
1490 MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
1491