/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

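/*
 * Object relationships, as defined by the structures above: a
 * vfio_container is created per open of /dev/vfio/vfio and holds a
 * list of vfio_groups; each vfio_group wraps exactly one iommu_group
 * and holds a list of vfio_devices.  All three are reference counted
 * via kref so the container, group, and device file descriptors may
 * be closed in any order.
 *
 *	vfio_container 1---* vfio_group 1---* vfio_device
 *	      |                   |
 *	 iommu_driver        iommu_group
 */
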
/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	group->iommu_group = iommu_group;

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%d", iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return (struct vfio_group *)dev; /* ERR_PTR */
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

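/*
 * Deferred group put: vfio_group_put() may drop the final reference and
 * unregister the iommu group notifier, which takes the notifier chain's
 * rwsem for write.  A context that already holds that rwsem for read -
 * i.e. the notifier callback itself - must instead queue the put to a
 * workqueue via vfio_group_schedule_put() below (see the comment at the
 * end of vfio_iommu_group_notifier()).
 */
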
struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	int i;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = ACCESS_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock.  Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);

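/*
 * Illustrative only (not part of this file): a vfio bus driver
 * typically calls vfio_add_group_dev() from its probe routine,
 * registering its own vfio_device_ops and private data, and undoes it
 * in remove via vfio_del_group_dev().  Sketch, with hypothetical names
 * (my_vfio_pci_probe, my_device, my_vfio_dev_ops):
 *
 *	static int my_vfio_pci_probe(struct pci_dev *pdev,
 *				     const struct pci_device_id *id)
 *	{
 *		struct my_device *vdev;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev)
 *			return -ENOMEM;
 *
 *		return vfio_add_group_dev(&pdev->dev, &my_vfio_dev_ops, vdev);
 *	}
 */
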
/**
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = NULL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (!strcmp(dev_name(it->dev), buf)) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	device = vfio_group_get_device(group, dev);
	if (!device)
		return false;

	vfio_device_put(device);
	return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	long ret;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			ret = wait_event_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
		} else {
			ret = wait_event_interruptible_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
			if (ret == -ERESTARTSYS) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	} while (ret <= 0);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

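/*
 * Illustrative only (not part of this file): the matching teardown in a
 * bus driver's remove routine reclaims the private data registered in
 * probe.  Sketch, continuing the hypothetical driver above:
 *
 *	static void my_vfio_pci_remove(struct pci_dev *pdev)
 *	{
 *		struct my_device *vdev = vfio_del_group_dev(&pdev->dev);
 *
 *		kfree(vdev);
 *	}
 */
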
/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {
				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		/* module reference holds the driver we're working on */
		mutex_unlock(&vfio.iommu_drivers_lock);

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			goto skip_drivers_unlock;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (!ret) {
			container->iommu_driver = driver;
			container->iommu_data = data;
		} else {
			driver->ops->release(data);
			module_put(driver->ops->owner);
		}

		goto skip_drivers_unlock;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
	up_write(&container->group_lock);

	return ret;
}

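/*
 * Illustrative only (not part of this file): the container-side half
 * of the userspace setup, using ioctls from include/uapi/linux/vfio.h.
 * Note that VFIO_SET_IOMMU is only valid once at least one group has
 * been attached (see the comment in vfio_ioctl_set_iommu() above):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		// unknown API version
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		// type1 iommu driver not available
 *	// ... attach group(s), then:
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */
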
static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		down_read(&container->group_lock);

		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);

		up_read(&container->group_lock);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	up_read(&container->group_lock);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	return ret;
}

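/*
 * Illustrative only (not part of this file): the group-side half of
 * the userspace setup, continuing the container sketch above.  The
 * group number and device name are examples:
 *
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		// not all group devices are bound to vfio/stub drivers
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	// ... VFIO_SET_IOMMU on the container, then:
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */
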
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};

/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	if (!atomic_inc_not_zero(&group->container_users))
		return ERR_PTR(-EINVAL);

	if (!group->container->iommu_driver ||
			!vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return ERR_PTR(-EINVAL);
	}

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	/*
	 * Dissolve the container user before dropping the group
	 * reference; the put may free the group, so touching it
	 * afterwards would be a use-after-free.
	 */
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);

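/*
 * Illustrative only (not part of this file): an external user such as
 * KVM consumes this API roughly as follows, given a group fd passed in
 * from userspace:
 *
 *	struct fd f = fdget(group_fd);
 *	struct vfio_group *group;
 *
 *	group = vfio_group_get_external_user(f.file);
 *	fdput(f);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);	// group not viable or no IOMMU set
 *
 *	// ... use vfio_external_user_iommu_id(group) as needed ...
 *
 *	vfio_group_put_external_user(group);
 */
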
/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

	/*
	 * Attempt to load known iommu-drivers.  This gives us a working
	 * environment without the user needing to explicitly load iommu
	 * drivers.
	 */
	request_module_nowait("vfio_iommu_type1");
	request_module_nowait("vfio_iommu_spapr_tce");

	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");