/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	wait_queue_head_t		container_q;
	bool				noiommu;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif

/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions; any use case other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev(), or removing
 * that symmetric reference after vfio_del_group_dev(), should use the raw
 * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and whose bus doesn't provide
	 * iommu_ops.  We set iommudata simply to be able to identify these
	 * groups as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint?  At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops.  So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it here.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
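
/*
 * Usage sketch (not part of this file): a minimal, hypothetical VFIO bus
 * driver probe/remove pair following the pattern described above.  The
 * "my_*" names, ops table, and data pointer are illustrative assumptions,
 * not any real in-tree driver.
 *
 *	static int my_vfio_probe(struct device *dev)
 *	{
 *		struct iommu_group *group = vfio_iommu_group_get(dev);
 *		int ret;
 *
 *		if (!group)
 *			return -EINVAL;
 *
 *		ret = vfio_add_group_dev(dev, &my_vfio_dev_ops, my_data);
 *		if (ret)
 *			vfio_iommu_group_put(group, dev);
 *		return ret;
 *	}
 *
 *	static void my_vfio_remove(struct device *dev)
 *	{
 *		void *device_data = vfio_del_group_dev(dev);
 *
 *		vfio_iommu_group_put(dev->iommu_group, dev);
 *		kfree(device_data);
 *	}
 */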

#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif


/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
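
/*
 * Usage sketch: an IOMMU backend registers a struct vfio_iommu_driver_ops
 * at module init and unregisters it at module exit, mirroring the
 * vfio_noiommu_ops table above.  The "my_*" names below are illustrative
 * assumptions.
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my-iommu-backend",
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.attach_group	= my_attach_group,
 *		.detach_group	= my_detach_group,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_ops);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_ops);
 *	}
 */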

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	init_waitqueue_head(&group->container_q);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	return match_string(vfio_driver_whitelist,
			    ARRAY_SIZE(vfio_driver_whitelist),
			    drv->name) >= 0;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock.  Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);

/**
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_group *group;
	struct vfio_device *device;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = NULL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (!strcmp(dev_name(it->dev), buf)) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device keep it in use, so the
 * bus driver is asked to request that the user release the device before
 * we can return its device_data to the caller.
 */
void *vfio_del_group_dev(struct device *dev)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	add_wait_queue(&vfio.release_q, &wait);

	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
		} else {
			wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
			if (signal_pending(current)) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}

	} while (1);

	remove_wait_queue(&vfio.release_q, &wait);
	/*
	 * In order to support multiple devices per group, devices can be
	 * plucked from the group while other devices in the group are still
	 * in use.  The container persists with this group and those remaining
	 * devices still attached.  If the user creates an isolation violation
	 * by binding this device to another driver while the group is still in
	 * use, that's their fault.  However, in the case of removing the last,
	 * or potentially the only, device in the group there can be no other
	 * in-use devices in the group.  The user has done their due diligence
	 * and we should lay no claims to those devices.  In order to do that,
	 * we need to make sure the group is detached from the container.
	 * Without this stall, we're potentially racing with a user process
	 * that may attempt to immediately bind this device to another driver.
	 */
	if (list_empty(&group->device_list))
		wait_event(group->container_q, !group->container);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
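
/*
 * Sketch of the cooperating side of the request mechanism above: a bus
 * driver can implement the optional vfio_device_ops.request callback to
 * nudge the user to release the device, typically via an eventfd the
 * user registered.  This is a hedged illustration with hypothetical
 * "my_*" names and fields, not a copy of any in-tree driver.
 *
 *	static void my_request(void *device_data, unsigned int count)
 *	{
 *		struct my_device *mydev = device_data;
 *
 *		if (mydev->req_trigger)
 *			eventfd_signal(mydev->req_trigger, 1);
 *		else if (count == 0)
 *			dev_notice(mydev->dev,
 *				   "No device request channel registered, blocked until released by user\n");
 *	}
 */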

/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and noiommu
		 * containers can only use vfio-noiommu.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};
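
/*
 * Userspace view of the container node above (a minimal sketch based on
 * the documented VFIO usage model; error handling omitted):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		// unknown API version, bail
 *
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		// type1 not supported, probe another extension
 *
 * Note that VFIO_SET_IOMMU is only valid once a group has been attached
 * to the container; see the group fd sketch further below.
 */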

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	wake_up(&group->container_q);
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know the group still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, once the
 * group file descriptor and all device file descriptors are closed,
 * the group is released from the container.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static int vfio_group_add_container_user(struct vfio_group *group)
{
	if (!atomic_inc_not_zero(&group->container_users))
		return -EINVAL;

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return -EPERM;
	}
	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return -EINVAL;
	}

	return 0;
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Warn if previous user didn't cleanup and re-init to drop them */
	if (WARN_ON(group->notifier.head))
		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
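
/*
 * Userspace view of the group node, continuing the container sketch
 * above (illustrative; group number and device name are examples, error
 * handling omitted):
 *
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		// some device in the group is bound to a non-vfio driver
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */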
1635 
1636 /**
1637  * VFIO Device fd
1638  */
vfio_device_fops_release(struct inode * inode,struct file * filep)1639 static int vfio_device_fops_release(struct inode *inode, struct file *filep)
1640 {
1641 	struct vfio_device *device = filep->private_data;
1642 
1643 	device->ops->release(device->device_data);
1644 
1645 	vfio_group_try_dissolve_container(device->group);
1646 
1647 	vfio_device_put(device);
1648 
1649 	return 0;
1650 }
1651 
vfio_device_fops_unl_ioctl(struct file * filep,unsigned int cmd,unsigned long arg)1652 static long vfio_device_fops_unl_ioctl(struct file *filep,
1653 				       unsigned int cmd, unsigned long arg)
1654 {
1655 	struct vfio_device *device = filep->private_data;
1656 
1657 	if (unlikely(!device->ops->ioctl))
1658 		return -EINVAL;
1659 
1660 	return device->ops->ioctl(device->device_data, cmd, arg);
1661 }
1662 
vfio_device_fops_read(struct file * filep,char __user * buf,size_t count,loff_t * ppos)1663 static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
1664 				     size_t count, loff_t *ppos)
1665 {
1666 	struct vfio_device *device = filep->private_data;
1667 
1668 	if (unlikely(!device->ops->read))
1669 		return -EINVAL;
1670 
1671 	return device->ops->read(device->device_data, buf, count, ppos);
1672 }
1673 
vfio_device_fops_write(struct file * filep,const char __user * buf,size_t count,loff_t * ppos)1674 static ssize_t vfio_device_fops_write(struct file *filep,
1675 				      const char __user *buf,
1676 				      size_t count, loff_t *ppos)
1677 {
1678 	struct vfio_device *device = filep->private_data;
1679 
1680 	if (unlikely(!device->ops->write))
1681 		return -EINVAL;
1682 
1683 	return device->ops->write(device->device_data, buf, count, ppos);
1684 }
1685 
vfio_device_fops_mmap(struct file * filep,struct vm_area_struct * vma)1686 static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
1687 {
1688 	struct vfio_device *device = filep->private_data;
1689 
1690 	if (unlikely(!device->ops->mmap))
1691 		return -EINVAL;
1692 
1693 	return device->ops->mmap(device->device_data, vma);
1694 }
1695 
1696 #ifdef CONFIG_COMPAT
vfio_device_fops_compat_ioctl(struct file * filep,unsigned int cmd,unsigned long arg)1697 static long vfio_device_fops_compat_ioctl(struct file *filep,
1698 					  unsigned int cmd, unsigned long arg)
1699 {
1700 	arg = (unsigned long)compat_ptr(arg);
1701 	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
1702 }
1703 #endif	/* CONFIG_COMPAT */
1704 
1705 static const struct file_operations vfio_device_fops = {
1706 	.owner		= THIS_MODULE,
1707 	.release	= vfio_device_fops_release,
1708 	.read		= vfio_device_fops_read,
1709 	.write		= vfio_device_fops_write,
1710 	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
1711 #ifdef CONFIG_COMPAT
1712 	.compat_ioctl	= vfio_device_fops_compat_ioctl,
1713 #endif
1714 	.mmap		= vfio_device_fops_mmap,
1715 };
1716 
/**
 * External user API, exported symbols intended for use by other
 * kernel modules (e.g. KVM).
 *
 * The protocol includes:
 *  1. User space performs the normal VFIO init:
 *	- opens a new container;
 *	- attaches group(s) to it;
 *	- sets an IOMMU driver for the container.
 * Once an IOMMU driver is set for a container, all groups in it are
 * considered ready for use by an external user.
 *
 * 2. User space passes a group fd to the external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- an IOMMU is set for it.
 * If both checks pass, vfio_group_get_external_user()
 * increments the container user counter to prevent the VFIO
 * group from being disposed of before the external user exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to obtain the IOMMU group ID.
 *
 * 4. When the external user (e.g. KVM) is finished, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;
	int ret;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	ret = vfio_group_add_container_user(group);
	if (ret)
		return ERR_PTR(ret);

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);

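/*
 * Illustrative sketch (disabled): how an external module such as KVM
 * consumes the API above.  The fd handling is simplified and the foo_*
 * names are made up; see virt/kvm/vfio.c for the real consumer.
 */
#if 0
static struct vfio_group *foo_attach_vfio_group(int fd)
{
	struct file *filp = fget(fd);
	struct vfio_group *group;

	if (!filp)
		return ERR_PTR(-EBADF);

	/* Fails unless the group is initialized and backed by an IOMMU */
	group = vfio_group_get_external_user(filp);
	fput(filp);	/* the group holds its own reference now */
	if (IS_ERR(group))
		return group;

	pr_info("attached to IOMMU group %d\n",
		vfio_external_user_iommu_id(group));
	return group;
}

static void foo_detach_vfio_group(struct vfio_group *group)
{
	/* Drops the container user reference taken above */
	vfio_group_put_external_user(group);
}
#endif
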
/**
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities: allocate or
 * reallocate the buffer with an additional @size bytes, filling in the
 * @id and @version of the new capability.  A pointer to the new
 * capability header is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added at the tail; vfio_info_cap_shift() should be called to fix up
 * the next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);

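/*
 * Illustrative sketch (disabled): typical use of the capability chain
 * helpers from a driver's *_GET_INFO ioctl handler.  The foo_fill_caps()
 * wrapper is hypothetical; compare the region info handling in
 * drivers/vfio/pci/vfio_pci.c.
 */
#if 0
static int foo_fill_caps(struct vfio_region_info *info, unsigned long arg)
{
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	struct vfio_region_info_cap_type cap_type = {
		.header.id = VFIO_REGION_INFO_CAP_TYPE,
		.header.version = 1,
	};
	int ret;

	ret = vfio_info_add_capability(&caps, &cap_type.header,
				       sizeof(cap_type));
	if (ret)
		return ret;

	if (caps.size) {
		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			/* Tell user space how much room to provide */
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			/*
			 * Offsets in the chain were buffer-relative;
			 * rebase them to the start of the user buffer.
			 */
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}
		kfree(caps.buf);
	}
	return 0;
}
#endif
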
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);

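/*
 * Illustrative sketch (disabled): how a driver's VFIO_DEVICE_SET_IRQS
 * ioctl handler typically uses the validator above.  FOO_NUM_IRQS and
 * foo_irq_count() are hypothetical; compare vfio_pci_ioctl().
 */
#if 0
static long foo_set_irqs_ioctl(unsigned long arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret;

	if (copy_from_user(&hdr, (void __user *)arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, foo_irq_count(),
						 FOO_NUM_IRQS, &data_size);
	if (ret)
		return ret;

	if (data_size) {
		/* Trailing per-IRQ payload (bools or eventfds) */
		data = memdup_user((void __user *)(arg + minsz), data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	/* ... program the device's IRQs from hdr and data ... */

	kfree(data);
	return 0;
}
#endif
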
/*
 * Pin a set of guest PFNs and return their associated host PFNs for the
 * local domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]   : count of elements in the user_pfn array.  This count
 *		   must not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);

/*
 * Unpin a set of host PFNs for the local domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned.
 * @npage [in]   : count of elements in the user_pfn array.  This count
 *                 must not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);

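/*
 * Illustrative sketch (disabled): a mediated-device vendor driver
 * pinning a guest page before device DMA and unpinning it afterwards.
 * The foo_* names are hypothetical; compare the pattern used by
 * drivers/gpu/drm/i915/gvt/kvmgt.c.
 */
#if 0
static int foo_dma_to_guest_page(struct device *mdev_dev, unsigned long gfn)
{
	unsigned long user_pfn[1] = { gfn };
	unsigned long phys_pfn[1];
	int ret;

	ret = vfio_pin_pages(mdev_dev, user_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, phys_pfn);
	if (ret != 1)	/* returns the number of pages pinned */
		return ret < 0 ? ret : -EFAULT;

	/* ... program device DMA against the host PFN phys_pfn[0] ... */

	return vfio_unpin_pages(mdev_dev, user_pfn, 1) == 1 ? 0 : -EFAULT;
}
#endif
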
static int vfio_register_iommu_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->register_notifier))
		ret = driver->ops->register_notifier(container->iommu_data,
						     events, nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_iommu_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unregister_notifier))
		ret = driver->ops->unregister_notifier(container->iommu_data,
						       nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	group->kvm = kvm;
	blocking_notifier_call_chain(&group->notifier,
				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
}
EXPORT_SYMBOL_GPL(vfio_group_set_kvm);

static int vfio_register_group_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	int ret;
	bool set_kvm = false;

	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
		set_kvm = true;

	/* clear known events */
	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;

	/* refuse to continue if unknown events remain */
	if (*events)
		return -EINVAL;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_register(&group->notifier, nb);

	/*
	 * The kvm pointer may already have been attached to this group,
	 * so replay the SET_KVM event once upon registration.
	 */
	if (!ret && set_kvm && group->kvm)
		blocking_notifier_call_chain(&group->notifier,
					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_group_notifier(struct vfio_group *group,
					 struct notifier_block *nb)
{
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_unregister(&group->notifier, nb);

	vfio_group_try_dissolve_container(group);

	return ret;
}

int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
			   unsigned long *events, struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb || !events || (*events == 0))
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_register_iommu_notifier(group, events, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_register_group_notifier(group, events, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);

int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
			     struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb)
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_unregister_iommu_notifier(group, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_unregister_group_notifier(group, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);

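/*
 * Illustrative sketch (disabled): an mdev vendor driver registering
 * for DMA unmap notifications via the dispatcher above.  The foo_*
 * names are hypothetical; compare kvmgt_open() in the i915 GVT-g
 * driver for a real consumer.
 */
#if 0
static int foo_iommu_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		/* ... invalidate/unpin anything in [iova, iova + size) ... */
		pr_debug("unmap iova 0x%llx size 0x%llx\n",
			 (unsigned long long)unmap->iova,
			 (unsigned long long)unmap->size);
	}
	return NOTIFY_OK;
}

static int foo_register_notifiers(struct device *mdev_dev,
				  struct notifier_block *nb)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	nb->notifier_call = foo_iommu_notifier;
	return vfio_register_notifier(mdev_dev, VFIO_IOMMU_NOTIFY,
				      &events, nb);
}
#endif
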
/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

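/*
 * Illustrative sketch (disabled): the user space side of the device
 * nodes created below, condensed from Documentation/vfio.txt.  The
 * group number and device name are examples only.
 */
#if 0
	int container, group, device;
	struct vfio_group_status status = { .argsz = sizeof(status) };

	container = open("/dev/vfio/vfio", O_RDWR);

	/* Unknown API versions are unusable */
	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
		return -1;

	/* /dev/vfio/$GROUP, served by the class/cdev set up below */
	group = open("/dev/vfio/26", O_RDWR);
	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;	/* not all group devices bound to vfio */

	/* Connect the group to a container, then enable the IOMMU */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	/* Finally get a device fd, backed by vfio_device_fops above */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
#endif
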
static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");