
Lines Matching full:group

105  * removes the device from the dummy group and cannot be nested.
109 struct iommu_group *group; in vfio_iommu_group_get() local
112 group = iommu_group_get(dev); in vfio_iommu_group_get()
116 * With noiommu enabled, an IOMMU group will be created for a device in vfio_iommu_group_get()
121 if (group || !noiommu || iommu_present(dev->bus)) in vfio_iommu_group_get()
122 return group; in vfio_iommu_group_get()
124 group = iommu_group_alloc(); in vfio_iommu_group_get()
125 if (IS_ERR(group)) in vfio_iommu_group_get()
128 iommu_group_set_name(group, "vfio-noiommu"); in vfio_iommu_group_get()
129 iommu_group_set_iommudata(group, &noiommu, NULL); in vfio_iommu_group_get()
130 ret = iommu_group_add_device(group, dev); in vfio_iommu_group_get()
132 iommu_group_put(group); in vfio_iommu_group_get()
137 * Where to taint? At this point we've added an IOMMU group for a in vfio_iommu_group_get()
145 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); in vfio_iommu_group_get()
148 return group; in vfio_iommu_group_get()
152 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) in vfio_iommu_group_put() argument
155 if (iommu_group_get_iommudata(group) == &noiommu) in vfio_iommu_group_put()
159 iommu_group_put(group); in vfio_iommu_group_put()
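For context, a vfio bus driver pairs these two helpers around device registration; a minimal sketch loosely following vfio-pci's probe path, where my_register_vfio_dev() is a hypothetical stand-in for the real registration step:

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            struct iommu_group *group;
            int ret;

            group = vfio_iommu_group_get(&pdev->dev); /* may create "vfio-noiommu" */
            if (!group)
                    return -EINVAL;

            ret = my_register_vfio_dev(pdev);         /* hypothetical */
            if (ret)
                    vfio_iommu_group_put(group, &pdev->dev); /* undoes a fake group too */
            return ret;
    }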
260 * Group minor allocation/free - both called with vfio.group_lock held
262 static int vfio_alloc_group_minor(struct vfio_group *group) in vfio_alloc_group_minor() argument
264 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); in vfio_alloc_group_minor()
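The paired free helper (called at 408 below) did not match the search; given the idr usage above, it is presumably just:

    static void vfio_free_group_minor(int minor)
    {
            idr_remove(&vfio.group_idr, minor);   /* vfio.group_lock held by callers */
    }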
274 static void vfio_group_get(struct vfio_group *group);
279 * it's freed via kref. Must support container/group/device being
300 static void vfio_group_unlock_and_free(struct vfio_group *group) in vfio_group_unlock_and_free() argument
305 * that the group is no longer in vfio.group_list. in vfio_group_unlock_and_free()
307 iommu_group_unregister_notifier(group->iommu_group, &group->nb); in vfio_group_unlock_and_free()
308 kfree(group); in vfio_group_unlock_and_free()
312 * Group objects - create, release, get, put, search
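The initializations that follow (324-336), plus the uses elsewhere in this listing, imply a group object roughly like the following reconstruction (field order and any unmatched members are guesses):

    struct vfio_group {
            struct kref                     kref;            /* release -> vfio_group_release() */
            int                             minor;           /* char-dev minor from vfio.group_idr */
            atomic_t                        container_users;
            atomic_t                        opened;
            struct iommu_group              *iommu_group;
            struct vfio_container           *container;
            struct list_head                device_list;     /* protected by device_lock */
            struct mutex                    device_lock;
            struct device                   *dev;            /* the /dev/vfio/$GROUP device */
            struct notifier_block           nb;              /* iommu group notifier */
            struct list_head                vfio_next;       /* link in vfio.group_list */
            struct list_head                container_next;  /* link in container group_list */
            struct list_head                unbound_list;    /* protected by unbound_lock */
            struct mutex                    unbound_lock;
            wait_queue_head_t               container_q;     /* woken when container detaches */
            bool                            noiommu;
            unsigned int                    dev_counter;
            struct kvm                      *kvm;
            struct blocking_notifier_head   notifier;        /* group notifier chain */
    };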
316 struct vfio_group *group, *tmp; in vfio_create_group() local
320 group = kzalloc(sizeof(*group), GFP_KERNEL); in vfio_create_group()
321 if (!group) in vfio_create_group()
324 kref_init(&group->kref); in vfio_create_group()
325 INIT_LIST_HEAD(&group->device_list); in vfio_create_group()
326 mutex_init(&group->device_lock); in vfio_create_group()
327 INIT_LIST_HEAD(&group->unbound_list); in vfio_create_group()
328 mutex_init(&group->unbound_lock); in vfio_create_group()
329 atomic_set(&group->container_users, 0); in vfio_create_group()
330 atomic_set(&group->opened, 0); in vfio_create_group()
331 init_waitqueue_head(&group->container_q); in vfio_create_group()
332 group->iommu_group = iommu_group; in vfio_create_group()
334 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); in vfio_create_group()
336 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_create_group()
338 group->nb.notifier_call = vfio_iommu_group_notifier; in vfio_create_group()
344 * do anything unless it can find the group in vfio.group_list, so in vfio_create_group()
347 ret = iommu_group_register_notifier(iommu_group, &group->nb); in vfio_create_group()
349 kfree(group); in vfio_create_group()
355 /* Did we race creating this group? */ in vfio_create_group()
359 vfio_group_unlock_and_free(group); in vfio_create_group()
364 minor = vfio_alloc_group_minor(group); in vfio_create_group()
366 vfio_group_unlock_and_free(group); in vfio_create_group()
372 group, "%s%d", group->noiommu ? "noiommu-" : "", in vfio_create_group()
376 vfio_group_unlock_and_free(group); in vfio_create_group()
380 group->minor = minor; in vfio_create_group()
381 group->dev = dev; in vfio_create_group()
383 list_add(&group->vfio_next, &vfio.group_list); in vfio_create_group()
387 return group; in vfio_create_group()
393 struct vfio_group *group = container_of(kref, struct vfio_group, kref); in vfio_group_release() local
395 struct iommu_group *iommu_group = group->iommu_group; in vfio_group_release()
397 WARN_ON(!list_empty(&group->device_list)); in vfio_group_release()
398 WARN_ON(group->notifier.head); in vfio_group_release()
401 &group->unbound_list, unbound_next) { in vfio_group_release()
406 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); in vfio_group_release()
407 list_del(&group->vfio_next); in vfio_group_release()
408 vfio_free_group_minor(group->minor); in vfio_group_release()
409 vfio_group_unlock_and_free(group); in vfio_group_release()
413 static void vfio_group_put(struct vfio_group *group) in vfio_group_put() argument
415 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); in vfio_group_put()
420 struct vfio_group *group; member
429 vfio_group_put(do_work->group); in vfio_group_put_bg()
433 static void vfio_group_schedule_put(struct vfio_group *group) in vfio_group_schedule_put() argument
442 do_work->group = group; in vfio_group_schedule_put()
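Only fragments of the deferred-put machinery match; filled in around them, it is a standard one-shot work item (a reconstruction, with names taken from the fragments above):

    struct vfio_group_put_work {
            struct work_struct work;
            struct vfio_group *group;
    };

    static void vfio_group_put_bg(struct work_struct *work)
    {
            struct vfio_group_put_work *do_work;

            do_work = container_of(work, struct vfio_group_put_work, work);
            vfio_group_put(do_work->group);
            kfree(do_work);
    }

    static void vfio_group_schedule_put(struct vfio_group *group)
    {
            struct vfio_group_put_work *do_work;

            do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
            if (WARN_ON(!do_work))
                    return;

            INIT_WORK(&do_work->work, vfio_group_put_bg);
            do_work->group = group;
            schedule_work(&do_work->work);
    }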
446 /* Assume group_lock or group reference is held */
447 static void vfio_group_get(struct vfio_group *group) in vfio_group_get() argument
449 kref_get(&group->kref); in vfio_group_get()
454 * sure the group pointer is valid under lock and get a reference.
456 static struct vfio_group *vfio_group_try_get(struct vfio_group *group) in vfio_group_try_get() argument
458 struct vfio_group *target = group; in vfio_group_try_get()
461 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_try_get()
462 if (group == target) { in vfio_group_try_get()
463 vfio_group_get(group); in vfio_group_try_get()
465 return group; in vfio_group_try_get()
476 struct vfio_group *group; in vfio_group_get_from_iommu() local
479 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_get_from_iommu()
480 if (group->iommu_group == iommu_group) { in vfio_group_get_from_iommu()
481 vfio_group_get(group); in vfio_group_get_from_iommu()
483 return group; in vfio_group_get_from_iommu()
493 struct vfio_group *group; in vfio_group_get_from_minor() local
496 group = idr_find(&vfio.group_idr, minor); in vfio_group_get_from_minor()
497 if (!group) { in vfio_group_get_from_minor()
501 vfio_group_get(group); in vfio_group_get_from_minor()
504 return group; in vfio_group_get_from_minor()
510 struct vfio_group *group; in vfio_group_get_from_dev() local
516 group = vfio_group_get_from_iommu(iommu_group); in vfio_group_get_from_dev()
519 return group; in vfio_group_get_from_dev()
525 /* Device reference always implies a group reference */
538 static struct vfio_device *vfio_group_get_device(struct vfio_group *group, in vfio_group_get_device() argument
543 mutex_lock(&group->device_lock); in vfio_group_get_device()
544 list_for_each_entry(device, &group->device_list, group_next) { in vfio_group_get_device()
546 mutex_unlock(&group->device_lock); in vfio_group_get_device()
550 mutex_unlock(&group->device_lock); in vfio_group_get_device()
557 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
563 * then all of the downstream devices will be part of the same IOMMU group as
587 * A vfio group is viable for use by userspace if all devices are in
596 * group. The second is to test if the device exists on the group
602 struct vfio_group *group = data; in vfio_dev_viable() local
608 mutex_lock(&group->unbound_lock); in vfio_dev_viable()
609 list_for_each_entry(unbound, &group->unbound_list, unbound_next) { in vfio_dev_viable()
615 mutex_unlock(&group->unbound_lock); in vfio_dev_viable()
620 device = vfio_group_get_device(group, dev); in vfio_dev_viable()
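The matches only skim vfio_dev_viable(); reconstructed around them, a device counts as viable in three cases: it sits on the group's recently-unbound list, it has no driver or a whitelisted one (e.g. pci-stub, per the discussion above), or it is bound to a registered vfio driver. A sketch, with vfio_dev_whitelisted() assumed from that discussion:

    static int vfio_dev_viable(struct device *dev, void *data)
    {
            struct vfio_group *group = data;
            struct vfio_device *device;
            struct device_driver *drv = READ_ONCE(dev->driver);
            struct vfio_unbound_dev *unbound;
            int ret = -EINVAL;

            mutex_lock(&group->unbound_lock);
            list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
                    if (dev == unbound->dev) {
                            ret = 0;    /* recently unbound: still viable */
                            break;
                    }
            }
            mutex_unlock(&group->unbound_lock);

            if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
                    return 0;

            device = vfio_group_get_device(group, dev);
            if (device) {
                    vfio_device_put(device);  /* bound to a vfio driver */
                    return 0;
            }

            return ret;
    }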
632 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) in vfio_group_nb_add_dev() argument
637 device = vfio_group_get_device(group, dev); in vfio_group_nb_add_dev()
644 if (!atomic_read(&group->container_users)) in vfio_group_nb_add_dev()
648 dev_WARN(dev, "Device added to live group %d!\n", in vfio_group_nb_add_dev()
649 iommu_group_id(group->iommu_group)); in vfio_group_nb_add_dev()
654 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) in vfio_group_nb_verify() argument
656 /* We don't care what happens when the group isn't in use */ in vfio_group_nb_verify()
657 if (!atomic_read(&group->container_users)) in vfio_group_nb_verify()
660 return vfio_dev_viable(dev, group); in vfio_group_nb_verify()
666 struct vfio_group *group = container_of(nb, struct vfio_group, nb); in vfio_iommu_group_notifier() local
672 * risk racing a group being removed. Ignore spurious notifies. in vfio_iommu_group_notifier()
674 group = vfio_group_try_get(group); in vfio_iommu_group_notifier()
675 if (!group) in vfio_iommu_group_notifier()
680 vfio_group_nb_add_dev(group, dev); in vfio_iommu_group_notifier()
692 dev_dbg(dev, "%s: group %d binding to driver\n", __func__, in vfio_iommu_group_notifier()
693 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
696 dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__, in vfio_iommu_group_notifier()
697 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
698 BUG_ON(vfio_group_nb_verify(group, dev)); in vfio_iommu_group_notifier()
701 dev_dbg(dev, "%s: group %d unbinding from driver %s\n", in vfio_iommu_group_notifier()
702 __func__, iommu_group_id(group->iommu_group), in vfio_iommu_group_notifier()
706 dev_dbg(dev, "%s: group %d unbound from driver\n", __func__, in vfio_iommu_group_notifier()
707 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
709 * XXX An unbound device in a live group is ok, but we'd in vfio_iommu_group_notifier()
716 mutex_lock(&group->unbound_lock); in vfio_iommu_group_notifier()
718 &group->unbound_list, unbound_next) { in vfio_iommu_group_notifier()
725 mutex_unlock(&group->unbound_lock); in vfio_iommu_group_notifier()
730 * If we're the last reference to the group, the group will be in vfio_iommu_group_notifier()
731 * released, which includes unregistering the iommu group notifier. in vfio_iommu_group_notifier()
736 vfio_group_schedule_put(group); in vfio_iommu_group_notifier()
757 struct vfio_group *group; in vfio_register_group_dev() local
763 group = vfio_group_get_from_iommu(iommu_group); in vfio_register_group_dev()
764 if (!group) { in vfio_register_group_dev()
765 group = vfio_create_group(iommu_group); in vfio_register_group_dev()
766 if (IS_ERR(group)) { in vfio_register_group_dev()
768 return PTR_ERR(group); in vfio_register_group_dev()
778 existing_device = vfio_group_get_device(group, device->dev); in vfio_register_group_dev()
780 dev_WARN(device->dev, "Device already exists on group %d\n", in vfio_register_group_dev()
783 vfio_group_put(group); in vfio_register_group_dev()
787 /* Our reference on group is moved to the device */ in vfio_register_group_dev()
788 device->group = group; in vfio_register_group_dev()
793 mutex_lock(&group->device_lock); in vfio_register_group_dev()
794 list_add(&device->group_next, &group->device_list); in vfio_register_group_dev()
795 group->dev_counter++; in vfio_register_group_dev()
796 mutex_unlock(&group->device_lock); in vfio_register_group_dev()
834 struct vfio_group *group; in vfio_device_get_from_dev() local
837 group = vfio_group_get_from_dev(dev); in vfio_device_get_from_dev()
838 if (!group) in vfio_device_get_from_dev()
841 device = vfio_group_get_device(group, dev); in vfio_device_get_from_dev()
842 vfio_group_put(group); in vfio_device_get_from_dev()
848 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, in vfio_device_get_from_name() argument
853 mutex_lock(&group->device_lock); in vfio_device_get_from_name()
854 list_for_each_entry(it, &group->device_list, group_next) { in vfio_device_get_from_name()
872 mutex_unlock(&group->device_lock); in vfio_device_get_from_name()
891 struct vfio_group *group = device->group; in vfio_unregister_group_dev() local
898 * When the device is removed from the group, the group suddenly in vfio_unregister_group_dev()
900 * completes), but it's not present in the group. This is bad news in vfio_unregister_group_dev()
901 * for any external users that need to re-acquire a group reference in vfio_unregister_group_dev()
909 mutex_lock(&group->unbound_lock); in vfio_unregister_group_dev()
910 list_add(&unbound->unbound_next, &group->unbound_list); in vfio_unregister_group_dev()
911 mutex_unlock(&group->unbound_lock); in vfio_unregister_group_dev()
938 mutex_lock(&group->device_lock); in vfio_unregister_group_dev()
940 group->dev_counter--; in vfio_unregister_group_dev()
941 mutex_unlock(&group->device_lock); in vfio_unregister_group_dev()
944 * In order to support multiple devices per group, devices can be in vfio_unregister_group_dev()
945 * plucked from the group while other devices in the group are still in vfio_unregister_group_dev()
946 * in use. The container persists with this group and those remaining in vfio_unregister_group_dev()
948 * by binding this device to another driver while the group is still in in vfio_unregister_group_dev()
950 * or potentially the only, device in the group there can be no other in vfio_unregister_group_dev()
951 * in-use devices in the group. The user has done their due diligence in vfio_unregister_group_dev()
953 * we need to make sure the group is detached from the container. in vfio_unregister_group_dev()
957 if (list_empty(&group->device_list)) in vfio_unregister_group_dev()
958 wait_event(group->container_q, !group->container); in vfio_unregister_group_dev()
961 vfio_group_put(group); in vfio_unregister_group_dev()
1037 struct vfio_group *group; in __vfio_container_attach_groups() local
1040 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
1041 ret = driver->ops->attach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1049 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
1051 driver->ops->detach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1067 * the group can be assigned to specific users. Therefore, only by in vfio_ioctl_set_iommu()
1068 * adding a group to a container does the user get the privilege of in vfio_ioctl_set_iommu()
1250 * VFIO Group fd, /dev/vfio/$GROUP
1252 static void __vfio_group_unset_container(struct vfio_group *group) in __vfio_group_unset_container() argument
1254 struct vfio_container *container = group->container; in __vfio_group_unset_container()
1262 group->iommu_group); in __vfio_group_unset_container()
1264 group->container = NULL; in __vfio_group_unset_container()
1265 wake_up(&group->container_q); in __vfio_group_unset_container()
1266 list_del(&group->container_next); in __vfio_group_unset_container()
1268 /* Detaching the last group deprivileges a container, remove iommu */ in __vfio_group_unset_container()
1284 * the group, we know that still exists, therefore the only valid
1287 static int vfio_group_unset_container(struct vfio_group *group) in vfio_group_unset_container() argument
1289 int users = atomic_cmpxchg(&group->container_users, 1, 0); in vfio_group_unset_container()
1296 __vfio_group_unset_container(group); in vfio_group_unset_container()
1303 * implicitly removes the group from the container. That is, if the
1304 * group file descriptor is closed, as well as any device file descriptors,
1305 * the group is free.
1307 static void vfio_group_try_dissolve_container(struct vfio_group *group) in vfio_group_try_dissolve_container() argument
1309 if (0 == atomic_dec_if_positive(&group->container_users)) in vfio_group_try_dissolve_container()
1310 __vfio_group_unset_container(group); in vfio_group_try_dissolve_container()
1313 static int vfio_group_set_container(struct vfio_group *group, int container_fd) in vfio_group_set_container() argument
1320 if (atomic_read(&group->container_users)) in vfio_group_set_container()
1323 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_set_container()
1343 container->noiommu != group->noiommu) { in vfio_group_set_container()
1351 group->iommu_group); in vfio_group_set_container()
1356 group->container = container; in vfio_group_set_container()
1357 container->noiommu = group->noiommu; in vfio_group_set_container()
1358 list_add(&group->container_next, &container->group_list); in vfio_group_set_container()
1360 /* Get a reference on the container and mark a user within the group */ in vfio_group_set_container()
1362 atomic_inc(&group->container_users); in vfio_group_set_container()
1370 static bool vfio_group_viable(struct vfio_group *group) in vfio_group_viable() argument
1372 return (iommu_group_for_each_dev(group->iommu_group, in vfio_group_viable()
1373 group, vfio_dev_viable) == 0); in vfio_group_viable()
1376 static int vfio_group_add_container_user(struct vfio_group *group) in vfio_group_add_container_user() argument
1378 if (!atomic_inc_not_zero(&group->container_users)) in vfio_group_add_container_user()
1381 if (group->noiommu) { in vfio_group_add_container_user()
1382 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1385 if (!group->container->iommu_driver || !vfio_group_viable(group)) { in vfio_group_add_container_user()
1386 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1395 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) in vfio_group_get_device_fd() argument
1401 if (0 == atomic_read(&group->container_users) || in vfio_group_get_device_fd()
1402 !group->container->iommu_driver || !vfio_group_viable(group)) in vfio_group_get_device_fd()
1405 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_get_device_fd()
1408 device = vfio_device_get_from_name(group, buf); in vfio_group_get_device_fd()
1446 atomic_inc(&group->container_users); in vfio_group_get_device_fd()
1450 if (group->noiommu) in vfio_group_get_device_fd()
1460 struct vfio_group *group = filep->private_data; in vfio_group_fops_unl_ioctl() local
1479 if (vfio_group_viable(group)) in vfio_group_fops_unl_ioctl()
1482 if (group->container) in vfio_group_fops_unl_ioctl()
1501 ret = vfio_group_set_container(group, fd); in vfio_group_fops_unl_ioctl()
1505 ret = vfio_group_unset_container(group); in vfio_group_fops_unl_ioctl()
1515 ret = vfio_group_get_device_fd(group, buf); in vfio_group_fops_unl_ioctl()
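These ioctls implement the documented userspace flow (Documentation/vfio.txt); a trimmed sketch, where the group number and BDF are purely illustrative (noiommu groups appear as /dev/vfio/noiommu-$GROUP per the "%s%d" device name format above):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    int container = open("/dev/vfio/vfio", O_RDWR);
    int group = open("/dev/vfio/26", O_RDWR);                /* 26 is an example */
    struct vfio_group_status status = { .argsz = sizeof(status) };

    ioctl(group, VFIO_GROUP_GET_STATUS, &status);            /* -> vfio_group_viable() */
    if (status.flags & VFIO_GROUP_FLAGS_VIABLE) {
            ioctl(group, VFIO_GROUP_SET_CONTAINER, &container); /* -> vfio_group_set_container() */
            ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); /* -> vfio_ioctl_set_iommu() */
            int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD,
                               "0000:06:0d.0");              /* -> vfio_group_get_device_fd() */
    }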
1526 struct vfio_group *group; in vfio_group_fops_open() local
1529 group = vfio_group_get_from_minor(iminor(inode)); in vfio_group_fops_open()
1530 if (!group) in vfio_group_fops_open()
1533 if (group->noiommu && !capable(CAP_SYS_RAWIO)) { in vfio_group_fops_open()
1534 vfio_group_put(group); in vfio_group_fops_open()
1538 /* Do we need multiple instances of the group open? Seems not. */ in vfio_group_fops_open()
1539 opened = atomic_cmpxchg(&group->opened, 0, 1); in vfio_group_fops_open()
1541 vfio_group_put(group); in vfio_group_fops_open()
1546 if (group->container) { in vfio_group_fops_open()
1547 atomic_dec(&group->opened); in vfio_group_fops_open()
1548 vfio_group_put(group); in vfio_group_fops_open()
1553 if (WARN_ON(group->notifier.head)) in vfio_group_fops_open()
1554 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_group_fops_open()
1556 filep->private_data = group; in vfio_group_fops_open()
1563 struct vfio_group *group = filep->private_data; in vfio_group_fops_release() local
1567 vfio_group_try_dissolve_container(group); in vfio_group_fops_release()
1569 atomic_dec(&group->opened); in vfio_group_fops_release()
1571 vfio_group_put(group); in vfio_group_fops_release()
1593 vfio_group_try_dissolve_container(device->group); in vfio_device_fops_release()
1660 * - attaching group(s) to it;
1665 * 2. User space passes a group fd to an external user.
1668 * - the group is initialized;
1672 * the VFIO group from disposal before KVM exits.
1678 * vfio_group_put_external_user() to release the VFIO group.
1683 struct vfio_group *group = filep->private_data; in vfio_group_get_external_user() local
1689 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user()
1693 vfio_group_get(group); in vfio_group_get_external_user()
1695 return group; in vfio_group_get_external_user()
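Concretely, an in-kernel consumer such as the kvm-vfio device follows this contract roughly as below; a sketch, with the fd-to-file lookup assumed rather than shown in the matches:

    static int my_attach_group(int group_fd)
    {
            struct fd f = fdget(group_fd);
            struct vfio_group *grp;

            if (!f.file)
                    return -EBADF;

            grp = vfio_group_get_external_user(f.file); /* checks fops, takes refs */
            fdput(f);
            if (IS_ERR(grp))
                    return PTR_ERR(grp);

            pr_info("iommu group %d\n", vfio_external_user_iommu_id(grp));
            /* ... hold grp for the lifetime of the external use, then: */
            vfio_group_put_external_user(grp);
            return 0;
    }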
1703  * - A VFIO group is associated with the device;
1704 * - IOMMU is set for the group.
1706 * increments the container user counter to prevent the VFIO group
1708 * to the VFIO group.
1710 * When the external user finishes using the VFIO group, it calls
1711 * vfio_group_put_external_user() to release the VFIO group and
1715 * Return error PTR or pointer to VFIO group.
1720 struct vfio_group *group; in vfio_group_get_external_user_from_dev() local
1723 group = vfio_group_get_from_dev(dev); in vfio_group_get_external_user_from_dev()
1724 if (!group) in vfio_group_get_external_user_from_dev()
1727 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user_from_dev()
1729 vfio_group_put(group); in vfio_group_get_external_user_from_dev()
1733 return group; in vfio_group_get_external_user_from_dev()
1737 void vfio_group_put_external_user(struct vfio_group *group) in vfio_group_put_external_user() argument
1739 vfio_group_try_dissolve_container(group); in vfio_group_put_external_user()
1740 vfio_group_put(group); in vfio_group_put_external_user()
1747 struct vfio_group *group = filep->private_data; in vfio_external_group_match_file() local
1749 return (filep->f_op == &vfio_group_fops) && (group == test_group); in vfio_external_group_match_file()
1753 int vfio_external_user_iommu_id(struct vfio_group *group) in vfio_external_user_iommu_id() argument
1755 return iommu_group_id(group->iommu_group); in vfio_external_user_iommu_id()
1759 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) in vfio_external_check_extension() argument
1761 return vfio_ioctl_check_extension(group->container, arg); in vfio_external_check_extension()
1899 struct vfio_group *group; in vfio_pin_pages() local
1909 group = vfio_group_get_from_dev(dev); in vfio_pin_pages()
1910 if (!group) in vfio_pin_pages()
1913 if (group->dev_counter > 1) { in vfio_pin_pages()
1918 ret = vfio_group_add_container_user(group); in vfio_pin_pages()
1922 container = group->container; in vfio_pin_pages()
1926 group->iommu_group, user_pfn, in vfio_pin_pages()
1931 vfio_group_try_dissolve_container(group); in vfio_pin_pages()
1934 vfio_group_put(group); in vfio_pin_pages()
1951 struct vfio_group *group; in vfio_unpin_pages() local
1961 group = vfio_group_get_from_dev(dev); in vfio_unpin_pages()
1962 if (!group) in vfio_unpin_pages()
1965 ret = vfio_group_add_container_user(group); in vfio_unpin_pages()
1969 container = group->container; in vfio_unpin_pages()
1977 vfio_group_try_dissolve_container(group); in vfio_unpin_pages()
1980 vfio_group_put(group); in vfio_unpin_pages()
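Usage-wise, a mediated-device vendor driver pins guest pages through its struct device; a sketch inferred from the signatures matched here, with gfn and mdev as placeholders:

    static int my_pin_one(struct mdev_device *mdev, unsigned long gfn,
                          unsigned long *phys_pfn)
    {
            unsigned long user_pfn = gfn;   /* guest IOVA >> PAGE_SHIFT */
            int ret;

            ret = vfio_pin_pages(mdev_dev(mdev), &user_pfn, 1,
                                 IOMMU_READ | IOMMU_WRITE, phys_pfn);
            /* returns the number of pages pinned on success */
            return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
    }

    /* later, to release the pin: */
    /* vfio_unpin_pages(mdev_dev(mdev), &user_pfn, 1); */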
1987 * VFIO group.
1991  * so as to prevent disposal of the VFIO group in the middle of the call.
1992 * But it can keep the reference to the VFIO group for several calls into
1994  * After it finishes using the VFIO group, the caller needs to release the
1995 * VFIO group by calling vfio_group_put_external_user().
1997 * @group [in] : VFIO group
2006 int vfio_group_pin_pages(struct vfio_group *group, in vfio_group_pin_pages() argument
2014 if (!group || !user_iova_pfn || !phys_pfn || !npage) in vfio_group_pin_pages()
2017 if (group->dev_counter > 1) in vfio_group_pin_pages()
2023 container = group->container; in vfio_group_pin_pages()
2027 group->iommu_group, user_iova_pfn, in vfio_group_pin_pages()
2037 * Unpin a set of guest IOVA PFNs for a VFIO group.
2041  * so as to prevent disposal of the VFIO group in the middle of the call.
2042 * But it can keep the reference to the VFIO group for several calls into
2044  * After it finishes using the VFIO group, the caller needs to release the
2045 * VFIO group by calling vfio_group_put_external_user().
2047  * @group [in] : VFIO group
2054 int vfio_group_unpin_pages(struct vfio_group *group, in vfio_group_unpin_pages() argument
2061 if (!group || !user_iova_pfn || !npage) in vfio_group_unpin_pages()
2067 container = group->container; in vfio_group_unpin_pages()
2092  * so as to prevent disposal of the VFIO group in the middle of the call.
2093 * But it can keep the reference to the VFIO group for several calls into
2095  * After it finishes using the VFIO group, the caller needs to release the
2096 * VFIO group by calling vfio_group_put_external_user().
2098 * @group [in] : VFIO group
2105 int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, in vfio_dma_rw() argument
2112 if (!group || !data || len <= 0) in vfio_dma_rw()
2115 container = group->container; in vfio_dma_rw()
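The signature at 2105 gives the call shape; a usage sketch for a caller already holding the group as an external user (the final argument selects the direction):

    /* Copy guest memory out through the group's container mapping. */
    static int my_read_guest(struct vfio_group *group, dma_addr_t iova,
                             void *buf, size_t len)
    {
            return vfio_dma_rw(group, iova, buf, len, false); /* false = read */
    }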
2128 static int vfio_register_iommu_notifier(struct vfio_group *group, in vfio_register_iommu_notifier() argument
2136 ret = vfio_group_add_container_user(group); in vfio_register_iommu_notifier()
2140 container = group->container; in vfio_register_iommu_notifier()
2148 vfio_group_try_dissolve_container(group); in vfio_register_iommu_notifier()
2153 static int vfio_unregister_iommu_notifier(struct vfio_group *group, in vfio_unregister_iommu_notifier() argument
2160 ret = vfio_group_add_container_user(group); in vfio_unregister_iommu_notifier()
2164 container = group->container; in vfio_unregister_iommu_notifier()
2172 vfio_group_try_dissolve_container(group); in vfio_unregister_iommu_notifier()
2177 void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) in vfio_group_set_kvm() argument
2179 group->kvm = kvm; in vfio_group_set_kvm()
2180 blocking_notifier_call_chain(&group->notifier, in vfio_group_set_kvm()
2185 static int vfio_register_group_notifier(struct vfio_group *group, in vfio_register_group_notifier() argument
2202 ret = vfio_group_add_container_user(group); in vfio_register_group_notifier()
2206 ret = blocking_notifier_chain_register(&group->notifier, nb); in vfio_register_group_notifier()
2212 if (!ret && set_kvm && group->kvm) in vfio_register_group_notifier()
2213 blocking_notifier_call_chain(&group->notifier, in vfio_register_group_notifier()
2214 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); in vfio_register_group_notifier()
2216 vfio_group_try_dissolve_container(group); in vfio_register_group_notifier()
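On the receiving end, a driver's group notifier sees the kvm pointer set at 2179 delivered as the notifier data (NULL when KVM unsets it); a sketch:

    static int my_group_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
    {
            if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
                    struct kvm *kvm = data;   /* NULL on unset */
                    /* cache or drop the association as needed */
            }
            return NOTIFY_OK;
    }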
2221 static int vfio_unregister_group_notifier(struct vfio_group *group, in vfio_unregister_group_notifier() argument
2226 ret = vfio_group_add_container_user(group); in vfio_unregister_group_notifier()
2230 ret = blocking_notifier_chain_unregister(&group->notifier, nb); in vfio_unregister_group_notifier()
2232 vfio_group_try_dissolve_container(group); in vfio_unregister_group_notifier()
2240 struct vfio_group *group; in vfio_register_notifier() local
2246 group = vfio_group_get_from_dev(dev); in vfio_register_notifier()
2247 if (!group) in vfio_register_notifier()
2252 ret = vfio_register_iommu_notifier(group, events, nb); in vfio_register_notifier()
2255 ret = vfio_register_group_notifier(group, events, nb); in vfio_register_notifier()
2261 vfio_group_put(group); in vfio_register_notifier()
2269 struct vfio_group *group; in vfio_unregister_notifier() local
2275 group = vfio_group_get_from_dev(dev); in vfio_unregister_notifier()
2276 if (!group) in vfio_unregister_notifier()
2281 ret = vfio_unregister_iommu_notifier(group, nb); in vfio_unregister_notifier()
2284 ret = vfio_unregister_group_notifier(group, nb); in vfio_unregister_notifier()
2290 vfio_group_put(group); in vfio_unregister_notifier()
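Putting the two registration paths together, a consumer such as an mdev driver subscribes per device; a sketch with a hypothetical callback my_dma_unmap_cb():

    static struct notifier_block my_nb = { .notifier_call = my_dma_unmap_cb };

    static int my_subscribe(struct device *dev)
    {
            unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

            return vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, &my_nb);
    }

    /* and on teardown: */
    /* vfio_unregister_notifier(dev, VFIO_IOMMU_NOTIFY, &my_nb); */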
2327 /* /dev/vfio/$GROUP */ in vfio_init()