
Lines matching full:group in drivers/vfio/vfio.c

Each match below shows the file's own line number, the matching source line, and the enclosing function or declaration context (member, local, argument).

99 	struct vfio_group		*group;  member
117 * removes the device from the dummy group and cannot be nested.
121 struct iommu_group *group; in vfio_iommu_group_get() local
124 group = iommu_group_get(dev); in vfio_iommu_group_get()
128 * With noiommu enabled, an IOMMU group will be created for a device in vfio_iommu_group_get()
133 if (group || !noiommu || iommu_present(dev->bus)) in vfio_iommu_group_get()
134 return group; in vfio_iommu_group_get()
136 group = iommu_group_alloc(); in vfio_iommu_group_get()
137 if (IS_ERR(group)) in vfio_iommu_group_get()
140 iommu_group_set_name(group, "vfio-noiommu"); in vfio_iommu_group_get()
141 iommu_group_set_iommudata(group, &noiommu, NULL); in vfio_iommu_group_get()
142 ret = iommu_group_add_device(group, dev); in vfio_iommu_group_get()
144 iommu_group_put(group); in vfio_iommu_group_get()
149 * Where to taint? At this point we've added an IOMMU group for a in vfio_iommu_group_get()
157 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); in vfio_iommu_group_get()
160 return group; in vfio_iommu_group_get()
164 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) in vfio_iommu_group_put() argument
167 if (iommu_group_get_iommudata(group) == &noiommu) in vfio_iommu_group_put()
171 iommu_group_put(group); in vfio_iommu_group_put()
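
Taken together, the vfio_iommu_group_get()/vfio_iommu_group_put() fragments above describe the noiommu fallback: when a device has no real IOMMU group, noiommu mode fabricates a dummy group whose iommudata is set to the address of the noiommu flag, and the put path uses that sentinel to know the group must be dismantled by hand. A condensed sketch of the pair, reconstructed from the matches (the exact taint call and error paths are filled in from context):

#include <linux/device.h>
#include <linux/iommu.h>

static bool noiommu;	/* module parameter; its address doubles as a sentinel */

/* Sketch: fall back to a fabricated group when no IOMMU backs the device. */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	/* Real group found, noiommu disabled, or an IOMMU exists: done. */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	if (iommu_group_add_device(group, dev)) {
		iommu_group_put(group);
		return NULL;
	}

	/* Userspace now owns DMA with no isolation: taint the kernel. */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");

	return group;
}

/* Sketch: the sentinel iommudata tells us this dummy group is ours to undo. */
void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);

	iommu_group_put(group);
}
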
272 * Group minor allocation/free - both called with vfio.group_lock held
274 static int vfio_alloc_group_minor(struct vfio_group *group) in vfio_alloc_group_minor() argument
276 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); in vfio_alloc_group_minor()
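
The minor allocator above maps a chardev minor to its vfio_group through an IDR; the matching free helper's body is not among the matches, but idr_remove() is its natural counterpart. A minimal sketch of the pair, assuming both run under vfio.group_lock as the comment at 272 states:

#include <linux/idr.h>

/* Sketch: callers hold vfio.group_lock, so the IDR needs no extra locking. */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	/* Allocate a free minor in [0, MINORMASK] and map it to the group. */
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}
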
286 static void vfio_group_get(struct vfio_group *group);
291 * it's freed via kref. Must support container/group/device being
312 static void vfio_group_unlock_and_free(struct vfio_group *group) in vfio_group_unlock_and_free() argument
317 * that the group is no longer in vfio.group_list. in vfio_group_unlock_and_free()
319 iommu_group_unregister_notifier(group->iommu_group, &group->nb); in vfio_group_unlock_and_free()
320 kfree(group); in vfio_group_unlock_and_free()
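
vfio_group_unlock_and_free() is shaped to serve as the tail of a kref release callback that is invoked with vfio.group_lock held, which is the kref_put_mutex() contract used later by vfio_group_put(). A generic sketch of that pattern (the _sketch names are illustrative, not the file's own):

#include <linux/kref.h>
#include <linux/mutex.h>

/*
 * Sketch of the kref_put_mutex() contract: the release callback runs with
 * the mutex held and owns the unlock, so it can unlink the object from
 * shared lists before anyone else can look it up.
 */
static void vfio_group_release_sketch(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);

	list_del(&group->vfio_next);		/* still under vfio.group_lock */
	mutex_unlock(&vfio.group_lock);		/* release owns the unlock */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

static void vfio_group_put_sketch(struct vfio_group *group)
{
	/* Takes vfio.group_lock only if this put drops the last reference. */
	kref_put_mutex(&group->kref, vfio_group_release_sketch, &vfio.group_lock);
}
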
324 * Group objects - create, release, get, put, search
328 struct vfio_group *group, *tmp; in vfio_create_group() local
332 group = kzalloc(sizeof(*group), GFP_KERNEL); in vfio_create_group()
333 if (!group) in vfio_create_group()
336 kref_init(&group->kref); in vfio_create_group()
337 INIT_LIST_HEAD(&group->device_list); in vfio_create_group()
338 mutex_init(&group->device_lock); in vfio_create_group()
339 INIT_LIST_HEAD(&group->unbound_list); in vfio_create_group()
340 mutex_init(&group->unbound_lock); in vfio_create_group()
341 atomic_set(&group->container_users, 0); in vfio_create_group()
342 atomic_set(&group->opened, 0); in vfio_create_group()
343 init_waitqueue_head(&group->container_q); in vfio_create_group()
344 group->iommu_group = iommu_group; in vfio_create_group()
346 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); in vfio_create_group()
348 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_create_group()
350 group->nb.notifier_call = vfio_iommu_group_notifier; in vfio_create_group()
356 * do anything unless it can find the group in vfio.group_list, so in vfio_create_group()
359 ret = iommu_group_register_notifier(iommu_group, &group->nb); in vfio_create_group()
361 kfree(group); in vfio_create_group()
367 /* Did we race creating this group? */ in vfio_create_group()
371 vfio_group_unlock_and_free(group); in vfio_create_group()
376 minor = vfio_alloc_group_minor(group); in vfio_create_group()
378 vfio_group_unlock_and_free(group); in vfio_create_group()
384 group, "%s%d", group->noiommu ? "noiommu-" : "", in vfio_create_group()
388 vfio_group_unlock_and_free(group); in vfio_create_group()
392 group->minor = minor; in vfio_create_group()
393 group->dev = dev; in vfio_create_group()
395 list_add(&group->vfio_next, &vfio.group_list); in vfio_create_group()
399 return group; in vfio_create_group()
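
The vfio_create_group() matches show a deliberate ordering: the IOMMU notifier is registered before the group is published on vfio.group_list, so publication must re-check the list for a concurrent creator. A condensed sketch of that race check and publication, filled in from the fragments; vfio_create_group_publish() is a hypothetical wrapper used here purely for illustration:

/* Sketch: the publication step of vfio_create_group(). */
static struct vfio_group *vfio_create_group_publish(struct vfio_group *group,
						    struct iommu_group *iommu_group)
{
	struct vfio_group *tmp;
	int minor;

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);			/* reuse the winner... */
			vfio_group_unlock_and_free(group);	/* ...and drop ours */
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	group->minor = minor;
	list_add(&group->vfio_next, &vfio.group_list);
	mutex_unlock(&vfio.group_lock);

	return group;
}
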
405 struct vfio_group *group = container_of(kref, struct vfio_group, kref); in vfio_group_release() local
407 struct iommu_group *iommu_group = group->iommu_group; in vfio_group_release()
409 WARN_ON(!list_empty(&group->device_list)); in vfio_group_release()
410 WARN_ON(group->notifier.head); in vfio_group_release()
413 &group->unbound_list, unbound_next) { in vfio_group_release()
418 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); in vfio_group_release()
419 list_del(&group->vfio_next); in vfio_group_release()
420 vfio_free_group_minor(group->minor); in vfio_group_release()
421 vfio_group_unlock_and_free(group); in vfio_group_release()
425 static void vfio_group_put(struct vfio_group *group) in vfio_group_put() argument
427 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); in vfio_group_put()
432 struct vfio_group *group; member
441 vfio_group_put(do_work->group); in vfio_group_put_bg()
445 static void vfio_group_schedule_put(struct vfio_group *group) in vfio_group_schedule_put() argument
454 do_work->group = group; in vfio_group_schedule_put()
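
vfio_group_schedule_put() exists because dropping the final group reference from inside the group's own IOMMU notifier would unregister that notifier while it is still running on the stack; the put is bounced to a workqueue instead. A sketch of the deferral, assembled from the fragments above:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work =
		container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);	/* safe: no notifier on this stack */
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}
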
458 /* Assume group_lock or group reference is held */
459 static void vfio_group_get(struct vfio_group *group) in vfio_group_get() argument
461 kref_get(&group->kref); in vfio_group_get()
466 * sure the group pointer is valid under lock and get a reference.
468 static struct vfio_group *vfio_group_try_get(struct vfio_group *group) in vfio_group_try_get() argument
470 struct vfio_group *target = group; in vfio_group_try_get()
473 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_try_get()
474 if (group == target) { in vfio_group_try_get()
475 vfio_group_get(group); in vfio_group_try_get()
477 return group; in vfio_group_try_get()
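
vfio_group_try_get() turns a possibly-stale pointer into a safe reference by confirming the group is still on vfio.group_list before touching its kref; a bare kref_get() could race the release path. The matches above omit the locking and the failure path, so here is the filled-in sketch:

/*
 * Sketch: confirm the pointer is still on vfio.group_list before taking
 * a reference; return NULL if the group is already being torn down.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *target)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}
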
488 struct vfio_group *group; in vfio_group_get_from_iommu() local
491 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_get_from_iommu()
492 if (group->iommu_group == iommu_group) { in vfio_group_get_from_iommu()
493 vfio_group_get(group); in vfio_group_get_from_iommu()
495 return group; in vfio_group_get_from_iommu()
505 struct vfio_group *group; in vfio_group_get_from_minor() local
508 group = idr_find(&vfio.group_idr, minor); in vfio_group_get_from_minor()
509 if (!group) { in vfio_group_get_from_minor()
513 vfio_group_get(group); in vfio_group_get_from_minor()
516 return group; in vfio_group_get_from_minor()
522 struct vfio_group *group; in vfio_group_get_from_dev() local
528 group = vfio_group_get_from_iommu(iommu_group); in vfio_group_get_from_dev()
531 return group; in vfio_group_get_from_dev()
538 struct vfio_device *vfio_group_create_device(struct vfio_group *group, in vfio_group_create_device() argument
551 device->group = group; in vfio_group_create_device()
556 /* No need to get group_lock, caller has group reference */ in vfio_group_create_device()
557 vfio_group_get(group); in vfio_group_create_device()
559 mutex_lock(&group->device_lock); in vfio_group_create_device()
560 list_add(&device->group_next, &group->device_list); in vfio_group_create_device()
561 mutex_unlock(&group->device_lock); in vfio_group_create_device()
570 struct vfio_group *group = device->group; in vfio_device_release() local
573 mutex_unlock(&group->device_lock); in vfio_device_release()
583 /* Device reference always implies a group reference */
586 struct vfio_group *group = device->group; in vfio_device_put() local
587 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); in vfio_device_put()
588 vfio_group_put(group); in vfio_device_put()
594 vfio_group_get(device->group); in vfio_device_get()
598 static struct vfio_device *vfio_group_get_device(struct vfio_group *group, in vfio_group_get_device() argument
603 mutex_lock(&group->device_lock); in vfio_group_get_device()
604 list_for_each_entry(device, &group->device_list, group_next) { in vfio_group_get_device()
607 mutex_unlock(&group->device_lock); in vfio_group_get_device()
611 mutex_unlock(&group->device_lock); in vfio_group_get_device()
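
The vfio_group_get_device() fragments omit the match test; reconstructed, the helper walks the group's device list under device_lock and hands back a referenced device (the device->dev == dev comparison is assumed from context):

/* Sketch: look a device up on its group's list, taking a reference. */
static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);	/* also gets the group */
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);

	return NULL;
}
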
618 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
624 * then all of the downstream devices will be part of the same IOMMU group as
647 * A vfio group is viable for use by userspace if all devices are in
656 * group. The second is to test if the device exists on the group
662 struct vfio_group *group = data; in vfio_dev_viable() local
668 mutex_lock(&group->unbound_lock); in vfio_dev_viable()
669 list_for_each_entry(unbound, &group->unbound_list, unbound_next) { in vfio_dev_viable()
675 mutex_unlock(&group->unbound_lock); in vfio_dev_viable()
680 device = vfio_group_get_device(group, dev); in vfio_dev_viable()
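
Read with the comment at 647, vfio_dev_viable() declares a device safe if any of three tests passes: it is parked on the group's unbound_list, it is driverless or bound to a trusted stub driver, or it is already registered with VFIO. A hedged reconstruction; vfio_dev_whitelisted() stands in for the stub-driver test and is named here for illustration:

/* Sketch: returns 0 if the device does not compromise group viability. */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	/* Test 1: a device mid-unbind is parked on unbound_list. */
	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	/* Test 2: driverless, or bound to a known-safe stub driver. */
	if (!ret || !dev->driver || vfio_dev_whitelisted(dev, dev->driver))
		return 0;

	/* Test 3: the device is registered with vfio itself. */
	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}
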
692 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) in vfio_group_nb_add_dev() argument
697 device = vfio_group_get_device(group, dev); in vfio_group_nb_add_dev()
704 if (!atomic_read(&group->container_users)) in vfio_group_nb_add_dev()
708 WARN(1, "Device %s added to live group %d!\n", dev_name(dev), in vfio_group_nb_add_dev()
709 iommu_group_id(group->iommu_group)); in vfio_group_nb_add_dev()
714 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) in vfio_group_nb_verify() argument
716 /* We don't care what happens when the group isn't in use */ in vfio_group_nb_verify()
717 if (!atomic_read(&group->container_users)) in vfio_group_nb_verify()
720 return vfio_dev_viable(dev, group); in vfio_group_nb_verify()
726 struct vfio_group *group = container_of(nb, struct vfio_group, nb); in vfio_iommu_group_notifier() local
732 * risk racing a group being removed. Ignore spurious notifies. in vfio_iommu_group_notifier()
734 group = vfio_group_try_get(group); in vfio_iommu_group_notifier()
735 if (!group) in vfio_iommu_group_notifier()
740 vfio_group_nb_add_dev(group, dev); in vfio_iommu_group_notifier()
752 pr_debug("%s: Device %s, group %d binding to driver\n", in vfio_iommu_group_notifier()
754 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
757 pr_debug("%s: Device %s, group %d bound to driver %s\n", in vfio_iommu_group_notifier()
759 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
760 BUG_ON(vfio_group_nb_verify(group, dev)); in vfio_iommu_group_notifier()
763 pr_debug("%s: Device %s, group %d unbinding from driver %s\n", in vfio_iommu_group_notifier()
765 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
768 pr_debug("%s: Device %s, group %d unbound from driver\n", in vfio_iommu_group_notifier()
770 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
772 * XXX An unbound device in a live group is ok, but we'd in vfio_iommu_group_notifier()
779 mutex_lock(&group->unbound_lock); in vfio_iommu_group_notifier()
781 &group->unbound_list, unbound_next) { in vfio_iommu_group_notifier()
788 mutex_unlock(&group->unbound_lock); in vfio_iommu_group_notifier()
793 * If we're the last reference to the group, the group will be in vfio_iommu_group_notifier()
794 * released, which includes unregistering the iommu group notifier. in vfio_iommu_group_notifier()
799 vfio_group_schedule_put(group); in vfio_iommu_group_notifier()
810 struct vfio_group *group; in vfio_add_group_dev() local
817 group = vfio_group_get_from_iommu(iommu_group); in vfio_add_group_dev()
818 if (!group) { in vfio_add_group_dev()
819 group = vfio_create_group(iommu_group); in vfio_add_group_dev()
820 if (IS_ERR(group)) { in vfio_add_group_dev()
822 return PTR_ERR(group); in vfio_add_group_dev()
832 device = vfio_group_get_device(group, dev); in vfio_add_group_dev()
834 WARN(1, "Device %s already exists on group %d\n", in vfio_add_group_dev()
837 vfio_group_put(group); in vfio_add_group_dev()
841 device = vfio_group_create_device(group, dev, ops, device_data); in vfio_add_group_dev()
843 vfio_group_put(group); in vfio_add_group_dev()
852 vfio_group_put(group); in vfio_add_group_dev()
867 struct vfio_group *group; in vfio_device_get_from_dev() local
870 group = vfio_group_get_from_dev(dev); in vfio_device_get_from_dev()
871 if (!group) in vfio_device_get_from_dev()
874 device = vfio_group_get_device(group, dev); in vfio_device_get_from_dev()
875 vfio_group_put(group); in vfio_device_get_from_dev()
881 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, in vfio_device_get_from_name() argument
886 mutex_lock(&group->device_lock); in vfio_device_get_from_name()
887 list_for_each_entry(it, &group->device_list, group_next) { in vfio_device_get_from_name()
894 mutex_unlock(&group->device_lock); in vfio_device_get_from_name()
915 struct vfio_group *group = device->group; in vfio_del_group_dev() local
922 * The group exists so long as we have a device reference. Get in vfio_del_group_dev()
923 * a group reference and use it to scan for the device going away. in vfio_del_group_dev()
925 vfio_group_get(group); in vfio_del_group_dev()
928 * When the device is removed from the group, the group suddenly in vfio_del_group_dev()
930 * completes), but it's not present in the group. This is bad news in vfio_del_group_dev()
931 * for any external users that need to re-acquire a group reference in vfio_del_group_dev()
939 mutex_lock(&group->unbound_lock); in vfio_del_group_dev()
940 list_add(&unbound->unbound_next, &group->unbound_list); in vfio_del_group_dev()
941 mutex_unlock(&group->unbound_lock); in vfio_del_group_dev()
948 * If the device is still present in the group after the above in vfio_del_group_dev()
958 device = vfio_group_get_device(group, dev); in vfio_del_group_dev()
985 * In order to support multiple devices per group, devices can be in vfio_del_group_dev()
986 * plucked from the group while other devices in the group are still in vfio_del_group_dev()
987 * in use. The container persists with this group and those remaining in vfio_del_group_dev()
989 * by binding this device to another driver while the group is still in in vfio_del_group_dev()
991 * or potentially the only, device in the group there can be no other in vfio_del_group_dev()
992 * in-use devices in the group. The user has done their due diligence in vfio_del_group_dev()
994 * we need to make sure the group is detached from the container. in vfio_del_group_dev()
998 if (list_empty(&group->device_list)) in vfio_del_group_dev()
999 wait_event(group->container_q, !group->container); in vfio_del_group_dev()
1001 vfio_group_put(group); in vfio_del_group_dev()
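
The comments in vfio_del_group_dev() describe a three-stage teardown: park the device on unbound_list so viability keeps passing, nag the user through the driver's ->request() callback until every open fd is released, and finally wait for the emptied group to detach from its container. A condensed sketch of the wait loop; vfio_del_group_dev_wait() is a hypothetical wrapper, vfio.release_q and vfio_dev_present() are assumed from context, and the 10-second interval is illustrative:

/* Sketch: the waiting half of vfio_del_group_dev(). */
static void vfio_del_group_dev_wait(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;
	unsigned int i = 0;

	/* Keep asking the user to release the device until it is gone. */
	while ((device = vfio_group_get_device(group, dev))) {
		if (device->ops->request)
			device->ops->request(device->device_data, i++);

		vfio_device_put(device);

		if (!wait_event_timeout(vfio.release_q,
					!vfio_dev_present(group, dev),
					HZ * 10))
			dev_warn(dev, "Device is still in use, it must be released\n");
	}

	/* Last device out: block until the group leaves its container. */
	if (list_empty(&group->device_list))
		wait_event(group->container_q, !group->container);
}
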
1067 struct vfio_group *group; in __vfio_container_attach_groups() local
1070 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
1071 ret = driver->ops->attach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1079 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
1081 driver->ops->detach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1097 * the group can be assigned to specific users. Therefore, only by in vfio_ioctl_set_iommu()
1098 * adding a group to a container does the user get the privilege of in vfio_ioctl_set_iommu()
1291 * VFIO Group fd, /dev/vfio/$GROUP
1293 static void __vfio_group_unset_container(struct vfio_group *group) in __vfio_group_unset_container() argument
1295 struct vfio_container *container = group->container; in __vfio_group_unset_container()
1303 group->iommu_group); in __vfio_group_unset_container()
1305 group->container = NULL; in __vfio_group_unset_container()
1306 wake_up(&group->container_q); in __vfio_group_unset_container()
1307 list_del(&group->container_next); in __vfio_group_unset_container()
1309 /* Detaching the last group deprivileges a container, remove iommu */ in __vfio_group_unset_container()
1325 * the group, we know that still exists, therefore the only valid
1328 static int vfio_group_unset_container(struct vfio_group *group) in vfio_group_unset_container() argument
1330 int users = atomic_cmpxchg(&group->container_users, 1, 0); in vfio_group_unset_container()
1337 __vfio_group_unset_container(group); in vfio_group_unset_container()
1344 * implicitly removes the group from the container. That is, if the
1345 * group file descriptor is closed, as well as any device file descriptors,
1346 * the group is free.
1348 static void vfio_group_try_dissolve_container(struct vfio_group *group) in vfio_group_try_dissolve_container() argument
1350 if (0 == atomic_dec_if_positive(&group->container_users)) in vfio_group_try_dissolve_container()
1351 __vfio_group_unset_container(group); in vfio_group_try_dissolve_container()
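
Two atomic idioms govern container_users: the explicit UNSET_CONTAINER path demands exactly one remaining user (the group itself) via cmpxchg(1, 0), while the implicit path drops a user and tears down only when the count reaches zero. A minimal sketch contrasting them (the _sketch names are illustrative):

/* Sketch: explicit UNSET_CONTAINER requires the group to be the only user. */
static int vfio_group_unset_container_sketch(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;		/* no container to unset */
	if (users != 1)
		return -EBUSY;		/* device or external users remain */

	__vfio_group_unset_container(group);
	return 0;
}

/* Sketch: implicit teardown when the last user (fd, device, external) drops. */
static void vfio_group_try_dissolve_container_sketch(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}
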
1354 static int vfio_group_set_container(struct vfio_group *group, int container_fd) in vfio_group_set_container() argument
1361 if (atomic_read(&group->container_users)) in vfio_group_set_container()
1364 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_set_container()
1384 container->noiommu != group->noiommu) { in vfio_group_set_container()
1392 group->iommu_group); in vfio_group_set_container()
1397 group->container = container; in vfio_group_set_container()
1398 container->noiommu = group->noiommu; in vfio_group_set_container()
1399 list_add(&group->container_next, &container->group_list); in vfio_group_set_container()
1401 /* Get a reference on the container and mark a user within the group */ in vfio_group_set_container()
1403 atomic_inc(&group->container_users); in vfio_group_set_container()
1411 static bool vfio_group_viable(struct vfio_group *group) in vfio_group_viable() argument
1413 return (iommu_group_for_each_dev(group->iommu_group, in vfio_group_viable()
1414 group, vfio_dev_viable) == 0); in vfio_group_viable()
1417 static int vfio_group_add_container_user(struct vfio_group *group) in vfio_group_add_container_user() argument
1419 if (!atomic_inc_not_zero(&group->container_users)) in vfio_group_add_container_user()
1422 if (group->noiommu) { in vfio_group_add_container_user()
1423 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1426 if (!group->container->iommu_driver || !vfio_group_viable(group)) { in vfio_group_add_container_user()
1427 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1436 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) in vfio_group_get_device_fd() argument
1442 if (0 == atomic_read(&group->container_users) || in vfio_group_get_device_fd()
1443 !group->container->iommu_driver || !vfio_group_viable(group)) in vfio_group_get_device_fd()
1446 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_get_device_fd()
1449 device = vfio_device_get_from_name(group, buf); in vfio_group_get_device_fd()
1487 atomic_inc(&group->container_users); in vfio_group_get_device_fd()
1491 if (group->noiommu) in vfio_group_get_device_fd()
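
The vfio_group_get_device_fd() matches jump from the viability checks at 1442 straight to the container_users bump at 1487; in between, the standard pattern is to open the device through its vfio bus driver and wrap it in an anonymous inode. A sketch of that middle section; vfio_group_open_device_fd() is a hypothetical helper, and the "[vfio-device]" name and exact unwind order follow common kernel practice rather than the matched lines:

#include <linux/anon_inodes.h>
#include <linux/file.h>

/* Sketch: the fd plumbing inside vfio_group_get_device_fd(). */
static int vfio_group_open_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret, fd;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return fd;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(fd);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return PTR_ERR(filep);
	}

	fd_install(fd, filep);

	/* An open device fd counts as a container user (line 1487 above). */
	atomic_inc(&group->container_users);

	return fd;
}
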
1501 struct vfio_group *group = filep->private_data; in vfio_group_fops_unl_ioctl() local
1520 if (vfio_group_viable(group)) in vfio_group_fops_unl_ioctl()
1523 if (group->container) in vfio_group_fops_unl_ioctl()
1542 ret = vfio_group_set_container(group, fd); in vfio_group_fops_unl_ioctl()
1546 ret = vfio_group_unset_container(group); in vfio_group_fops_unl_ioctl()
1556 ret = vfio_group_get_device_fd(group, buf); in vfio_group_fops_unl_ioctl()
1576 struct vfio_group *group; in vfio_group_fops_open() local
1579 group = vfio_group_get_from_minor(iminor(inode)); in vfio_group_fops_open()
1580 if (!group) in vfio_group_fops_open()
1583 if (group->noiommu && !capable(CAP_SYS_RAWIO)) { in vfio_group_fops_open()
1584 vfio_group_put(group); in vfio_group_fops_open()
1588 /* Do we need multiple instances of the group open? Seems not. */ in vfio_group_fops_open()
1589 opened = atomic_cmpxchg(&group->opened, 0, 1); in vfio_group_fops_open()
1591 vfio_group_put(group); in vfio_group_fops_open()
1596 if (group->container) { in vfio_group_fops_open()
1597 atomic_dec(&group->opened); in vfio_group_fops_open()
1598 vfio_group_put(group); in vfio_group_fops_open()
1603 if (WARN_ON(group->notifier.head)) in vfio_group_fops_open()
1604 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_group_fops_open()
1606 filep->private_data = group; in vfio_group_fops_open()
1613 struct vfio_group *group = filep->private_data; in vfio_group_fops_release() local
1617 vfio_group_try_dissolve_container(group); in vfio_group_fops_release()
1619 atomic_dec(&group->opened); in vfio_group_fops_release()
1621 vfio_group_put(group); in vfio_group_fops_release()
1645 vfio_group_try_dissolve_container(device->group); in vfio_device_fops_release()
1723 * - attaching group(s) to it;
1728 * 2. User space passes a group fd to an external user.
1731 * - the group is initialized;
1735 * the VFIO group from disposal before KVM exits.
1741 * vfio_group_put_external_user() to release the VFIO group.
1746 struct vfio_group *group = filep->private_data; in vfio_group_get_external_user() local
1752 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user()
1756 vfio_group_get(group); in vfio_group_get_external_user()
1758 return group; in vfio_group_get_external_user()
1762 void vfio_group_put_external_user(struct vfio_group *group) in vfio_group_put_external_user() argument
1764 vfio_group_try_dissolve_container(group); in vfio_group_put_external_user()
1765 vfio_group_put(group); in vfio_group_put_external_user()
1772 struct vfio_group *group = filep->private_data; in vfio_external_group_match_file() local
1774 return (filep->f_op == &vfio_group_fops) && (group == test_group); in vfio_external_group_match_file()
1778 int vfio_external_user_iommu_id(struct vfio_group *group) in vfio_external_user_iommu_id() argument
1780 return iommu_group_id(group->iommu_group); in vfio_external_user_iommu_id()
1784 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) in vfio_external_check_extension() argument
1786 return vfio_ioctl_check_extension(group->container, arg); in vfio_external_check_extension()
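
The block comment at 1723-1741 documents the external-user contract used by KVM: resolve the group fd's file, convert it into a vfio_group reference that pins the group, query the IOMMU ID, and release when done. A consumer-side sketch; example_attach_vfio_group() and its flow are illustrative, not taken from the file:

#include <linux/file.h>
#include <linux/vfio.h>

/* Sketch: how an external user such as KVM pins a VFIO group. */
static int example_attach_vfio_group(int group_fd)
{
	struct fd f = fdget(group_fd);
	struct vfio_group *group;
	int iommu_id;

	if (!f.file)
		return -EBADF;

	group = vfio_group_get_external_user(f.file);
	fdput(f);	/* the group reference now keeps the group alive */
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_id = vfio_external_user_iommu_id(group);

	/* ... use the group; on teardown: */
	vfio_group_put_external_user(group);

	return iommu_id;
}
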
1923 struct vfio_group *group; in vfio_pin_pages() local
1933 group = vfio_group_get_from_dev(dev); in vfio_pin_pages()
1934 if (!group) in vfio_pin_pages()
1937 ret = vfio_group_add_container_user(group); in vfio_pin_pages()
1941 container = group->container; in vfio_pin_pages()
1949 vfio_group_try_dissolve_container(group); in vfio_pin_pages()
1952 vfio_group_put(group); in vfio_pin_pages()
1969 struct vfio_group *group; in vfio_unpin_pages() local
1979 group = vfio_group_get_from_dev(dev); in vfio_unpin_pages()
1980 if (!group) in vfio_unpin_pages()
1983 ret = vfio_group_add_container_user(group); in vfio_unpin_pages()
1987 container = group->container; in vfio_unpin_pages()
1995 vfio_group_try_dissolve_container(group); in vfio_unpin_pages()
1998 vfio_group_put(group); in vfio_unpin_pages()
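
vfio_pin_pages() and vfio_unpin_pages() are the exported helpers a mediated-device (mdev) vendor driver uses to translate and pin user IOVAs through the device's container; both take arrays so batches of pfns can be handled at once. A hedged usage sketch for a single page (the function name and pfn values are placeholders):

#include <linux/iommu.h>
#include <linux/vfio.h>

/* Sketch: pin one page of a user IOVA for device DMA, then release it. */
static int example_pin_one_page(struct device *mdev_dev, unsigned long iova_pfn)
{
	unsigned long phys_pfn;
	int ret;

	/* Arrays of length 1: the API is built for batches of pfns. */
	ret = vfio_pin_pages(mdev_dev, &iova_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, &phys_pfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... program the device with phys_pfn ... */

	vfio_unpin_pages(mdev_dev, &iova_pfn, 1);
	return 0;
}
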
2003 static int vfio_register_iommu_notifier(struct vfio_group *group, in vfio_register_iommu_notifier() argument
2011 ret = vfio_group_add_container_user(group); in vfio_register_iommu_notifier()
2015 container = group->container; in vfio_register_iommu_notifier()
2023 vfio_group_try_dissolve_container(group); in vfio_register_iommu_notifier()
2028 static int vfio_unregister_iommu_notifier(struct vfio_group *group, in vfio_unregister_iommu_notifier() argument
2035 ret = vfio_group_add_container_user(group); in vfio_unregister_iommu_notifier()
2039 container = group->container; in vfio_unregister_iommu_notifier()
2047 vfio_group_try_dissolve_container(group); in vfio_unregister_iommu_notifier()
2052 void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) in vfio_group_set_kvm() argument
2054 group->kvm = kvm; in vfio_group_set_kvm()
2055 blocking_notifier_call_chain(&group->notifier, in vfio_group_set_kvm()
2060 static int vfio_register_group_notifier(struct vfio_group *group, in vfio_register_group_notifier() argument
2077 ret = vfio_group_add_container_user(group); in vfio_register_group_notifier()
2081 ret = blocking_notifier_chain_register(&group->notifier, nb); in vfio_register_group_notifier()
2087 if (!ret && set_kvm && group->kvm) in vfio_register_group_notifier()
2088 blocking_notifier_call_chain(&group->notifier, in vfio_register_group_notifier()
2089 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); in vfio_register_group_notifier()
2091 vfio_group_try_dissolve_container(group); in vfio_register_group_notifier()
2096 static int vfio_unregister_group_notifier(struct vfio_group *group, in vfio_unregister_group_notifier() argument
2101 ret = vfio_group_add_container_user(group); in vfio_unregister_group_notifier()
2105 ret = blocking_notifier_chain_unregister(&group->notifier, nb); in vfio_unregister_group_notifier()
2107 vfio_group_try_dissolve_container(group); in vfio_unregister_group_notifier()
2115 struct vfio_group *group; in vfio_register_notifier() local
2121 group = vfio_group_get_from_dev(dev); in vfio_register_notifier()
2122 if (!group) in vfio_register_notifier()
2127 ret = vfio_register_iommu_notifier(group, events, nb); in vfio_register_notifier()
2130 ret = vfio_register_group_notifier(group, events, nb); in vfio_register_notifier()
2136 vfio_group_put(group); in vfio_register_notifier()
2144 struct vfio_group *group; in vfio_unregister_notifier() local
2150 group = vfio_group_get_from_dev(dev); in vfio_unregister_notifier()
2151 if (!group) in vfio_unregister_notifier()
2156 ret = vfio_unregister_iommu_notifier(group, nb); in vfio_unregister_notifier()
2159 ret = vfio_unregister_group_notifier(group, nb); in vfio_unregister_notifier()
2165 vfio_group_put(group); in vfio_unregister_notifier()
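
vfio_register_notifier()/vfio_unregister_notifier() multiplex two chains, selected by type: VFIO_IOMMU_NOTIFY (e.g., DMA unmaps from the container) and VFIO_GROUP_NOTIFY (e.g., the KVM pointer set at 2052). A consumer-side sketch for the unmap event; the example_* names and the callback body are illustrative:

#include <linux/notifier.h>
#include <linux/vfio.h>

static int example_iommu_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		/* Sketch: drop any pinnings overlapping the unmapped range. */
		pr_debug("unmap iova 0x%llx size 0x%llx\n",
			 (unsigned long long)unmap->iova,
			 (unsigned long long)unmap->size);
	}
	return NOTIFY_OK;
}

static int example_register(struct device *mdev_dev, struct notifier_block *nb)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	nb->notifier_call = example_iommu_notify;
	/* The core validates the requested event mask before registering. */
	return vfio_register_notifier(mdev_dev, VFIO_IOMMU_NOTIFY, &events, nb);
}
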
2203 /* /dev/vfio/$GROUP */ in vfio_init()