Lines Matching full:group in drivers/iommu/iommu.c
75 #define for_each_group_device(group, pos) \ argument
76 list_for_each_entry(pos, &(group)->devices, list)
80 ssize_t (*show)(struct iommu_group *group, char *buf);
81 ssize_t (*store)(struct iommu_group *group,
104 struct iommu_group *group);
110 static int __iommu_device_set_domain(struct iommu_group *group,
114 static int __iommu_group_set_domain_internal(struct iommu_group *group,
117 static int __iommu_group_set_domain(struct iommu_group *group, in __iommu_group_set_domain() argument
120 return __iommu_group_set_domain_internal(group, new_domain, 0); in __iommu_group_set_domain()
122 static void __iommu_group_set_domain_nofail(struct iommu_group *group, in __iommu_group_set_domain_nofail() argument
126 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); in __iommu_group_set_domain_nofail()
129 static int iommu_setup_default_domain(struct iommu_group *group,
133 static ssize_t iommu_group_store_type(struct iommu_group *group,
135 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
137 static void __iommu_group_free_device(struct iommu_group *group,
391 struct iommu_group *group; in iommu_init_device() local
412 group = ops->device_group(dev); in iommu_init_device()
413 if (WARN_ON_ONCE(group == NULL)) in iommu_init_device()
414 group = ERR_PTR(-EINVAL); in iommu_init_device()
415 if (IS_ERR(group)) { in iommu_init_device()
416 ret = PTR_ERR(group); in iommu_init_device()
419 dev->iommu_group = group; in iommu_init_device()
441 struct iommu_group *group = dev->iommu_group; in iommu_deinit_device() local
444 lockdep_assert_held(&group->mutex); in iommu_deinit_device()
450 * If there are still other devices in the group they are not affected in iommu_deinit_device()
461 * If this is the last driver to use the group then we must free the in iommu_deinit_device()
464 if (list_empty(&group->devices)) { in iommu_deinit_device()
465 if (group->default_domain) { in iommu_deinit_device()
466 iommu_domain_free(group->default_domain); in iommu_deinit_device()
467 group->default_domain = NULL; in iommu_deinit_device()
469 if (group->blocking_domain) { in iommu_deinit_device()
470 iommu_domain_free(group->blocking_domain); in iommu_deinit_device()
471 group->blocking_domain = NULL; in iommu_deinit_device()
473 group->domain = NULL; in iommu_deinit_device()
487 struct iommu_group *group; in __iommu_probe_device() local
502 /* Device is probed already if in a group */ in __iommu_probe_device()
510 group = dev->iommu_group; in __iommu_probe_device()
511 gdev = iommu_group_alloc_device(group, dev); in __iommu_probe_device()
512 mutex_lock(&group->mutex); in __iommu_probe_device()
522 list_add_tail(&gdev->list, &group->devices); in __iommu_probe_device()
523 WARN_ON(group->default_domain && !group->domain); in __iommu_probe_device()
524 if (group->default_domain) in __iommu_probe_device()
525 iommu_create_device_direct_mappings(group->default_domain, dev); in __iommu_probe_device()
526 if (group->domain) { in __iommu_probe_device()
527 ret = __iommu_device_set_domain(group, dev, group->domain, 0); in __iommu_probe_device()
530 } else if (!group->default_domain && !group_list) { in __iommu_probe_device()
531 ret = iommu_setup_default_domain(group, 0); in __iommu_probe_device()
534 } else if (!group->default_domain) { in __iommu_probe_device()
540 if (list_empty(&group->entry)) in __iommu_probe_device()
541 list_add_tail(&group->entry, group_list); in __iommu_probe_device()
543 mutex_unlock(&group->mutex); in __iommu_probe_device()
552 __iommu_group_free_device(group, gdev); in __iommu_probe_device()
555 mutex_unlock(&group->mutex); in __iommu_probe_device()
556 iommu_group_put(group); in __iommu_probe_device()
591 static void __iommu_group_free_device(struct iommu_group *group, in __iommu_group_free_device() argument
596 sysfs_remove_link(group->devices_kobj, grp_dev->name); in __iommu_group_free_device()
599 trace_remove_device_from_group(group->id, dev); in __iommu_group_free_device()
602 * If the group has become empty then ownership must have been in __iommu_group_free_device()
606 if (list_empty(&group->devices)) in __iommu_group_free_device()
607 WARN_ON(group->owner_cnt || in __iommu_group_free_device()
608 group->domain != group->default_domain); in __iommu_group_free_device()
617 struct iommu_group *group = dev->iommu_group; in __iommu_group_remove_device() local
620 mutex_lock(&group->mutex); in __iommu_group_remove_device()
621 for_each_group_device(group, device) { in __iommu_group_remove_device()
626 __iommu_group_free_device(group, device); in __iommu_group_remove_device()
633 mutex_unlock(&group->mutex); in __iommu_group_remove_device()
639 iommu_group_put(group); in __iommu_group_remove_device()
644 struct iommu_group *group = dev->iommu_group; in iommu_release_device() local
646 if (group) in iommu_release_device()
693 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_show() local
697 ret = attr->show(group, buf); in iommu_group_attr_show()
706 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_store() local
710 ret = attr->store(group, buf, count); in iommu_group_attr_store()
719 static int iommu_group_create_file(struct iommu_group *group, in iommu_group_create_file() argument
722 return sysfs_create_file(&group->kobj, &attr->attr); in iommu_group_create_file()
725 static void iommu_group_remove_file(struct iommu_group *group, in iommu_group_remove_file() argument
728 sysfs_remove_file(&group->kobj, &attr->attr); in iommu_group_remove_file()
731 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) in iommu_group_show_name() argument
733 return sysfs_emit(buf, "%s\n", group->name); in iommu_group_show_name()
812 int iommu_get_group_resv_regions(struct iommu_group *group, in iommu_get_group_resv_regions() argument
818 mutex_lock(&group->mutex); in iommu_get_group_resv_regions()
819 for_each_group_device(group, device) { in iommu_get_group_resv_regions()
836 mutex_unlock(&group->mutex); in iommu_get_group_resv_regions()
841 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, in iommu_group_show_resv_regions() argument
849 iommu_get_group_resv_regions(group, &group_resv_regions); in iommu_group_show_resv_regions()
863 static ssize_t iommu_group_show_type(struct iommu_group *group, in iommu_group_show_type() argument
868 mutex_lock(&group->mutex); in iommu_group_show_type()
869 if (group->default_domain) { in iommu_group_show_type()
870 switch (group->default_domain->type) { in iommu_group_show_type()
888 mutex_unlock(&group->mutex); in iommu_group_show_type()
903 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_release() local
905 pr_debug("Releasing group %d\n", group->id); in iommu_group_release()
907 if (group->iommu_data_release) in iommu_group_release()
908 group->iommu_data_release(group->iommu_data); in iommu_group_release()
910 ida_free(&iommu_group_ida, group->id); in iommu_group_release()
913 WARN_ON(group->default_domain); in iommu_group_release()
914 WARN_ON(group->blocking_domain); in iommu_group_release()
916 kfree(group->name); in iommu_group_release()
917 kfree(group); in iommu_group_release()
926 * iommu_group_alloc - Allocate a new group
929 * group. The iommu group represents the minimum granularity of the iommu.
931 * group in order to hold the group until devices are added. Use
933 * group to be automatically reclaimed once it has no devices or external
938 struct iommu_group *group; in iommu_group_alloc() local
941 group = kzalloc(sizeof(*group), GFP_KERNEL); in iommu_group_alloc()
942 if (!group) in iommu_group_alloc()
945 group->kobj.kset = iommu_group_kset; in iommu_group_alloc()
946 mutex_init(&group->mutex); in iommu_group_alloc()
947 INIT_LIST_HEAD(&group->devices); in iommu_group_alloc()
948 INIT_LIST_HEAD(&group->entry); in iommu_group_alloc()
949 xa_init(&group->pasid_array); in iommu_group_alloc()
953 kfree(group); in iommu_group_alloc()
956 group->id = ret; in iommu_group_alloc()
958 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, in iommu_group_alloc()
959 NULL, "%d", group->id); in iommu_group_alloc()
961 kobject_put(&group->kobj); in iommu_group_alloc()
965 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); in iommu_group_alloc()
966 if (!group->devices_kobj) { in iommu_group_alloc()
967 kobject_put(&group->kobj); /* triggers .release & free */ in iommu_group_alloc()
972 * The devices_kobj holds a reference on the group kobject, so in iommu_group_alloc()
973 * as long as that exists so will the group. We can therefore in iommu_group_alloc()
976 kobject_put(&group->kobj); in iommu_group_alloc()
978 ret = iommu_group_create_file(group, in iommu_group_alloc()
981 kobject_put(group->devices_kobj); in iommu_group_alloc()
985 ret = iommu_group_create_file(group, &iommu_group_attr_type); in iommu_group_alloc()
987 kobject_put(group->devices_kobj); in iommu_group_alloc()
991 pr_debug("Allocated group %d\n", group->id); in iommu_group_alloc()
993 return group; in iommu_group_alloc()
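
Example (illustrative sketch, not part of the file): a minimal ->device_group callback built on iommu_group_alloc(), mirroring what generic_device_group() does; my_device_group() is a hypothetical name.

    #include <linux/iommu.h>

    static struct iommu_group *my_device_group(struct device *dev)
    {
        /*
         * iommu_group_alloc() hands back a new group with one
         * reference held for the caller, or an ERR_PTR() on
         * failure; it never returns NULL.
         */
        return iommu_group_alloc();
    }
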
998 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
999 * @group: the group
1001 * iommu drivers can store data in the group for use when doing iommu
1003 * should hold a group reference.
1005 void *iommu_group_get_iommudata(struct iommu_group *group) in iommu_group_get_iommudata() argument
1007 return group->iommu_data; in iommu_group_get_iommudata()
1012 * iommu_group_set_iommudata - set iommu_data for a group
1013 * @group: the group
1017 * iommu drivers can store data in the group for use when doing iommu
1019 * the group has been allocated. Caller should hold a group reference.
1021 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, in iommu_group_set_iommudata() argument
1024 group->iommu_data = iommu_data; in iommu_group_set_iommudata()
1025 group->iommu_data_release = release; in iommu_group_set_iommudata()
1030 * iommu_group_set_name - set name for a group
1031 * @group: the group
1034 * Allow iommu driver to set a name for a group. When set it will
1035 * appear in a name attribute file under the group in sysfs.
1037 int iommu_group_set_name(struct iommu_group *group, const char *name) in iommu_group_set_name() argument
1041 if (group->name) { in iommu_group_set_name()
1042 iommu_group_remove_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1043 kfree(group->name); in iommu_group_set_name()
1044 group->name = NULL; in iommu_group_set_name()
1049 group->name = kstrdup(name, GFP_KERNEL); in iommu_group_set_name()
1050 if (!group->name) in iommu_group_set_name()
1053 ret = iommu_group_create_file(group, &iommu_group_attr_name); in iommu_group_set_name()
1055 kfree(group->name); in iommu_group_set_name()
1056 group->name = NULL; in iommu_group_set_name()
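
Example (illustrative sketch): attaching driver-private data and a sysfs-visible name to a group; struct my_group_data, my_release() and my_annotate_group() are hypothetical names.

    #include <linux/iommu.h>
    #include <linux/slab.h>

    struct my_group_data {
        u32 stream_id;
    };

    static void my_release(void *iommu_data)
    {
        kfree(iommu_data);    /* runs when the group is freed */
    }

    static int my_annotate_group(struct iommu_group *group, u32 sid)
    {
        struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
            return -ENOMEM;
        data->stream_id = sid;

        iommu_group_set_iommudata(group, data, my_release);

        /* Appears as .../iommu_groups/<id>/name in sysfs. */
        return iommu_group_set_name(group, "my-stream-group");
    }
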
1131 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, in iommu_group_alloc_device() argument
1143 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); in iommu_group_alloc_device()
1154 ret = sysfs_create_link_nowarn(group->devices_kobj, in iommu_group_alloc_device()
1170 trace_add_device_to_group(group->id, dev); in iommu_group_alloc_device()
1172 dev_info(dev, "Adding to iommu group %d\n", group->id); in iommu_group_alloc_device()
1182 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); in iommu_group_alloc_device()
1187 * iommu_group_add_device - add a device to an iommu group
1188 * @group: the group into which to add the device (reference should be held)
1192 * group. Adding a device increments the group reference count.
1194 int iommu_group_add_device(struct iommu_group *group, struct device *dev) in iommu_group_add_device() argument
1198 gdev = iommu_group_alloc_device(group, dev); in iommu_group_add_device()
1202 iommu_group_ref_get(group); in iommu_group_add_device()
1203 dev->iommu_group = group; in iommu_group_add_device()
1205 mutex_lock(&group->mutex); in iommu_group_add_device()
1206 list_add_tail(&gdev->list, &group->devices); in iommu_group_add_device()
1207 mutex_unlock(&group->mutex); in iommu_group_add_device()
1213 * iommu_group_remove_device - remove a device from its current group
1217 * its current group. This decrements the iommu group reference count.
1221 struct iommu_group *group = dev->iommu_group; in iommu_group_remove_device() local
1223 if (!group) in iommu_group_remove_device()
1226 dev_info(dev, "Removing from iommu group %d\n", group->id); in iommu_group_remove_device()
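
Example (illustrative sketch): the add/remove pairing for code that manages group membership by hand; most drivers let the core probe path do this instead, and my_setup()/my_teardown() are hypothetical.

    #include <linux/iommu.h>

    static int my_setup(struct iommu_group *group, struct device *dev)
    {
        /* On success this takes a group reference and links the
         * device under the group's sysfs devices/ directory. */
        return iommu_group_add_device(group, dev);
    }

    static void my_teardown(struct device *dev)
    {
        /* Drops the reference taken at add time. */
        iommu_group_remove_device(dev);
    }
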
1233 * iommu_group_for_each_dev - iterate over each device in the group
1234 * @group: the group
1238 * This function is called by group users to iterate over group devices.
1239 * Callers should hold a reference count to the group during callback.
1240 * The group->mutex is held across callbacks, which will block calls to
1243 int iommu_group_for_each_dev(struct iommu_group *group, void *data, in iommu_group_for_each_dev() argument
1249 mutex_lock(&group->mutex); in iommu_group_for_each_dev()
1250 for_each_group_device(group, device) { in iommu_group_for_each_dev()
1255 mutex_unlock(&group->mutex); in iommu_group_for_each_dev()
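
Example (illustrative sketch): walking a group with iommu_group_for_each_dev(); the callback runs with group->mutex held, so it must not re-enter the probe or release paths. count_one() and my_group_size() are hypothetical.

    #include <linux/iommu.h>

    static int count_one(struct device *dev, void *data)
    {
        int *count = data;

        (*count)++;
        return 0;    /* a non-zero return stops the walk */
    }

    static int my_group_size(struct iommu_group *group)
    {
        int count = 0;

        iommu_group_for_each_dev(group, &count, count_one);
        return count;
    }
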
1262 * iommu_group_get - Return the group for a device and increment reference
1263 * @dev: get the group that this device belongs to
1265 * This function is called by iommu drivers and users to get the group
1266 * for the specified device. If found, the group is returned and the group
1271 struct iommu_group *group = dev->iommu_group; in iommu_group_get() local
1273 if (group) in iommu_group_get()
1274 kobject_get(group->devices_kobj); in iommu_group_get()
1276 return group; in iommu_group_get()
1281 * iommu_group_ref_get - Increment reference on a group
1282 * @group: the group to use, must not be NULL
1285 * existing group. Returns the given group for convenience.
1287 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) in iommu_group_ref_get() argument
1289 kobject_get(group->devices_kobj); in iommu_group_ref_get()
1290 return group; in iommu_group_ref_get()
1295 * iommu_group_put - Decrement group reference
1296 * @group: the group to use
1299 * iommu group. Once the reference count is zero, the group is released.
1301 void iommu_group_put(struct iommu_group *group) in iommu_group_put() argument
1303 if (group) in iommu_group_put()
1304 kobject_put(group->devices_kobj); in iommu_group_put()
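
Example (illustrative sketch): the balanced get/put pattern; the reference pins devices_kobj, which in turn keeps the group alive while it is held. my_report_group() is hypothetical.

    #include <linux/device.h>
    #include <linux/iommu.h>

    static void my_report_group(struct device *dev)
    {
        struct iommu_group *group = iommu_group_get(dev);

        if (!group)
            return;    /* device has no IOMMU group */

        dev_info(dev, "in iommu group %d\n", iommu_group_id(group));

        iommu_group_put(group);    /* balance iommu_group_get() */
    }
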
1493 * matched using the group ID, the PASID valid bit and the PASID in iommu_page_response()
1494 * value. Otherwise only the group ID matches request and in iommu_page_response()
1520 * iommu_group_id - Return ID for a group
1521 * @group: the group to ID
1523 * Return the unique ID for the group matching the sysfs group number.
1525 int iommu_group_id(struct iommu_group *group) in iommu_group_id() argument
1527 return group->id; in iommu_group_id()
1548 * that may already have a group.
1554 struct iommu_group *group; in get_pci_function_alias_group() local
1565 group = get_pci_alias_group(tmp, devfns); in get_pci_function_alias_group()
1566 if (group) { in get_pci_function_alias_group()
1568 return group; in get_pci_function_alias_group()
1588 struct iommu_group *group; in get_pci_alias_group() local
1593 group = iommu_group_get(&pdev->dev); in get_pci_alias_group()
1594 if (group) in get_pci_alias_group()
1595 return group; in get_pci_alias_group()
1603 group = get_pci_alias_group(tmp, devfns); in get_pci_alias_group()
1604 if (group) { in get_pci_alias_group()
1606 return group; in get_pci_alias_group()
1609 group = get_pci_function_alias_group(tmp, devfns); in get_pci_alias_group()
1610 if (group) { in get_pci_alias_group()
1612 return group; in get_pci_alias_group()
1622 struct iommu_group *group; member
1627 * the IOMMU group if we find one along the way.
1634 data->group = iommu_group_get(&pdev->dev); in get_pci_alias_or_group()
1636 return data->group != NULL; in get_pci_alias_or_group()
1641 * iommu-group per device.
1651 * to find or create an IOMMU group for a device.
1658 struct iommu_group *group = NULL; in pci_device_group() local
1666 * be aliased due to topology in order to have its own IOMMU group. in pci_device_group()
1668 * group, use it. in pci_device_group()
1671 return data.group; in pci_device_group()
1679 * group, use it. in pci_device_group()
1690 group = iommu_group_get(&pdev->dev); in pci_device_group()
1691 if (group) in pci_device_group()
1692 return group; in pci_device_group()
1697 * device or another device aliases us, use the same group. in pci_device_group()
1699 group = get_pci_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1700 if (group) in pci_device_group()
1701 return group; in pci_device_group()
1708 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
1709 if (group) in pci_device_group()
1710 return group; in pci_device_group()
1712 /* No shared group found, allocate new */ in pci_device_group()
1717 /* Get the IOMMU group for device on fsl-mc bus */
1721 struct iommu_group *group; in fsl_mc_device_group() local
1723 group = iommu_group_get(cont_dev); in fsl_mc_device_group()
1724 if (!group) in fsl_mc_device_group()
1725 group = iommu_group_alloc(); in fsl_mc_device_group()
1726 return group; in fsl_mc_device_group()
1745 struct iommu_group *group, int req_type) in __iommu_group_alloc_default_domain() argument
1747 if (group->default_domain && group->default_domain->type == req_type) in __iommu_group_alloc_default_domain()
1748 return group->default_domain; in __iommu_group_alloc_default_domain()
1757 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) in iommu_group_alloc_default_domain() argument
1760 list_first_entry(&group->devices, struct group_device, list) in iommu_group_alloc_default_domain()
1764 lockdep_assert_held(&group->mutex); in iommu_group_alloc_default_domain()
1767 return __iommu_group_alloc_default_domain(bus, group, req_type); in iommu_group_alloc_default_domain()
1770 dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type); in iommu_group_alloc_default_domain()
1777 dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA); in iommu_group_alloc_default_domain()
1781 …pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_D… in iommu_group_alloc_default_domain()
1782 iommu_def_domain_type, group->name); in iommu_group_alloc_default_domain()
1786 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) in iommu_group_default_domain() argument
1788 return group->default_domain; in iommu_group_default_domain()
1824 static int iommu_get_default_domain_type(struct iommu_group *group, in iommu_get_default_domain_type() argument
1831 lockdep_assert_held(&group->mutex); in iommu_get_default_domain_type()
1833 for_each_group_device(group, gdev) { in iommu_get_default_domain_type()
1847 …"Device needs domain type %s, but device %s in the same iommu group requires type %s - using defau… in iommu_get_default_domain_type()
1869 struct iommu_group *group, *next; in bus_iommu_probe() local
1877 list_for_each_entry_safe(group, next, &group_list, entry) { in bus_iommu_probe()
1880 mutex_lock(&group->mutex); in bus_iommu_probe()
1883 list_del_init(&group->entry); in bus_iommu_probe()
1887 * that the cross-group default domain type and the setup of the in bus_iommu_probe()
1890 ret = iommu_setup_default_domain(group, 0); in bus_iommu_probe()
1892 mutex_unlock(&group->mutex); in bus_iommu_probe()
1895 mutex_unlock(&group->mutex); in bus_iommu_probe()
1901 * to take group->mutex, resulting in a deadlock. in bus_iommu_probe()
1903 for_each_group_device(group, gdev) in bus_iommu_probe()
1941 * for a group
1942 * @group: Group to query
1945 * msi_device_has_isolated_msi() for devices in a group. However nothing
1949 bool iommu_group_has_isolated_msi(struct iommu_group *group) in iommu_group_has_isolated_msi() argument
1954 mutex_lock(&group->mutex); in iommu_group_has_isolated_msi()
1955 for_each_group_device(group, group_dev) in iommu_group_has_isolated_msi()
1957 mutex_unlock(&group->mutex); in iommu_group_has_isolated_msi()
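
Example (illustrative sketch): the kind of gate a VFIO-style user places before trusting MSI isolation; allow_unsafe stands in for a hypothetical module parameter.

    #include <linux/iommu.h>

    static bool allow_unsafe;    /* hypothetical opt-out knob */

    static int my_check_msi_isolation(struct iommu_group *group)
    {
        /* True only if every device in the group has isolated MSI. */
        if (!iommu_group_has_isolated_msi(group) && !allow_unsafe)
            return -EPERM;
        return 0;
    }
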
2032 * Put the group's domain back to the appropriate core-owned domain - either the
2035 static void __iommu_group_set_core_domain(struct iommu_group *group) in __iommu_group_set_core_domain() argument
2039 if (group->owner) in __iommu_group_set_core_domain()
2040 new_domain = group->blocking_domain; in __iommu_group_set_core_domain()
2042 new_domain = group->default_domain; in __iommu_group_set_core_domain()
2044 __iommu_group_set_domain_nofail(group, new_domain); in __iommu_group_set_core_domain()
2077 struct iommu_group *group; in iommu_attach_device() local
2080 group = iommu_group_get(dev); in iommu_attach_device()
2081 if (!group) in iommu_attach_device()
2085 * Lock the group to make sure the device-count doesn't in iommu_attach_device()
2088 mutex_lock(&group->mutex); in iommu_attach_device()
2090 if (list_count_nodes(&group->devices) != 1) in iommu_attach_device()
2093 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2096 mutex_unlock(&group->mutex); in iommu_attach_device()
2097 iommu_group_put(group); in iommu_attach_device()
2113 struct iommu_group *group; in iommu_detach_device() local
2115 group = iommu_group_get(dev); in iommu_detach_device()
2116 if (!group) in iommu_detach_device()
2119 mutex_lock(&group->mutex); in iommu_detach_device()
2120 if (WARN_ON(domain != group->domain) || in iommu_detach_device()
2121 WARN_ON(list_count_nodes(&group->devices) != 1)) in iommu_detach_device()
2123 __iommu_group_set_core_domain(group); in iommu_detach_device()
2126 mutex_unlock(&group->mutex); in iommu_detach_device()
2127 iommu_group_put(group); in iommu_detach_device()
2134 struct iommu_group *group; in iommu_get_domain_for_dev() local
2136 group = iommu_group_get(dev); in iommu_get_domain_for_dev()
2137 if (!group) in iommu_get_domain_for_dev()
2140 domain = group->domain; in iommu_get_domain_for_dev()
2142 iommu_group_put(group); in iommu_get_domain_for_dev()
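
Example (illustrative sketch): giving a singleton-group device a private unmanaged domain; iommu_attach_device() refuses groups with more than one device. my_with_private_domain() is hypothetical.

    #include <linux/iommu.h>

    static int my_with_private_domain(struct device *dev)
    {
        struct iommu_domain *domain;
        int ret;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
            return -ENOMEM;

        ret = iommu_attach_device(domain, dev);
        if (ret)
            goto out_free;

        /* iommu_get_domain_for_dev(dev) now returns this domain;
         * a real user would map/unmap against it here. */

        iommu_detach_device(domain, dev);
    out_free:
        iommu_domain_free(domain);
        return ret;
    }
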
2150 * guarantees that the group and its default domain are valid and correct.
2158 struct iommu_group *group) in __iommu_attach_group() argument
2160 if (group->domain && group->domain != group->default_domain && in __iommu_attach_group()
2161 group->domain != group->blocking_domain) in __iommu_attach_group()
2164 return __iommu_group_set_domain(group, domain); in __iommu_attach_group()
2168 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2170 * @group: IOMMU group that will be attached
2176 * the group. In this case attaching a different domain to the
2177 * group may succeed.
2179 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2183 mutex_lock(&group->mutex); in iommu_attach_group()
2184 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2185 mutex_unlock(&group->mutex); in iommu_attach_group()
2192 * iommu_group_replace_domain - replace the domain that a group is attached to
2194 * @group: IOMMU group that will be attached to the new domain
2196 * This API allows the group to switch domains without being forced to go to
2202 int iommu_group_replace_domain(struct iommu_group *group, in iommu_group_replace_domain() argument
2210 mutex_lock(&group->mutex); in iommu_group_replace_domain()
2211 ret = __iommu_group_set_domain(group, new_domain); in iommu_group_replace_domain()
2212 mutex_unlock(&group->mutex); in iommu_group_replace_domain()
2217 static int __iommu_device_set_domain(struct iommu_group *group, in __iommu_device_set_domain() argument
2233 new_domain == group->blocking_domain)) { in __iommu_device_set_domain()
2240 if (new_domain == group->default_domain) in __iommu_device_set_domain()
2253 group->blocking_domain && in __iommu_device_set_domain()
2254 group->blocking_domain != new_domain) in __iommu_device_set_domain()
2255 __iommu_attach_device(group->blocking_domain, dev); in __iommu_device_set_domain()
2262 * If 0 is returned the group's domain is new_domain. If an error is returned
2263 * then the group's domain will be set back to the existing domain unless
2264 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
2270 * devices in a group. Ideally we'd have a single device which represents the
2271 * requestor ID of the group, but we also allow IOMMU drivers to create policy
2273 * members, but we wish to group them at a higher level (ex. untrusted
2276 static int __iommu_group_set_domain_internal(struct iommu_group *group, in __iommu_group_set_domain_internal() argument
2285 lockdep_assert_held(&group->mutex); in __iommu_group_set_domain_internal()
2287 if (group->domain == new_domain) in __iommu_group_set_domain_internal()
2296 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2302 group->domain = NULL; in __iommu_group_set_domain_internal()
2310 * either new_domain or group->domain, never something else. in __iommu_group_set_domain_internal()
2313 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2314 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, in __iommu_group_set_domain_internal()
2319 * Keep trying the other devices in the group. If a in __iommu_group_set_domain_internal()
2330 group->domain = new_domain; in __iommu_group_set_domain_internal()
2339 for_each_group_device(group, gdev) { in __iommu_group_set_domain_internal()
2345 * group->domain as NULL and let release clean everything up. in __iommu_group_set_domain_internal()
2347 if (group->domain) in __iommu_group_set_domain_internal()
2349 group, gdev->dev, group->domain, in __iommu_group_set_domain_internal()
2359 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2361 mutex_lock(&group->mutex); in iommu_detach_group()
2362 __iommu_group_set_core_domain(group); in iommu_detach_group()
2363 mutex_unlock(&group->mutex); in iommu_detach_group()
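
Example (illustrative sketch): VFIO-style group-level attach, mapping one page into a freshly allocated unmanaged domain; the IOVA is made up and my_map_once() is hypothetical.

    #include <linux/iommu.h>
    #include <linux/sizes.h>

    static int my_map_once(struct iommu_group *group, struct bus_type *bus,
                           phys_addr_t paddr)
    {
        struct iommu_domain *domain;
        int ret;

        domain = iommu_domain_alloc(bus);
        if (!domain)
            return -ENOMEM;

        ret = iommu_attach_group(domain, group);
        if (ret)
            goto out_free;

        ret = iommu_map(domain, 0x100000, paddr, SZ_4K,
                        IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

        /* ... DMA would happen here in a real user ... */

        iommu_detach_group(domain, group);
    out_free:
        iommu_domain_free(domain);
        return ret;
    }
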
2949 * iommu_setup_default_domain - Set the default_domain for the group
2950 * @group: Group to change
2953 * Allocate a default domain and set it as the current domain on the group. If
2954 * the group already has a default domain it will be changed to the target_type.
2958 static int iommu_setup_default_domain(struct iommu_group *group, in iommu_setup_default_domain() argument
2961 struct iommu_domain *old_dom = group->default_domain; in iommu_setup_default_domain()
2968 lockdep_assert_held(&group->mutex); in iommu_setup_default_domain()
2970 req_type = iommu_get_default_domain_type(group, target_type); in iommu_setup_default_domain()
2976 * we ignore the failure and leave group->default_domain NULL. in iommu_setup_default_domain()
2981 dom = iommu_group_alloc_default_domain(group, req_type); in iommu_setup_default_domain()
2984 if (group->default_domain) in iommu_setup_default_domain()
2986 group->default_domain = NULL; in iommu_setup_default_domain()
2990 if (group->default_domain == dom) in iommu_setup_default_domain()
2999 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
3009 group->default_domain = dom; in iommu_setup_default_domain()
3010 if (!group->domain) { in iommu_setup_default_domain()
3015 * in group->default_domain so it is freed after. in iommu_setup_default_domain()
3018 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
3022 ret = __iommu_group_set_domain(group, dom); in iommu_setup_default_domain()
3034 for_each_group_device(group, gdev) { in iommu_setup_default_domain()
3049 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); in iommu_setup_default_domain()
3053 group->default_domain = old_dom; in iommu_setup_default_domain()
3060 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3064 * group->mutex is used here to guarantee that the device release path
3067 static ssize_t iommu_group_store_type(struct iommu_group *group, in iommu_group_store_type() argument
3076 if (WARN_ON(!group) || !group->default_domain) in iommu_group_store_type()
3090 mutex_lock(&group->mutex); in iommu_group_store_type()
3093 group->default_domain->type == IOMMU_DOMAIN_DMA) { in iommu_group_store_type()
3094 ret = iommu_dma_init_fq(group->default_domain); in iommu_group_store_type()
3098 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; in iommu_group_store_type()
3104 if (list_empty(&group->devices) || group->owner_cnt) { in iommu_group_store_type()
3109 ret = iommu_setup_default_domain(group, req_type); in iommu_group_store_type()
3117 * group->mutex, resulting in a deadlock. in iommu_group_store_type()
3119 mutex_unlock(&group->mutex); in iommu_group_store_type()
3122 for_each_group_device(group, gdev) in iommu_group_store_type()
3127 mutex_unlock(&group->mutex); in iommu_group_store_type()
3131 static bool iommu_is_default_domain(struct iommu_group *group) in iommu_is_default_domain() argument
3133 if (group->domain == group->default_domain) in iommu_is_default_domain()
3142 if (group->default_domain && in iommu_is_default_domain()
3143 group->default_domain->type == IOMMU_DOMAIN_IDENTITY && in iommu_is_default_domain()
3144 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_is_default_domain()
3159 struct iommu_group *group = iommu_group_get(dev); in iommu_device_use_default_domain() local
3162 if (!group) in iommu_device_use_default_domain()
3165 mutex_lock(&group->mutex); in iommu_device_use_default_domain()
3167 if (IS_ENABLED(CONFIG_IOMMU_DMA) && !group->default_domain && in iommu_device_use_default_domain()
3172 if (group->owner_cnt) { in iommu_device_use_default_domain()
3173 if (group->owner || !iommu_is_default_domain(group) || in iommu_device_use_default_domain()
3174 !xa_empty(&group->pasid_array)) { in iommu_device_use_default_domain()
3180 group->owner_cnt++; in iommu_device_use_default_domain()
3183 mutex_unlock(&group->mutex); in iommu_device_use_default_domain()
3184 iommu_group_put(group); in iommu_device_use_default_domain()
3199 struct iommu_group *group = iommu_group_get(dev); in iommu_device_unuse_default_domain() local
3201 if (!group) in iommu_device_unuse_default_domain()
3204 mutex_lock(&group->mutex); in iommu_device_unuse_default_domain()
3205 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) in iommu_device_unuse_default_domain()
3206 group->owner_cnt--; in iommu_device_unuse_default_domain()
3208 mutex_unlock(&group->mutex); in iommu_device_unuse_default_domain()
3209 iommu_group_put(group); in iommu_device_unuse_default_domain()
3212 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) in __iommu_group_alloc_blocking_domain() argument
3215 list_first_entry(&group->devices, struct group_device, list); in __iommu_group_alloc_blocking_domain()
3217 if (group->blocking_domain) in __iommu_group_alloc_blocking_domain()
3220 group->blocking_domain = in __iommu_group_alloc_blocking_domain()
3222 if (!group->blocking_domain) { in __iommu_group_alloc_blocking_domain()
3227 group->blocking_domain = __iommu_domain_alloc( in __iommu_group_alloc_blocking_domain()
3229 if (!group->blocking_domain) in __iommu_group_alloc_blocking_domain()
3235 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) in __iommu_take_dma_ownership() argument
3239 if ((group->domain && group->domain != group->default_domain) || in __iommu_take_dma_ownership()
3240 !xa_empty(&group->pasid_array)) in __iommu_take_dma_ownership()
3243 ret = __iommu_group_alloc_blocking_domain(group); in __iommu_take_dma_ownership()
3246 ret = __iommu_group_set_domain(group, group->blocking_domain); in __iommu_take_dma_ownership()
3250 group->owner = owner; in __iommu_take_dma_ownership()
3251 group->owner_cnt++; in __iommu_take_dma_ownership()
3256 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3257 * @group: The group.
3262 * prohibited. Only a single owner may exist for a group.
3264 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) in iommu_group_claim_dma_owner() argument
3271 mutex_lock(&group->mutex); in iommu_group_claim_dma_owner()
3272 if (group->owner_cnt) { in iommu_group_claim_dma_owner()
3277 ret = __iommu_take_dma_ownership(group, owner); in iommu_group_claim_dma_owner()
3279 mutex_unlock(&group->mutex); in iommu_group_claim_dma_owner()
3290 * Claim the DMA ownership of a device. Multiple devices in the same group may
3296 struct iommu_group *group; in iommu_device_claim_dma_owner() local
3302 group = iommu_group_get(dev); in iommu_device_claim_dma_owner()
3303 if (!group) in iommu_device_claim_dma_owner()
3306 mutex_lock(&group->mutex); in iommu_device_claim_dma_owner()
3307 if (group->owner_cnt) { in iommu_device_claim_dma_owner()
3308 if (group->owner != owner) { in iommu_device_claim_dma_owner()
3312 group->owner_cnt++; in iommu_device_claim_dma_owner()
3316 ret = __iommu_take_dma_ownership(group, owner); in iommu_device_claim_dma_owner()
3318 mutex_unlock(&group->mutex); in iommu_device_claim_dma_owner()
3319 iommu_group_put(group); in iommu_device_claim_dma_owner()
3325 static void __iommu_release_dma_ownership(struct iommu_group *group) in __iommu_release_dma_ownership() argument
3327 if (WARN_ON(!group->owner_cnt || !group->owner || in __iommu_release_dma_ownership()
3328 !xa_empty(&group->pasid_array))) in __iommu_release_dma_ownership()
3331 group->owner_cnt = 0; in __iommu_release_dma_ownership()
3332 group->owner = NULL; in __iommu_release_dma_ownership()
3333 __iommu_group_set_domain_nofail(group, group->default_domain); in __iommu_release_dma_ownership()
3337 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3338 * @group: The group
3342 void iommu_group_release_dma_owner(struct iommu_group *group) in iommu_group_release_dma_owner() argument
3344 mutex_lock(&group->mutex); in iommu_group_release_dma_owner()
3345 __iommu_release_dma_ownership(group); in iommu_group_release_dma_owner()
3346 mutex_unlock(&group->mutex); in iommu_group_release_dma_owner()
3358 struct iommu_group *group = iommu_group_get(dev); in iommu_device_release_dma_owner() local
3360 mutex_lock(&group->mutex); in iommu_device_release_dma_owner()
3361 if (group->owner_cnt > 1) in iommu_device_release_dma_owner()
3362 group->owner_cnt--; in iommu_device_release_dma_owner()
3364 __iommu_release_dma_ownership(group); in iommu_device_release_dma_owner()
3365 mutex_unlock(&group->mutex); in iommu_device_release_dma_owner()
3366 iommu_group_put(group); in iommu_device_release_dma_owner()
3371 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3372 * @group: The group.
3374 * This provides a status query on a given group. It is racy and only for
3377 bool iommu_group_dma_owner_claimed(struct iommu_group *group) in iommu_group_dma_owner_claimed() argument
3381 mutex_lock(&group->mutex); in iommu_group_dma_owner_claimed()
3382 user = group->owner_cnt; in iommu_group_dma_owner_claimed()
3383 mutex_unlock(&group->mutex); in iommu_group_dma_owner_claimed()
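
Example (illustrative sketch): the claim/release pairing that fences kernel DMA while an external owner drives the group; my_open()/my_close() and the owner cookie are hypothetical.

    #include <linux/iommu.h>

    static int my_open(struct iommu_group *group, void *owner_cookie)
    {
        /* Moves the group onto its blocking domain and excludes
         * other owners; fails if already claimed or still in use. */
        return iommu_group_claim_dma_owner(group, owner_cookie);
    }

    static void my_close(struct iommu_group *group)
    {
        /* Pairs with a successful claim and restores the group's
         * default domain. */
        iommu_group_release_dma_owner(group);
    }
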
3390 struct iommu_group *group, ioasid_t pasid) in __iommu_set_group_pasid() argument
3395 for_each_group_device(group, device) { in __iommu_set_group_pasid()
3405 for_each_group_device(group, device) { in __iommu_set_group_pasid()
3415 static void __iommu_remove_group_pasid(struct iommu_group *group, in __iommu_remove_group_pasid() argument
3421 for_each_group_device(group, device) { in __iommu_remove_group_pasid()
3438 struct iommu_group *group; in iommu_attach_device_pasid() local
3445 group = iommu_group_get(dev); in iommu_attach_device_pasid()
3446 if (!group) in iommu_attach_device_pasid()
3449 mutex_lock(&group->mutex); in iommu_attach_device_pasid()
3450 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); in iommu_attach_device_pasid()
3456 ret = __iommu_set_group_pasid(domain, group, pasid); in iommu_attach_device_pasid()
3458 xa_erase(&group->pasid_array, pasid); in iommu_attach_device_pasid()
3460 mutex_unlock(&group->mutex); in iommu_attach_device_pasid()
3461 iommu_group_put(group); in iommu_attach_device_pasid()
3479 struct iommu_group *group = iommu_group_get(dev); in iommu_detach_device_pasid() local
3481 mutex_lock(&group->mutex); in iommu_detach_device_pasid()
3482 __iommu_remove_group_pasid(group, pasid); in iommu_detach_device_pasid()
3483 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); in iommu_detach_device_pasid()
3484 mutex_unlock(&group->mutex); in iommu_detach_device_pasid()
3486 iommu_group_put(group); in iommu_detach_device_pasid()
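
Example (illustrative sketch): binding a domain to a PASID and unbinding it; the pasid is assumed to have been allocated elsewhere, and my_bind()/my_unbind() are hypothetical.

    #include <linux/iommu.h>

    static int my_bind(struct iommu_domain *domain, struct device *dev,
                       ioasid_t pasid)
    {
        /* Installs @domain for (dev, pasid) across the whole group;
         * every member device must support PASID. */
        return iommu_attach_device_pasid(domain, dev, pasid);
    }

    static void my_unbind(struct iommu_domain *domain, struct device *dev,
                          ioasid_t pasid)
    {
        iommu_detach_device_pasid(domain, dev, pasid);
    }
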
3509 struct iommu_group *group; in iommu_get_domain_for_dev_pasid() local
3511 group = iommu_group_get(dev); in iommu_get_domain_for_dev_pasid()
3512 if (!group) in iommu_get_domain_for_dev_pasid()
3515 xa_lock(&group->pasid_array); in iommu_get_domain_for_dev_pasid()
3516 domain = xa_load(&group->pasid_array, pasid); in iommu_get_domain_for_dev_pasid()
3519 xa_unlock(&group->pasid_array); in iommu_get_domain_for_dev_pasid()
3520 iommu_group_put(group); in iommu_get_domain_for_dev_pasid()