Lines matching "never-post-merge-rules"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
24 #include <linux/pci-ats.h>
36 #include "dma-iommu.h"
37 #include "iommu-priv.h"
73 list_for_each_entry(pos, &(group)->devices, list)
84 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
169 * Use a function instead of an array here because the domain-type is a
170 * bit-field, so an array would waste memory.
202 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); in iommu_subsys_init()
223 return -ENOMEM; in iommu_subsys_init()
236 if (dev->iommu && dev->iommu->iommu_dev == data) in remove_iommu_group()
243 * iommu_device_register() - Register an IOMMU hardware instance
256 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) in iommu_device_register()
257 return -EINVAL; in iommu_device_register()
259 iommu->ops = ops; in iommu_device_register()
261 iommu->fwnode = dev_fwnode(hwdev); in iommu_device_register()
264 list_add_tail(&iommu->list, &iommu_device_list); in iommu_device_register()
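For context, a hardware driver typically registers one such instance from its probe path. A minimal sketch, assuming a hypothetical driver whose struct my_iommu embeds a struct iommu_device as "iommu" and which defines my_iommu_ops elsewhere:

static int my_iommu_probe(struct platform_device *pdev)
{
	struct my_iommu *smmu;
	int ret;

	smmu = devm_kzalloc(&pdev->dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return -ENOMEM;

	/* Optional but conventional: expose the instance under sysfs. */
	ret = iommu_device_sysfs_add(&smmu->iommu, &pdev->dev, NULL,
				     "%s", dev_name(&pdev->dev));
	if (ret)
		return ret;

	/* Hand the instance and its ops to the IOMMU core. */
	return iommu_device_register(&smmu->iommu, &my_iommu_ops, &pdev->dev);
}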
281 list_del(&iommu->list); in iommu_device_unregister()
285 iommu_group_put(iommu->singleton_group); in iommu_device_unregister()
286 iommu->singleton_group = NULL; in iommu_device_unregister()
312 iommu->ops = ops; in iommu_device_register_bus()
313 nb->notifier_call = iommu_bus_notifier; in iommu_device_register_bus()
319 list_add_tail(&iommu->list, &iommu_device_list); in iommu_device_register_bus()
334 struct dev_iommu *param = dev->iommu; in dev_iommu_get()
345 mutex_init(&param->lock); in dev_iommu_get()
346 dev->iommu = param; in dev_iommu_get()
352 struct dev_iommu *param = dev->iommu; in dev_iommu_free()
354 dev->iommu = NULL; in dev_iommu_free()
355 if (param->fwspec) { in dev_iommu_free()
356 fwnode_handle_put(param->fwspec->iommu_fwnode); in dev_iommu_free()
357 kfree(param->fwspec); in dev_iommu_free()
364 * actually has API ops, and don't want false positives from VFIO-only groups.
368 return dev->iommu && dev->iommu->iommu_dev; in dev_has_iommu()
381 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); in dev_iommu_get_max_pasids()
386 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); in dev_iommu_get_max_pasids()
394 dev->iommu->priv = priv; in dev_iommu_priv_set()
399 * Init the dev->iommu and dev->iommu_group in the struct device and get the
409 return -ENOMEM; in iommu_init_device()
411 if (!try_module_get(ops->owner)) { in iommu_init_device()
412 ret = -EINVAL; in iommu_init_device()
416 iommu_dev = ops->probe_device(dev); in iommu_init_device()
421 dev->iommu->iommu_dev = iommu_dev; in iommu_init_device()
427 group = ops->device_group(dev); in iommu_init_device()
429 group = ERR_PTR(-EINVAL); in iommu_init_device()
434 dev->iommu_group = group; in iommu_init_device()
436 dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); in iommu_init_device()
437 if (ops->is_attach_deferred) in iommu_init_device()
438 dev->iommu->attach_deferred = ops->is_attach_deferred(dev); in iommu_init_device()
444 if (ops->release_device) in iommu_init_device()
445 ops->release_device(dev); in iommu_init_device()
447 module_put(ops->owner); in iommu_init_device()
449 dev->iommu->iommu_dev = NULL; in iommu_init_device()
456 struct iommu_group *group = dev->iommu_group; in iommu_deinit_device()
459 lockdep_assert_held(&group->mutex); in iommu_deinit_device()
461 iommu_device_unlink(dev->iommu->iommu_dev, dev); in iommu_deinit_device()
477 * Regardless, if a delayed attach never occurred, then the release in iommu_deinit_device()
480 if (!dev->iommu->attach_deferred && ops->release_domain) in iommu_deinit_device()
481 ops->release_domain->ops->attach_dev(ops->release_domain, dev); in iommu_deinit_device()
483 if (ops->release_device) in iommu_deinit_device()
484 ops->release_device(dev); in iommu_deinit_device()
490 if (list_empty(&group->devices)) { in iommu_deinit_device()
491 if (group->default_domain) { in iommu_deinit_device()
492 iommu_domain_free(group->default_domain); in iommu_deinit_device()
493 group->default_domain = NULL; in iommu_deinit_device()
495 if (group->blocking_domain) { in iommu_deinit_device()
496 iommu_domain_free(group->blocking_domain); in iommu_deinit_device()
497 group->blocking_domain = NULL; in iommu_deinit_device()
499 group->domain = NULL; in iommu_deinit_device()
503 dev->iommu_group = NULL; in iommu_deinit_device()
504 module_put(ops->owner); in iommu_deinit_device()
507 dev->dma_iommu = false; in iommu_deinit_device()
521 * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU in __iommu_probe_device()
522 * instances with non-NULL fwnodes, and client devices should have been in __iommu_probe_device()
526 * ops for probing, and thus cheekily co-opt the same mechanism. in __iommu_probe_device()
530 return -ENODEV; in __iommu_probe_device()
541 if (dev->iommu_group) in __iommu_probe_device()
548 group = dev->iommu_group; in __iommu_probe_device()
550 mutex_lock(&group->mutex); in __iommu_probe_device()
560 list_add_tail(&gdev->list, &group->devices); in __iommu_probe_device()
561 WARN_ON(group->default_domain && !group->domain); in __iommu_probe_device()
562 if (group->default_domain) in __iommu_probe_device()
563 iommu_create_device_direct_mappings(group->default_domain, dev); in __iommu_probe_device()
564 if (group->domain) { in __iommu_probe_device()
565 ret = __iommu_device_set_domain(group, dev, group->domain, 0); in __iommu_probe_device()
568 } else if (!group->default_domain && !group_list) { in __iommu_probe_device()
572 } else if (!group->default_domain) { in __iommu_probe_device()
575 * to the caller by providing a de-duplicated list of groups in __iommu_probe_device()
578 if (list_empty(&group->entry)) in __iommu_probe_device()
579 list_add_tail(&group->entry, group_list); in __iommu_probe_device()
582 if (group->default_domain) in __iommu_probe_device()
585 mutex_unlock(&group->mutex); in __iommu_probe_device()
590 list_del(&gdev->list); in __iommu_probe_device()
594 mutex_unlock(&group->mutex); in __iommu_probe_device()
612 if (ops->probe_finalize) in iommu_probe_device()
613 ops->probe_finalize(dev); in iommu_probe_device()
621 struct device *dev = grp_dev->dev; in __iommu_group_free_device()
623 sysfs_remove_link(group->devices_kobj, grp_dev->name); in __iommu_group_free_device()
624 sysfs_remove_link(&dev->kobj, "iommu_group"); in __iommu_group_free_device()
626 trace_remove_device_from_group(group->id, dev); in __iommu_group_free_device()
633 if (list_empty(&group->devices)) in __iommu_group_free_device()
634 WARN_ON(group->owner_cnt || in __iommu_group_free_device()
635 group->domain != group->default_domain); in __iommu_group_free_device()
637 kfree(grp_dev->name); in __iommu_group_free_device()
644 struct iommu_group *group = dev->iommu_group; in __iommu_group_remove_device()
647 mutex_lock(&group->mutex); in __iommu_group_remove_device()
649 if (device->dev != dev) in __iommu_group_remove_device()
652 list_del(&device->list); in __iommu_group_remove_device()
657 dev->iommu_group = NULL; in __iommu_group_remove_device()
660 mutex_unlock(&group->mutex); in __iommu_group_remove_device()
671 struct iommu_group *group = dev->iommu_group; in iommu_release_device()
677 if (dev->iommu) in iommu_release_device()
721 ssize_t ret = -EIO; in iommu_group_attr_show()
723 if (attr->show) in iommu_group_attr_show()
724 ret = attr->show(group, buf); in iommu_group_attr_show()
734 ssize_t ret = -EIO; in iommu_group_attr_store()
736 if (attr->store) in iommu_group_attr_store()
737 ret = attr->store(group, buf, count); in iommu_group_attr_store()
749 return sysfs_create_file(&group->kobj, &attr->attr); in iommu_group_create_file()
755 sysfs_remove_file(&group->kobj, &attr->attr); in iommu_group_remove_file()
760 return sysfs_emit(buf, "%s\n", group->name); in iommu_group_show_name()
764 * iommu_insert_resv_region - Insert a new region in the
778 nr = iommu_alloc_resv_region(new->start, new->length, in iommu_insert_resv_region()
779 new->prot, new->type, GFP_KERNEL); in iommu_insert_resv_region()
781 return -ENOMEM; in iommu_insert_resv_region()
785 if (nr->start < iter->start || in iommu_insert_resv_region()
786 (nr->start == iter->start && nr->type <= iter->type)) in iommu_insert_resv_region()
789 list_add_tail(&nr->list, &iter->list); in iommu_insert_resv_region()
791 /* Merge overlapping segments of type nr->type in @regions, if any */ in iommu_insert_resv_region()
793 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; in iommu_insert_resv_region()
795 /* no merge needed on elements of different types than @new */ in iommu_insert_resv_region()
796 if (iter->type != new->type) { in iommu_insert_resv_region()
797 list_move_tail(&iter->list, &stack); in iommu_insert_resv_region()
803 if (top->type == iter->type) in iommu_insert_resv_region()
806 list_move_tail(&iter->list, &stack); in iommu_insert_resv_region()
810 top_end = top->start + top->length - 1; in iommu_insert_resv_region()
812 if (iter->start > top_end + 1) { in iommu_insert_resv_region()
813 list_move_tail(&iter->list, &stack); in iommu_insert_resv_region()
815 top->length = max(top_end, iter_end) - top->start + 1; in iommu_insert_resv_region()
816 list_del(&iter->list); in iommu_insert_resv_region()
845 mutex_lock(&group->mutex); in iommu_get_group_resv_regions()
850 * Non-API groups still expose reserved_regions in sysfs, in iommu_get_group_resv_regions()
853 if (!dev_has_iommu(device->dev)) in iommu_get_group_resv_regions()
857 iommu_get_resv_regions(device->dev, &dev_resv_regions); in iommu_get_group_resv_regions()
859 iommu_put_resv_regions(device->dev, &dev_resv_regions); in iommu_get_group_resv_regions()
863 mutex_unlock(&group->mutex); in iommu_get_group_resv_regions()
880 (long long)region->start, in iommu_group_show_resv_regions()
881 (long long)(region->start + in iommu_group_show_resv_regions()
882 region->length - 1), in iommu_group_show_resv_regions()
883 iommu_group_resv_type_string[region->type]); in iommu_group_show_resv_regions()
895 mutex_lock(&group->mutex); in iommu_group_show_type()
896 if (group->default_domain) { in iommu_group_show_type()
897 switch (group->default_domain->type) { in iommu_group_show_type()
911 type = "DMA-FQ"; in iommu_group_show_type()
915 mutex_unlock(&group->mutex); in iommu_group_show_type()
932 pr_debug("Releasing group %d\n", group->id); in iommu_group_release()
934 if (group->iommu_data_release) in iommu_group_release()
935 group->iommu_data_release(group->iommu_data); in iommu_group_release()
937 ida_free(&iommu_group_ida, group->id); in iommu_group_release()
940 WARN_ON(group->default_domain); in iommu_group_release()
941 WARN_ON(group->blocking_domain); in iommu_group_release()
943 kfree(group->name); in iommu_group_release()
953 * iommu_group_alloc - Allocate a new group
970 return ERR_PTR(-ENOMEM); in iommu_group_alloc()
972 group->kobj.kset = iommu_group_kset; in iommu_group_alloc()
973 mutex_init(&group->mutex); in iommu_group_alloc()
974 INIT_LIST_HEAD(&group->devices); in iommu_group_alloc()
975 INIT_LIST_HEAD(&group->entry); in iommu_group_alloc()
976 xa_init(&group->pasid_array); in iommu_group_alloc()
983 group->id = ret; in iommu_group_alloc()
985 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, in iommu_group_alloc()
986 NULL, "%d", group->id); in iommu_group_alloc()
988 kobject_put(&group->kobj); in iommu_group_alloc()
992 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); in iommu_group_alloc()
993 if (!group->devices_kobj) { in iommu_group_alloc()
994 kobject_put(&group->kobj); /* triggers .release & free */ in iommu_group_alloc()
995 return ERR_PTR(-ENOMEM); in iommu_group_alloc()
1003 kobject_put(&group->kobj); in iommu_group_alloc()
1008 kobject_put(group->devices_kobj); in iommu_group_alloc()
1014 kobject_put(group->devices_kobj); in iommu_group_alloc()
1018 pr_debug("Allocated group %d\n", group->id); in iommu_group_alloc()
1025 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
1034 return group->iommu_data; in iommu_group_get_iommudata()
1039 * iommu_group_set_iommudata - set iommu_data for a group
1051 group->iommu_data = iommu_data; in iommu_group_set_iommudata()
1052 group->iommu_data_release = release; in iommu_group_set_iommudata()
1057 * iommu_group_set_name - set name for a group
1068 if (group->name) { in iommu_group_set_name()
1070 kfree(group->name); in iommu_group_set_name()
1071 group->name = NULL; in iommu_group_set_name()
1076 group->name = kstrdup(name, GFP_KERNEL); in iommu_group_set_name()
1077 if (!group->name) in iommu_group_set_name()
1078 return -ENOMEM; in iommu_group_set_name()
1082 kfree(group->name); in iommu_group_set_name()
1083 group->name = NULL; in iommu_group_set_name()
1099 pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; in iommu_create_device_direct_mappings()
1103 return -EINVAL; in iommu_create_device_direct_mappings()
1112 if (entry->type == IOMMU_RESV_DIRECT) in iommu_create_device_direct_mappings()
1113 dev->iommu->require_direct = 1; in iommu_create_device_direct_mappings()
1115 if ((entry->type != IOMMU_RESV_DIRECT && in iommu_create_device_direct_mappings()
1116 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || in iommu_create_device_direct_mappings()
1120 start = ALIGN(entry->start, pg_size); in iommu_create_device_direct_mappings()
1121 end = ALIGN(entry->start + entry->length, pg_size); in iommu_create_device_direct_mappings()
1137 ret = iommu_map(domain, addr - map_size, in iommu_create_device_direct_mappings()
1138 addr - map_size, map_size, in iommu_create_device_direct_mappings()
1139 entry->prot, GFP_KERNEL); in iommu_create_device_direct_mappings()
1166 return ERR_PTR(-ENOMEM); in iommu_group_alloc_device()
1168 device->dev = dev; in iommu_group_alloc_device()
1170 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); in iommu_group_alloc_device()
1174 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); in iommu_group_alloc_device()
1176 if (!device->name) { in iommu_group_alloc_device()
1177 ret = -ENOMEM; in iommu_group_alloc_device()
1181 ret = sysfs_create_link_nowarn(group->devices_kobj, in iommu_group_alloc_device()
1182 &dev->kobj, device->name); in iommu_group_alloc_device()
1184 if (ret == -EEXIST && i >= 0) { in iommu_group_alloc_device()
1189 kfree(device->name); in iommu_group_alloc_device()
1190 device->name = kasprintf(GFP_KERNEL, "%s.%d", in iommu_group_alloc_device()
1191 kobject_name(&dev->kobj), i++); in iommu_group_alloc_device()
1197 trace_add_device_to_group(group->id, dev); in iommu_group_alloc_device()
1199 dev_info(dev, "Adding to iommu group %d\n", group->id); in iommu_group_alloc_device()
1204 kfree(device->name); in iommu_group_alloc_device()
1206 sysfs_remove_link(&dev->kobj, "iommu_group"); in iommu_group_alloc_device()
1209 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); in iommu_group_alloc_device()
1214 * iommu_group_add_device - add a device to an iommu group
1230 dev->iommu_group = group; in iommu_group_add_device()
1232 mutex_lock(&group->mutex); in iommu_group_add_device()
1233 list_add_tail(&gdev->list, &group->devices); in iommu_group_add_device()
1234 mutex_unlock(&group->mutex); in iommu_group_add_device()
1240 * iommu_group_remove_device - remove a device from its current group
1248 struct iommu_group *group = dev->iommu_group; in iommu_group_remove_device()
1253 dev_info(dev, "Removing from iommu group %d\n", group->id); in iommu_group_remove_device()
1261 * iommu_group_mutex_assert - Check device group mutex lock
1271 struct iommu_group *group = dev->iommu_group; in iommu_group_mutex_assert()
1273 lockdep_assert_held(&group->mutex); in iommu_group_mutex_assert()
1280 lockdep_assert_held(&group->mutex); in iommu_group_first_dev()
1281 return list_first_entry(&group->devices, struct group_device, list)->dev; in iommu_group_first_dev()
1285 * iommu_group_for_each_dev - iterate over each device in the group
1292 * The group->mutex is held across callbacks, which will block calls to
1301 mutex_lock(&group->mutex); in iommu_group_for_each_dev()
1303 ret = fn(device->dev, data); in iommu_group_for_each_dev()
1307 mutex_unlock(&group->mutex); in iommu_group_for_each_dev()
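As an illustration, a caller-supplied callback has this shape (my_count_devs is hypothetical); a nonzero return value stops the walk and is propagated back to the caller:

static int my_count_devs(struct device *dev, void *data)
{
	(*(int *)data)++;	/* count every device in the group */
	return 0;		/* nonzero would stop the iteration */
}

/* At the call site: */
int count = 0;
iommu_group_for_each_dev(group, &count, my_count_devs);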
1314 * iommu_group_get - Return the group for a device and increment reference
1323 struct iommu_group *group = dev->iommu_group; in iommu_group_get()
1326 kobject_get(group->devices_kobj); in iommu_group_get()
1333 * iommu_group_ref_get - Increment reference on a group
1341 kobject_get(group->devices_kobj); in iommu_group_ref_get()
1347 * iommu_group_put - Decrement group reference
1356 kobject_put(group->devices_kobj); in iommu_group_put()
1361 * iommu_group_id - Return ID for a group
1368 return group->id; in iommu_group_id()
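Taken together, a typical lookup pairs iommu_group_get() with iommu_group_put(); a small sketch:

struct iommu_group *group;

group = iommu_group_get(dev);
if (group) {
	dev_info(dev, "member of iommu group %d\n", iommu_group_id(group));
	iommu_group_put(group);	/* drop the reference taken above */
}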
1387 * all the other non-isolated functions and look for existing groups. For
1397 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) in get_pci_function_alias_group()
1401 if (tmp == pdev || tmp->bus != pdev->bus || in get_pci_function_alias_group()
1402 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || in get_pci_function_alias_group()
1431 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) in get_pci_alias_group()
1434 group = iommu_group_get(&pdev->dev); in get_pci_alias_group()
1439 if (tmp == pdev || tmp->bus != pdev->bus) in get_pci_alias_group()
1474 data->pdev = pdev; in get_pci_alias_or_group()
1475 data->group = iommu_group_get(&pdev->dev); in get_pci_alias_or_group()
1477 return data->group != NULL; in get_pci_alias_or_group()
1481 * Generic device_group call-back function. It just allocates one
1482 * iommu-group per device.
1491 * Generic device_group call-back function. It just allocates one
1492 * iommu-group per iommu driver instance shared by every device
1497 struct iommu_device *iommu = dev->iommu->iommu_dev; in generic_single_device_group()
1499 if (!iommu->singleton_group) { in generic_single_device_group()
1505 iommu->singleton_group = group; in generic_single_device_group()
1507 return iommu_group_ref_get(iommu->singleton_group); in generic_single_device_group()
1524 return ERR_PTR(-EINVAL); in pci_device_group()
1540 * peer-to-peer DMA by PCI ACS. Again, if we find an existing in pci_device_group()
1543 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { in pci_device_group()
1544 if (!bus->self) in pci_device_group()
1547 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) in pci_device_group()
1550 pdev = bus->self; in pci_device_group()
1552 group = iommu_group_get(&pdev->dev); in pci_device_group()
1566 * Look for existing groups on non-isolated functions on the same in pci_device_group()
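A driver's .device_group callback usually just forwards to one of these helpers; a sketch with a hypothetical my_device_group:

static struct iommu_group *my_device_group(struct device *dev)
{
	/* Honour PCI aliasing and ACS isolation rules for PCI devices... */
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	/* ...and fall back to one group per device otherwise. */
	return generic_device_group(dev);
}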
1579 /* Get the IOMMU group for device on fsl-mc bus */
1595 if (group->default_domain && group->default_domain->type == req_type) in __iommu_group_alloc_default_domain()
1596 return group->default_domain; in __iommu_group_alloc_default_domain()
1610 lockdep_assert_held(&group->mutex); in iommu_group_alloc_default_domain()
1617 if (ops->default_domain) { in iommu_group_alloc_default_domain()
1618 if (req_type != ops->default_domain->type) in iommu_group_alloc_default_domain()
1619 return ERR_PTR(-EINVAL); in iommu_group_alloc_default_domain()
1620 return ops->default_domain; in iommu_group_alloc_default_domain()
1633 return ERR_PTR(-EINVAL); in iommu_group_alloc_default_domain()
1638 …pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_D… in iommu_group_alloc_default_domain()
1639 iommu_def_domain_type, group->name); in iommu_group_alloc_default_domain()
1645 return group->default_domain; in iommu_group_default_domain()
1656 if (ret == -ENODEV) in probe_iommu_group()
1690 if (ops->default_domain) { in iommu_get_def_domain_type()
1695 type = ops->default_domain->type; in iommu_get_def_domain_type()
1697 if (ops->def_domain_type) in iommu_get_def_domain_type()
1698 type = ops->def_domain_type(dev); in iommu_get_def_domain_type()
1711 group->id); in iommu_get_def_domain_type()
1733 lockdep_assert_held(&group->mutex); in iommu_get_default_domain_type()
1748 driver_type = iommu_get_def_domain_type(group, gdev->dev, in iommu_get_default_domain_type()
1751 if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->requires_dma_protection) { in iommu_get_default_domain_type()
1756 return -1; in iommu_get_default_domain_type()
1757 untrusted = gdev->dev; in iommu_get_default_domain_type()
1768 return -1; in iommu_get_default_domain_type()
1778 group->id, iommu_domain_type_str(driver_type)); in iommu_get_default_domain_type()
1779 return -1; in iommu_get_default_domain_type()
1786 return -1; in iommu_get_default_domain_type()
1796 if (ops->probe_finalize) in iommu_group_do_probe_finalize()
1797 ops->probe_finalize(dev); in iommu_group_do_probe_finalize()
1813 mutex_lock(&group->mutex); in bus_iommu_probe()
1816 list_del_init(&group->entry); in bus_iommu_probe()
1820 * that the cross-group default domain type and the setup of the in bus_iommu_probe()
1821 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios. in bus_iommu_probe()
1825 mutex_unlock(&group->mutex); in bus_iommu_probe()
1829 iommu_setup_dma_ops(gdev->dev); in bus_iommu_probe()
1830 mutex_unlock(&group->mutex); in bus_iommu_probe()
1833 * FIXME: Mis-locked because the ops->probe_finalize() call-back in bus_iommu_probe()
1835 * in-turn might call back into IOMMU core code, where it tries in bus_iommu_probe()
1836 * to take group->mutex, resulting in a deadlock. in bus_iommu_probe()
1839 iommu_group_do_probe_finalize(gdev->dev); in bus_iommu_probe()
1846 * iommu_present() - make platform-specific assumptions about an IOMMU
1871 * device_iommu_capable() - check for a general IOMMU capability
1886 if (!ops->capable) in device_iommu_capable()
1889 return ops->capable(dev, cap); in device_iommu_capable()
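For example, a caller building mapping attributes might check coherency first (prot is a caller-owned variable in this sketch):

int prot = IOMMU_READ | IOMMU_WRITE;

if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
	prot |= IOMMU_CACHE;	/* allow cacheable mappings for this device */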
1894 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
1908 mutex_lock(&group->mutex); in iommu_group_has_isolated_msi()
1910 ret &= msi_device_has_isolated_msi(group_dev->dev); in iommu_group_has_isolated_msi()
1911 mutex_unlock(&group->mutex); in iommu_group_has_isolated_msi()
1917 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1934 domain->handler = handler; in iommu_set_fault_handler()
1935 domain->handler_token = token; in iommu_set_fault_handler()
1946 if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) in __iommu_domain_alloc()
1947 return ops->identity_domain; in __iommu_domain_alloc()
1948 else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain) in __iommu_domain_alloc()
1949 return ops->blocked_domain; in __iommu_domain_alloc()
1950 else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) in __iommu_domain_alloc()
1951 domain = ops->domain_alloc_paging(dev); in __iommu_domain_alloc()
1952 else if (ops->domain_alloc) in __iommu_domain_alloc()
1953 domain = ops->domain_alloc(alloc_type); in __iommu_domain_alloc()
1955 return ERR_PTR(-EOPNOTSUPP); in __iommu_domain_alloc()
1960 * having two rules. in __iommu_domain_alloc()
1965 return ERR_PTR(-ENOMEM); in __iommu_domain_alloc()
1967 domain->type = type; in __iommu_domain_alloc()
1968 domain->owner = ops; in __iommu_domain_alloc()
1973 if (!domain->pgsize_bitmap) in __iommu_domain_alloc()
1974 domain->pgsize_bitmap = ops->pgsize_bitmap; in __iommu_domain_alloc()
1976 if (!domain->ops) in __iommu_domain_alloc()
1977 domain->ops = ops->default_domain_ops; in __iommu_domain_alloc()
2009 return -EBUSY; in __iommu_domain_alloc_dev()
2036 * iommu_paging_domain_alloc() - Allocate a paging domain
2045 return ERR_PTR(-ENODEV); in iommu_paging_domain_alloc()
2053 if (domain->type == IOMMU_DOMAIN_SVA) in iommu_domain_free()
2054 mmdrop(domain->mm); in iommu_domain_free()
2056 if (domain->ops->free) in iommu_domain_free()
2057 domain->ops->free(domain); in iommu_domain_free()
2062 * Put the group's domain back to the appropriate core-owned domain - either the
2063 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
2069 if (group->owner) in __iommu_group_set_core_domain()
2070 new_domain = group->blocking_domain; in __iommu_group_set_core_domain()
2072 new_domain = group->default_domain; in __iommu_group_set_core_domain()
2082 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
2083 return -ENODEV; in __iommu_attach_device()
2085 ret = domain->ops->attach_dev(domain, dev); in __iommu_attach_device()
2088 dev->iommu->attach_deferred = 0; in __iommu_attach_device()
2094 * iommu_attach_device - Attach an IOMMU domain to a device
2108 struct iommu_group *group = dev->iommu_group; in iommu_attach_device()
2112 return -ENODEV; in iommu_attach_device()
2115 * Lock the group to make sure the device-count doesn't in iommu_attach_device()
2118 mutex_lock(&group->mutex); in iommu_attach_device()
2119 ret = -EINVAL; in iommu_attach_device()
2120 if (list_count_nodes(&group->devices) != 1) in iommu_attach_device()
2126 mutex_unlock(&group->mutex); in iommu_attach_device()
2133 if (dev->iommu && dev->iommu->attach_deferred) in iommu_deferred_attach()
2142 struct iommu_group *group = dev->iommu_group; in iommu_detach_device()
2147 mutex_lock(&group->mutex); in iommu_detach_device()
2148 if (WARN_ON(domain != group->domain) || in iommu_detach_device()
2149 WARN_ON(list_count_nodes(&group->devices) != 1)) in iommu_detach_device()
2154 mutex_unlock(&group->mutex); in iommu_detach_device()
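A kernel driver that wants to own translation for a singleton group typically allocates a paging domain, attaches it, and tears everything down in reverse order. A compressed sketch with error handling trimmed; iova and paddr are assumed to come from the caller:

struct iommu_domain *domain;
int ret;

domain = iommu_paging_domain_alloc(dev);
if (IS_ERR(domain))
	return PTR_ERR(domain);

ret = iommu_attach_device(domain, dev);	/* dev must be alone in its group */
if (ret) {
	iommu_domain_free(domain);
	return ret;
}

ret = iommu_map(domain, iova, paddr, SZ_4K,
		IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

/* ... use the mapping, then tear everything down ... */
iommu_unmap(domain, iova, SZ_4K);
iommu_detach_device(domain, dev);
iommu_domain_free(domain);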
2161 struct iommu_group *group = dev->iommu_group; in iommu_get_domain_for_dev()
2166 return group->domain; in iommu_get_domain_for_dev()
2176 return dev->iommu_group->default_domain; in iommu_get_dma_domain()
2184 if (group->domain && group->domain != group->default_domain && in __iommu_attach_group()
2185 group->domain != group->blocking_domain) in __iommu_attach_group()
2186 return -EBUSY; in __iommu_attach_group()
2189 if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner) in __iommu_attach_group()
2190 return -EINVAL; in __iommu_attach_group()
2196 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2211 mutex_lock(&group->mutex); in iommu_attach_group()
2213 mutex_unlock(&group->mutex); in iommu_attach_group()
2220 * iommu_group_replace_domain - replace the domain that a group is attached to
2225 * the blocking domain in-between.
2236 return -EINVAL; in iommu_group_replace_domain()
2238 mutex_lock(&group->mutex); in iommu_group_replace_domain()
2240 mutex_unlock(&group->mutex); in iommu_group_replace_domain()
2259 if (dev->iommu->require_direct && in __iommu_device_set_domain()
2260 (new_domain->type == IOMMU_DOMAIN_BLOCKED || in __iommu_device_set_domain()
2261 new_domain == group->blocking_domain)) { in __iommu_device_set_domain()
2264 return -EINVAL; in __iommu_device_set_domain()
2267 if (dev->iommu->attach_deferred) { in __iommu_device_set_domain()
2268 if (new_domain == group->default_domain) in __iommu_device_set_domain()
2270 dev->iommu->attach_deferred = 0; in __iommu_device_set_domain()
2281 group->blocking_domain && in __iommu_device_set_domain()
2282 group->blocking_domain != new_domain) in __iommu_device_set_domain()
2283 __iommu_attach_device(group->blocking_domain, dev); in __iommu_device_set_domain()
2302 * multi-function PCI devices). Thus we attach each device.
2313 lockdep_assert_held(&group->mutex); in __iommu_group_set_domain_internal()
2315 if (group->domain == new_domain) in __iommu_group_set_domain_internal()
2319 return -EINVAL; in __iommu_group_set_domain_internal()
2325 * either new_domain or group->domain, never something else. in __iommu_group_set_domain_internal()
2329 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, in __iommu_group_set_domain_internal()
2345 group->domain = new_domain; in __iommu_group_set_domain_internal()
2357 * we leave group->domain as NULL and let release clean in __iommu_group_set_domain_internal()
2360 if (group->domain) in __iommu_group_set_domain_internal()
2362 group, gdev->dev, group->domain, in __iommu_group_set_domain_internal()
2372 mutex_lock(&group->mutex); in iommu_detach_group()
2374 mutex_unlock(&group->mutex); in iommu_detach_group()
2380 if (domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_iova_to_phys()
2383 if (domain->type == IOMMU_DOMAIN_BLOCKED) in iommu_iova_to_phys()
2386 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
2400 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); in iommu_pgsize()
2416 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in iommu_pgsize()
2427 if ((iova ^ paddr) & (pgsize_next - 1)) in iommu_pgsize()
2431 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); in iommu_pgsize()
2449 const struct iommu_domain_ops *ops = domain->ops; in iommu_map_nosync()
2458 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in iommu_map_nosync()
2459 return -EINVAL; in iommu_map_nosync()
2461 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) in iommu_map_nosync()
2462 return -ENODEV; in iommu_map_nosync()
2467 return -EINVAL; in iommu_map_nosync()
2470 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in iommu_map_nosync()
2480 return -EINVAL; in iommu_map_nosync()
2492 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, in iommu_map_nosync()
2498 size -= mapped; in iommu_map_nosync()
2509 iommu_unmap(domain, orig_iova, orig_size - size); in iommu_map_nosync()
2518 const struct iommu_domain_ops *ops = domain->ops; in iommu_sync_map()
2520 if (!ops->iotlb_sync_map) in iommu_sync_map()
2522 return ops->iotlb_sync_map(domain, iova, size); in iommu_sync_map()
2546 const struct iommu_domain_ops *ops = domain->ops; in __iommu_unmap()
2551 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_unmap()
2554 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
2558 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_unmap()
2580 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); in __iommu_unmap()
2581 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); in __iommu_unmap()
2611 * iommu_unmap_fast() - Remove mappings from a range of IOVA without IOTLB sync
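Relative to iommu_unmap(), the caller owns the IOTLB flush; a minimal sketch of the expected pattern:

struct iommu_iotlb_gather gather;
size_t unmapped;

iommu_iotlb_gather_init(&gather);
unmapped = iommu_unmap_fast(domain, iova, size, &gather);
iommu_iotlb_sync(domain, &gather);	/* flush once for the whole batch */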
2640 struct iommu_domain *domain = cookie_sg->domain; in __iommu_add_sg()
2641 const struct iommu_domain_ops *ops = domain->ops; in __iommu_add_sg()
2645 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_add_sg()
2646 return -EINVAL; in __iommu_add_sg()
2648 if (WARN_ON(domain->pgsize_bitmap == 0UL)) in __iommu_add_sg()
2649 return -ENODEV; in __iommu_add_sg()
2652 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_add_sg()
2662 return -EINVAL; in __iommu_add_sg()
2669 ret = ops->add_deferred_map_sg(cookie_sg, paddr, pgsize, count); in __iommu_add_sg()
2674 size -= added; in __iommu_add_sg()
2686 const struct iommu_domain_ops *ops = domain->ops; in iommu_map_sg()
2691 bool deferred_sg = ops->alloc_cookie_sg && ops->add_deferred_map_sg && in iommu_map_sg()
2692 ops->consume_deferred_map_sg; in iommu_map_sg()
2696 cookie_sg = ops->alloc_cookie_sg(iova, prot, nents, gfp); in iommu_map_sg()
2699 return -ENOMEM; in iommu_map_sg()
2701 cookie_sg->domain = domain; in iommu_map_sg()
2712 mapped = ops->consume_deferred_map_sg(cookie_sg); in iommu_map_sg()
2728 len += sg->length; in iommu_map_sg()
2730 len = sg->length; in iommu_map_sg()
2742 consumed = ops->consume_deferred_map_sg(cookie_sg); in iommu_map_sg()
2745 ret = -EINVAL; in iommu_map_sg()
2765 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2771 * This function should be called by the low-level IOMMU implementations
2772 * whenever IOMMU faults happen, to allow high-level users, that are
2776 * - mere logging of the event
2777 * - dynamic TLB/PTE loading
2778 * - restarting the faulting device, if required in report_iommu_fault()
2784 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2785 * (though fault handlers can also return -ENOSYS, in case they want to
2791 int ret = -ENOSYS; in report_iommu_fault()
2797 if (domain->handler) in report_iommu_fault()
2798 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
2799 domain->handler_token); in report_iommu_fault()
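By way of illustration, a handler installed with iommu_set_fault_handler() has this shape (my_fault_handler and its logging are illustrative):

static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu fault at iova %#lx, flags %#x\n", iova, flags);
	return -ENOSYS;	/* not handled; 0 would mean the fault was resolved */
}

/* After allocating the unmanaged domain: */
iommu_set_fault_handler(domain, my_fault_handler, NULL);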
2820 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_enable_nesting()
2821 return -EINVAL; in iommu_enable_nesting()
2822 if (!domain->ops->enable_nesting) in iommu_enable_nesting()
2823 return -EINVAL; in iommu_enable_nesting()
2824 return domain->ops->enable_nesting(domain); in iommu_enable_nesting()
2831 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_set_pgtable_quirks()
2832 return -EINVAL; in iommu_set_pgtable_quirks()
2833 if (!domain->ops->set_pgtable_quirks) in iommu_set_pgtable_quirks()
2834 return -EINVAL; in iommu_set_pgtable_quirks()
2835 return domain->ops->set_pgtable_quirks(domain, quirk); in iommu_set_pgtable_quirks()
2840 * iommu_get_resv_regions - get reserved regions
2851 if (ops->get_resv_regions) in iommu_get_resv_regions()
2852 ops->get_resv_regions(dev, list); in iommu_get_resv_regions()
2857 * iommu_put_resv_regions - release reserved regions
2868 if (entry->free) in iommu_put_resv_regions()
2869 entry->free(dev, entry); in iommu_put_resv_regions()
2887 INIT_LIST_HEAD(&region->list); in iommu_alloc_resv_region()
2888 region->start = start; in iommu_alloc_resv_region()
2889 region->length = length; in iommu_alloc_resv_region()
2890 region->prot = prot; in iommu_alloc_resv_region()
2891 region->type = type; in iommu_alloc_resv_region()
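Drivers typically call iommu_alloc_resv_region() from their .get_resv_regions callback; a sketch with an illustrative base address, size and region type:

static void my_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Reserve a (hypothetical) 1MiB software MSI window. */
	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
					 IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI, GFP_KERNEL);
	if (!region)
		return;
	list_add_tail(&region->list, head);
}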
2923 if (iommu->fwnode == fwnode) { in iommu_ops_from_fwnode()
2924 ops = iommu->ops; in iommu_ops_from_fwnode()
2937 return -EPROBE_DEFER; in iommu_fwspec_init()
2940 return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL; in iommu_fwspec_init()
2943 return -ENOMEM; in iommu_fwspec_init()
2948 return -ENOMEM; in iommu_fwspec_init()
2951 fwspec->iommu_fwnode = iommu_fwnode; in iommu_fwspec_init()
2962 fwnode_handle_put(fwspec->iommu_fwnode); in iommu_fwspec_free()
2975 return -EINVAL; in iommu_fwspec_add_ids()
2977 new_num = fwspec->num_ids + num_ids; in iommu_fwspec_add_ids()
2982 return -ENOMEM; in iommu_fwspec_add_ids()
2988 fwspec->ids[fwspec->num_ids + i] = ids[i]; in iommu_fwspec_add_ids()
2990 fwspec->num_ids = new_num; in iommu_fwspec_add_ids()
3003 if (ops->dev_enable_feat) in iommu_dev_enable_feature()
3004 return ops->dev_enable_feat(dev, feat); in iommu_dev_enable_feature()
3007 return -ENODEV; in iommu_dev_enable_feature()
3019 if (ops->dev_disable_feat) in iommu_dev_disable_feature()
3020 return ops->dev_disable_feat(dev, feat); in iommu_dev_disable_feature()
3023 return -EBUSY; in iommu_dev_disable_feature()
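These feature toggles are used, for instance, to opt a device into SVA before binding an address space; a sketch:

ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
if (ret)
	return ret;

/* ... bind address spaces, do the work ... */

iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);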
3028 * iommu_setup_default_domain - Set the default_domain for the group
3040 struct iommu_domain *old_dom = group->default_domain; in iommu_setup_default_domain()
3047 lockdep_assert_held(&group->mutex); in iommu_setup_default_domain()
3051 return -EINVAL; in iommu_setup_default_domain()
3057 if (group->default_domain == dom) in iommu_setup_default_domain()
3067 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { in iommu_setup_default_domain()
3070 gdev->dev->iommu->iommu_dev->dev, in iommu_setup_default_domain()
3076 group->default_domain = dom; in iommu_setup_default_domain()
3077 if (!group->domain) { in iommu_setup_default_domain()
3081 * iommu driver and call ops->release_device. Put the domain in iommu_setup_default_domain()
3082 * in group->default_domain so it is freed after. in iommu_setup_default_domain()
3102 ret = iommu_create_device_direct_mappings(dom, gdev->dev); in iommu_setup_default_domain()
3120 group->default_domain = old_dom; in iommu_setup_default_domain()
3127 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3131 * group->mutex is used here to guarantee that the device release path
3141 return -EACCES; in iommu_group_store_type()
3143 if (WARN_ON(!group) || !group->default_domain) in iommu_group_store_type()
3144 return -EINVAL; in iommu_group_store_type()
3150 else if (sysfs_streq(buf, "DMA-FQ")) in iommu_group_store_type()
3155 return -EINVAL; in iommu_group_store_type()
3157 mutex_lock(&group->mutex); in iommu_group_store_type()
3160 group->default_domain->type == IOMMU_DOMAIN_DMA) { in iommu_group_store_type()
3161 ret = iommu_dma_init_fq(group->default_domain); in iommu_group_store_type()
3165 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; in iommu_group_store_type()
3171 if (list_empty(&group->devices) || group->owner_cnt) { in iommu_group_store_type()
3172 ret = -EPERM; in iommu_group_store_type()
3182 iommu_setup_dma_ops(gdev->dev); in iommu_group_store_type()
3185 mutex_unlock(&group->mutex); in iommu_group_store_type()
3190 * iommu_device_use_default_domain() - Device driver wants to handle device
3199 /* Caller is the driver core during the pre-probe path */ in iommu_device_use_default_domain()
3200 struct iommu_group *group = dev->iommu_group; in iommu_device_use_default_domain()
3206 mutex_lock(&group->mutex); in iommu_device_use_default_domain()
3208 if (!group->default_domain) { in iommu_device_use_default_domain()
3209 ret = -EPROBE_DEFER; in iommu_device_use_default_domain()
3212 if (group->owner_cnt) { in iommu_device_use_default_domain()
3213 if (group->domain != group->default_domain || group->owner || in iommu_device_use_default_domain()
3214 !xa_empty(&group->pasid_array)) { in iommu_device_use_default_domain()
3215 ret = -EBUSY; in iommu_device_use_default_domain()
3220 group->owner_cnt++; in iommu_device_use_default_domain()
3223 mutex_unlock(&group->mutex); in iommu_device_use_default_domain()
3228 * iommu_device_unuse_default_domain() - Device driver stops handling device
3237 /* Caller is the driver core during the post-probe path */ in iommu_device_unuse_default_domain()
3238 struct iommu_group *group = dev->iommu_group; in iommu_device_unuse_default_domain()
3243 mutex_lock(&group->mutex); in iommu_device_unuse_default_domain()
3244 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) in iommu_device_unuse_default_domain()
3245 group->owner_cnt--; in iommu_device_unuse_default_domain()
3247 mutex_unlock(&group->mutex); in iommu_device_unuse_default_domain()
3254 if (group->blocking_domain) in __iommu_group_alloc_blocking_domain()
3268 group->blocking_domain = domain; in __iommu_group_alloc_blocking_domain()
3276 if ((group->domain && group->domain != group->default_domain) || in __iommu_take_dma_ownership()
3277 !xa_empty(&group->pasid_array)) in __iommu_take_dma_ownership()
3278 return -EBUSY; in __iommu_take_dma_ownership()
3283 ret = __iommu_group_set_domain(group, group->blocking_domain); in __iommu_take_dma_ownership()
3287 group->owner = owner; in __iommu_take_dma_ownership()
3288 group->owner_cnt++; in __iommu_take_dma_ownership()
3293 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3306 return -EINVAL; in iommu_group_claim_dma_owner()
3308 mutex_lock(&group->mutex); in iommu_group_claim_dma_owner()
3309 if (group->owner_cnt) { in iommu_group_claim_dma_owner()
3310 ret = -EPERM; in iommu_group_claim_dma_owner()
3316 mutex_unlock(&group->mutex); in iommu_group_claim_dma_owner()
3323 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
3334 struct iommu_group *group = dev->iommu_group; in iommu_device_claim_dma_owner()
3338 return -EINVAL; in iommu_device_claim_dma_owner()
3341 return -ENODEV; in iommu_device_claim_dma_owner()
3343 mutex_lock(&group->mutex); in iommu_device_claim_dma_owner()
3344 if (group->owner_cnt) { in iommu_device_claim_dma_owner()
3345 if (group->owner != owner) { in iommu_device_claim_dma_owner()
3346 ret = -EPERM; in iommu_device_claim_dma_owner()
3349 group->owner_cnt++; in iommu_device_claim_dma_owner()
3355 mutex_unlock(&group->mutex); in iommu_device_claim_dma_owner()
3362 if (WARN_ON(!group->owner_cnt || !group->owner || in __iommu_release_dma_ownership()
3363 !xa_empty(&group->pasid_array))) in __iommu_release_dma_ownership()
3366 group->owner_cnt = 0; in __iommu_release_dma_ownership()
3367 group->owner = NULL; in __iommu_release_dma_ownership()
3368 __iommu_group_set_domain_nofail(group, group->default_domain); in __iommu_release_dma_ownership()
3372 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3379 mutex_lock(&group->mutex); in iommu_group_release_dma_owner()
3381 mutex_unlock(&group->mutex); in iommu_group_release_dma_owner()
3386 * iommu_device_release_dma_owner() - Release DMA ownership of a device
3394 struct iommu_group *group = dev->iommu_group; in iommu_device_release_dma_owner()
3396 mutex_lock(&group->mutex); in iommu_device_release_dma_owner()
3397 if (group->owner_cnt > 1) in iommu_device_release_dma_owner()
3398 group->owner_cnt--; in iommu_device_release_dma_owner()
3401 mutex_unlock(&group->mutex); in iommu_device_release_dma_owner()
3406 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3410 * non-binding status reporting.
3416 mutex_lock(&group->mutex); in iommu_group_dma_owner_claimed()
3417 user = group->owner_cnt; in iommu_group_dma_owner_claimed()
3418 mutex_unlock(&group->mutex); in iommu_group_dma_owner_claimed()
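Putting the ownership interfaces together, a userspace-driver framework (as VFIO does) claims the group, attaches its own domain, and releases both on the way out; a sketch with my_owner_cookie standing in for the framework's owner token:

ret = iommu_group_claim_dma_owner(group, my_owner_cookie);
if (ret)
	return ret;

ret = iommu_attach_group(domain, group);
if (ret) {
	iommu_group_release_dma_owner(group);
	return ret;
}

/* ... run the user workload ... */

iommu_detach_group(domain, group);
iommu_group_release_dma_owner(group);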
3431 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); in __iommu_set_group_pasid()
3441 const struct iommu_ops *ops = dev_iommu_ops(device->dev); in __iommu_set_group_pasid()
3445 ops->remove_dev_pasid(device->dev, pasid, domain); in __iommu_set_group_pasid()
3458 ops = dev_iommu_ops(device->dev); in __iommu_remove_group_pasid()
3459 ops->remove_dev_pasid(device->dev, pasid, domain); in __iommu_remove_group_pasid()
3464 * iommu_attach_device_pasid() - Attach a domain to pasid of device
3477 struct iommu_group *group = dev->iommu_group; in iommu_attach_device_pasid()
3481 if (!domain->ops->set_dev_pasid) in iommu_attach_device_pasid()
3482 return -EOPNOTSUPP; in iommu_attach_device_pasid()
3485 return -ENODEV; in iommu_attach_device_pasid()
3487 if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner || in iommu_attach_device_pasid()
3489 return -EINVAL; in iommu_attach_device_pasid()
3491 mutex_lock(&group->mutex); in iommu_attach_device_pasid()
3493 if (pasid >= device->dev->iommu->max_pasids) { in iommu_attach_device_pasid()
3494 ret = -EINVAL; in iommu_attach_device_pasid()
3500 handle->domain = domain; in iommu_attach_device_pasid()
3502 ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL); in iommu_attach_device_pasid()
3508 xa_erase(&group->pasid_array, pasid); in iommu_attach_device_pasid()
3510 mutex_unlock(&group->mutex); in iommu_attach_device_pasid()
3516 * iommu_detach_device_pasid() - Detach the domain from pasid of device
3528 struct iommu_group *group = dev->iommu_group; in iommu_detach_device_pasid()
3530 mutex_lock(&group->mutex); in iommu_detach_device_pasid()
3532 xa_erase(&group->pasid_array, pasid); in iommu_detach_device_pasid()
3533 mutex_unlock(&group->mutex); in iommu_detach_device_pasid()
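For a PASID-capable device, the attach/detach pair is used once per PASID; a minimal sketch that passes a NULL attach handle (callers needing handle-based lookup pass a struct iommu_attach_handle instead) and assumes pasid was allocated elsewhere, for example via iommu_alloc_global_pasid():

ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
if (ret)
	return ret;

/* ... DMA tagged with this PASID is now translated by @domain ... */

iommu_detach_device_pasid(domain, dev, pasid);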
3542 if (!dev->iommu->max_pasids) in iommu_alloc_global_pasid()
3550 dev->iommu->max_pasids - 1, GFP_KERNEL); in iommu_alloc_global_pasid()
3565 * iommu_attach_handle_get - Return the attach handle
3570 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch.
3583 xa_lock(&group->pasid_array); in iommu_attach_handle_get()
3584 handle = xa_load(&group->pasid_array, pasid); in iommu_attach_handle_get()
3586 handle = ERR_PTR(-ENOENT); in iommu_attach_handle_get()
3587 else if (type && handle->domain->type != type) in iommu_attach_handle_get()
3588 handle = ERR_PTR(-EBUSY); in iommu_attach_handle_get()
3589 xa_unlock(&group->pasid_array); in iommu_attach_handle_get()
3596 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group
3614 handle->domain = domain; in iommu_attach_group_handle()
3616 mutex_lock(&group->mutex); in iommu_attach_group_handle()
3617 ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); in iommu_attach_group_handle()
3624 mutex_unlock(&group->mutex); in iommu_attach_group_handle()
3628 xa_erase(&group->pasid_array, IOMMU_NO_PASID); in iommu_attach_group_handle()
3630 mutex_unlock(&group->mutex); in iommu_attach_group_handle()
3636 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group
3646 mutex_lock(&group->mutex); in iommu_detach_group_handle()
3648 xa_erase(&group->pasid_array, IOMMU_NO_PASID); in iommu_detach_group_handle()
3649 mutex_unlock(&group->mutex); in iommu_detach_group_handle()
3654 * iommu_replace_group_handle - replace the domain that a group is attached to
3671 return -EINVAL; in iommu_replace_group_handle()
3673 mutex_lock(&group->mutex); in iommu_replace_group_handle()
3675 ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); in iommu_replace_group_handle()
3678 handle->domain = new_domain; in iommu_replace_group_handle()
3685 curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); in iommu_replace_group_handle()
3688 mutex_unlock(&group->mutex); in iommu_replace_group_handle()
3692 xa_release(&group->pasid_array, IOMMU_NO_PASID); in iommu_replace_group_handle()
3694 mutex_unlock(&group->mutex); in iommu_replace_group_handle()