Lines Matching refs:domain
48 struct iommu_domain *domain; member
80 static int __iommu_attach_device(struct iommu_domain *domain,
82 static int __iommu_attach_group(struct iommu_domain *domain,
84 static void __iommu_detach_group(struct iommu_domain *domain,
791 struct iommu_domain *domain = group->default_domain; in iommu_create_device_direct_mappings() local
797 if (!domain || !iommu_is_dma_domain(domain)) in iommu_create_device_direct_mappings()
800 BUG_ON(!domain->pgsize_bitmap); in iommu_create_device_direct_mappings()
802 pg_size = 1UL << __ffs(domain->pgsize_bitmap); in iommu_create_device_direct_mappings()
812 if (domain->ops->apply_resv_region) in iommu_create_device_direct_mappings()
813 domain->ops->apply_resv_region(dev, domain, entry); in iommu_create_device_direct_mappings()
828 phys_addr = iommu_iova_to_phys(domain, addr); in iommu_create_device_direct_mappings()
836 ret = iommu_map(domain, addr - map_size, in iommu_create_device_direct_mappings()
847 iommu_flush_iotlb_all(domain); in iommu_create_device_direct_mappings()
855 static bool iommu_is_attach_deferred(struct iommu_domain *domain, in iommu_is_attach_deferred() argument
858 if (domain->ops->is_attach_deferred) in iommu_is_attach_deferred()
859 return domain->ops->is_attach_deferred(domain, dev); in iommu_is_attach_deferred()
916 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) in iommu_group_add_device()
917 ret = __iommu_attach_device(group->domain, dev); in iommu_group_add_device()
1279 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); in iommu_page_response() local
1281 if (!domain || !domain->ops->page_response) in iommu_page_response()
1322 ret = domain->ops->page_response(dev, evt, msg); in iommu_page_response()
1576 if (!group->domain) in iommu_group_alloc_default_domain()
1577 group->domain = dom; in iommu_group_alloc_default_domain()
1770 struct iommu_domain *domain = data; in iommu_group_do_dma_attach() local
1773 if (!iommu_is_attach_deferred(domain, dev)) in iommu_group_do_dma_attach()
1774 ret = __iommu_attach_device(domain, dev); in iommu_group_do_dma_attach()
1787 struct iommu_domain *domain = data; in iommu_group_do_probe_finalize() local
1789 if (domain->ops->probe_finalize) in iommu_group_do_probe_finalize()
1790 domain->ops->probe_finalize(dev); in iommu_group_do_probe_finalize()
1956 void iommu_set_fault_handler(struct iommu_domain *domain, in iommu_set_fault_handler() argument
1960 BUG_ON(!domain); in iommu_set_fault_handler()
1962 domain->handler = handler; in iommu_set_fault_handler()
1963 domain->handler_token = token; in iommu_set_fault_handler()
1970 struct iommu_domain *domain; in __iommu_domain_alloc() local
1975 domain = bus->iommu_ops->domain_alloc(type); in __iommu_domain_alloc()
1976 if (!domain) in __iommu_domain_alloc()
1979 domain->ops = bus->iommu_ops; in __iommu_domain_alloc()
1980 domain->type = type; in __iommu_domain_alloc()
1982 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; in __iommu_domain_alloc()
1985 if (iommu_is_dma_domain(domain) && !domain->iova_cookie && iommu_get_dma_cookie(domain)) { in __iommu_domain_alloc()
1986 iommu_domain_free(domain); in __iommu_domain_alloc()
1987 domain = NULL; in __iommu_domain_alloc()
1989 return domain; in __iommu_domain_alloc()
1998 void iommu_domain_free(struct iommu_domain *domain) in iommu_domain_free() argument
2000 iommu_put_dma_cookie(domain); in iommu_domain_free()
2001 domain->ops->domain_free(domain); in iommu_domain_free()
2005 static int __iommu_attach_device(struct iommu_domain *domain, in __iommu_attach_device() argument
2010 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
2013 ret = domain->ops->attach_dev(domain, dev); in __iommu_attach_device()
2019 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_attach_device() argument
2037 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2047 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) in iommu_deferred_attach() argument
2049 const struct iommu_ops *ops = domain->ops; in iommu_deferred_attach()
2051 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) in iommu_deferred_attach()
2052 return __iommu_attach_device(domain, dev); in iommu_deferred_attach()
2113 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, in iommu_uapi_cache_invalidate() argument
2120 if (unlikely(!domain->ops->cache_invalidate)) in iommu_uapi_cache_invalidate()
2161 return domain->ops->cache_invalidate(domain, dev, &inv_info); in iommu_uapi_cache_invalidate()
2223 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_uapi_sva_bind_gpasid() argument
2229 if (unlikely(!domain->ops->sva_bind_gpasid)) in iommu_uapi_sva_bind_gpasid()
2236 return domain->ops->sva_bind_gpasid(domain, dev, &data); in iommu_uapi_sva_bind_gpasid()
2240 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_sva_unbind_gpasid() argument
2243 if (unlikely(!domain->ops->sva_unbind_gpasid)) in iommu_sva_unbind_gpasid()
2246 return domain->ops->sva_unbind_gpasid(dev, pasid); in iommu_sva_unbind_gpasid()
2250 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_uapi_sva_unbind_gpasid() argument
2256 if (unlikely(!domain->ops->sva_bind_gpasid)) in iommu_uapi_sva_unbind_gpasid()
2263 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); in iommu_uapi_sva_unbind_gpasid()
2267 static void __iommu_detach_device(struct iommu_domain *domain, in __iommu_detach_device() argument
2270 if (iommu_is_attach_deferred(domain, dev)) in __iommu_detach_device()
2273 if (unlikely(domain->ops->detach_dev == NULL)) in __iommu_detach_device()
2276 domain->ops->detach_dev(domain, dev); in __iommu_detach_device()
2280 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_detach_device() argument
2294 __iommu_detach_group(domain, group); in iommu_detach_device()
2304 struct iommu_domain *domain; in iommu_get_domain_for_dev() local
2311 domain = group->domain; in iommu_get_domain_for_dev()
2315 return domain; in iommu_get_domain_for_dev()
2340 struct iommu_domain *domain = data; in iommu_group_do_attach_device() local
2342 return __iommu_attach_device(domain, dev); in iommu_group_do_attach_device()
2345 static int __iommu_attach_group(struct iommu_domain *domain, in __iommu_attach_group() argument
2350 if (group->default_domain && group->domain != group->default_domain) in __iommu_attach_group()
2353 ret = __iommu_group_for_each_dev(group, domain, in __iommu_attach_group()
2356 group->domain = domain; in __iommu_attach_group()
2361 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2366 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2375 struct iommu_domain *domain = data; in iommu_group_do_detach_device() local
2377 __iommu_detach_device(domain, dev); in iommu_group_do_detach_device()
2382 static void __iommu_detach_group(struct iommu_domain *domain, in __iommu_detach_group() argument
2388 __iommu_group_for_each_dev(group, domain, in __iommu_detach_group()
2390 group->domain = NULL; in __iommu_detach_group()
2394 if (group->domain == group->default_domain) in __iommu_detach_group()
2403 group->domain = group->default_domain; in __iommu_detach_group()
2406 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2409 __iommu_detach_group(domain, group); in iommu_detach_group()
2414 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
2416 if (domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_iova_to_phys()
2419 if (domain->type == IOMMU_DOMAIN_BLOCKED) in iommu_iova_to_phys()
2422 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
2426 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, in iommu_pgsize() argument
2435 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); in iommu_pgsize()
2451 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in iommu_pgsize()
2480 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, in __iommu_map_pages() argument
2484 const struct iommu_ops *ops = domain->ops; in __iommu_map_pages()
2488 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); in __iommu_map_pages()
2494 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, in __iommu_map_pages()
2497 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); in __iommu_map_pages()
2504 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, in __iommu_map() argument
2507 const struct iommu_ops *ops = domain->ops; in __iommu_map()
2515 domain->pgsize_bitmap == 0UL)) in __iommu_map()
2518 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_map()
2522 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_map()
2540 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, in __iommu_map()
2557 iommu_unmap(domain, orig_iova, orig_size - size); in __iommu_map()
2564 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, in _iommu_map() argument
2567 const struct iommu_ops *ops = domain->ops; in _iommu_map()
2570 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); in _iommu_map()
2572 ops->iotlb_sync_map(domain, iova, size); in _iommu_map()
2577 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
2581 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); in iommu_map()
2585 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, in iommu_map_atomic() argument
2588 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); in iommu_map_atomic()
2592 static size_t __iommu_unmap_pages(struct iommu_domain *domain, in __iommu_unmap_pages() argument
2596 const struct iommu_ops *ops = domain->ops; in __iommu_unmap_pages()
2599 pgsize = iommu_pgsize(domain, iova, iova, size, &count); in __iommu_unmap_pages()
2601 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : in __iommu_unmap_pages()
2602 ops->unmap(domain, iova, pgsize, iotlb_gather); in __iommu_unmap_pages()
2605 static size_t __iommu_unmap(struct iommu_domain *domain, in __iommu_unmap() argument
2609 const struct iommu_ops *ops = domain->ops; in __iommu_unmap()
2615 domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
2618 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_unmap()
2622 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_unmap()
2642 unmapped_page = __iommu_unmap_pages(domain, iova, in __iommu_unmap()
2659 size_t iommu_unmap(struct iommu_domain *domain, in iommu_unmap() argument
2666 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); in iommu_unmap()
2667 iommu_iotlb_sync(domain, &iotlb_gather); in iommu_unmap()
2673 size_t iommu_unmap_fast(struct iommu_domain *domain, in iommu_unmap_fast() argument
2677 return __iommu_unmap(domain, iova, size, iotlb_gather); in iommu_unmap_fast()
2681 static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in __iommu_map_sg() argument
2685 const struct iommu_ops *ops = domain->ops; in __iommu_map_sg()
2692 ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped); in __iommu_map_sg()
2695 ops->iotlb_sync_map(domain, iova, mapped); in __iommu_map_sg()
2707 ret = __iommu_map(domain, iova + mapped, start, in __iommu_map_sg()
2729 ops->iotlb_sync_map(domain, iova, mapped); in __iommu_map_sg()
2734 iommu_unmap(domain, iova, mapped); in __iommu_map_sg()
2739 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg() argument
2743 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); in iommu_map_sg()
2747 ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg_atomic() argument
2750 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); in iommu_map_sg_atomic()
2777 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, in report_iommu_fault() argument
2786 if (domain->handler) in report_iommu_fault()
2787 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
2788 domain->handler_token); in report_iommu_fault()
2807 int iommu_enable_nesting(struct iommu_domain *domain) in iommu_enable_nesting() argument
2809 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_enable_nesting()
2811 if (!domain->ops->enable_nesting) in iommu_enable_nesting()
2813 return domain->ops->enable_nesting(domain); in iommu_enable_nesting()
2817 int iommu_set_pgtable_quirks(struct iommu_domain *domain, in iommu_set_pgtable_quirks() argument
2820 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_set_pgtable_quirks()
2822 if (!domain->ops->set_pgtable_quirks) in iommu_set_pgtable_quirks()
2824 return domain->ops->set_pgtable_quirks(domain, quirk); in iommu_set_pgtable_quirks()
3036 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_aux_attach_device() argument
3040 if (domain->ops->aux_attach_dev) in iommu_aux_attach_device()
3041 ret = domain->ops->aux_attach_dev(domain, dev); in iommu_aux_attach_device()
3050 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_aux_detach_device() argument
3052 if (domain->ops->aux_detach_dev) { in iommu_aux_detach_device()
3053 domain->ops->aux_detach_dev(domain, dev); in iommu_aux_detach_device()
3059 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) in iommu_aux_get_pasid() argument
3063 if (domain->ops->aux_get_pasid) in iommu_aux_get_pasid()
3064 ret = domain->ops->aux_get_pasid(domain, dev); in iommu_aux_get_pasid()
3186 if (group->default_domain != group->domain) { in iommu_change_dev_def_domain()
3272 group->domain = group->default_domain; in iommu_change_dev_def_domain()
3290 group->domain = prev_dom; in iommu_change_dev_def_domain()
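
The references above trace the full iommu_domain lifecycle in the IOMMU core: allocation (__iommu_domain_alloc / iommu_domain_free), device and group attach/detach, mapping and unmapping (iommu_map, iommu_unmap, iommu_map_sg), translation (iommu_iova_to_phys), and fault reporting. The following is a minimal sketch, not taken from the source above, of how a caller typically drives that exported API for an unmanaged domain. The device pointer, IOVA, physical address, and size are placeholder assumptions, and error handling is abbreviated; the function and constant names (iommu_domain_alloc, iommu_attach_device, iommu_map, IOMMU_READ/IOMMU_WRITE, etc.) are the exported kernel interfaces referenced in the listing, with the five-argument iommu_map() signature that this version of the file uses.

/*
 * Sketch: allocate an unmanaged IOMMU domain, attach a device, map a
 * region, and tear everything down again.  dev, paddr, size and the
 * IOVA below are placeholders supplied by the (hypothetical) caller.
 */
#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_use_iommu_domain(struct device *dev,
				    phys_addr_t paddr, size_t size)
{
	struct iommu_domain *domain;
	unsigned long iova = 0x100000;	/* placeholder IOVA, suitably aligned */
	int ret;

	/* Allocate an unmanaged domain on the device's bus. */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	/* Attach the device; internally this attaches its whole group. */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Map paddr at iova; size must align to a supported page size. */
	ret = iommu_map(domain, iova, paddr, size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* DMA through iova happens here; iommu_iova_to_phys() translates back. */

	iommu_unmap(domain, iova, size);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}

The sketch mirrors the call ordering the listing implies: attach must precede map (the group must own the domain before page-table updates are meaningful), and iommu_unmap/iommu_detach_device must precede iommu_domain_free, since domain_free releases the page tables and the DMA cookie (iommu_put_dma_cookie) that outstanding mappings still reference.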