Lines matching refs:domain (a cross-reference listing, apparently over the Intel VT-d driver, drivers/iommu/intel/iommu.c). Each entry gives the source line number, the matching line, and the enclosing function; lines that do not themselves reference the identifier "domain" are elided, which is why some functions appear with gaps.

270 #define for_each_domain_iommu(idx, domain)			\  argument
272 if (domain->iommu_refcnt[idx])
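Line 271, the loop line of the macro, is elided above because it does not reference "domain". In kernels of this vintage the full macro likely reads as the sketch below; g_num_of_iommus (the driver-global IOMMU count) is an assumption taken from the surrounding driver, not shown in this listing:

    /* Sketch of the complete macro: iterate every registered IOMMU
     * and visit only those this domain is attached to (nonzero
     * per-IOMMU reference count). */
    #define for_each_domain_iommu(idx, domain)			\
    	for (idx = 0; idx < g_num_of_iommus; idx++)		\
    		if (domain->iommu_refcnt[idx])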
310 static void domain_exit(struct dmar_domain *domain);
311 static void domain_remove_dev_info(struct dmar_domain *domain);
314 static int intel_iommu_attach_device(struct iommu_domain *domain,
316 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
462 struct dmar_domain *domain) in set_iommu_domain() argument
476 domains[did & 0xff] = domain; in set_iommu_domain()
515 static inline int domain_type_is_si(struct dmar_domain *domain) in domain_type_is_si() argument
517 return domain->domain.type == IOMMU_DOMAIN_IDENTITY; in domain_type_is_si()
520 static inline bool domain_use_first_level(struct dmar_domain *domain) in domain_use_first_level() argument
522 return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL; in domain_use_first_level()
525 static inline int domain_pfn_supported(struct dmar_domain *domain, in domain_pfn_supported() argument
528 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
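The return statement of domain_pfn_supported() (line 529) does not reference "domain" and is elided. A sketch of how the check plausibly completes, assuming the usual agaw_to_width()/VTD_PAGE_SHIFT arithmetic of this driver: a pfn is supported iff it has no bits above the domain's DMA address width.

    static inline int domain_pfn_supported(struct dmar_domain *domain,
    				       unsigned long pfn)
    {
    	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

    	/* addr_width == BITS_PER_LONG means every pfn fits;
    	 * otherwise any bit at or above addr_width is out of range. */
    	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
    }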
589 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) in domain_get_iommu() argument
594 if (WARN_ON(!iommu_is_dma_domain(&domain->domain))) in domain_get_iommu()
597 for_each_domain_iommu(iommu_id, domain) in domain_get_iommu()
612 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency() argument
619 domain->iommu_coherency = true; in domain_update_iommu_coherency()
621 for_each_domain_iommu(i, domain) { in domain_update_iommu_coherency()
624 domain->iommu_coherency = false; in domain_update_iommu_coherency()
635 domain->iommu_coherency = false; in domain_update_iommu_coherency()
669 static int domain_update_iommu_superpage(struct dmar_domain *domain, in domain_update_iommu_superpage() argument
683 if (domain && domain_use_first_level(domain)) { in domain_update_iommu_superpage()
699 static int domain_update_device_node(struct dmar_domain *domain) in domain_update_device_node() argument
706 if (list_empty(&domain->devices)) in domain_update_device_node()
709 list_for_each_entry(info, &domain->devices, link) { in domain_update_device_node()
727 static void domain_update_iotlb(struct dmar_domain *domain);
730 static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain) in domain_super_pgsize_bitmap() argument
738 if (domain->iommu_superpage == 1) in domain_super_pgsize_bitmap()
740 else if (domain->iommu_superpage == 2) in domain_super_pgsize_bitmap()
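The branch bodies of domain_super_pgsize_bitmap() are elided above. In this driver a one-level superpage means 2MiB mappings and a two-level superpage adds 1GiB; a sketch of the whole helper, with the SZ_2M/SZ_1G constants assumed from the kernel's size macros:

    static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
    {
    	unsigned long bitmap = 0;

    	/* 1-level super page supports a page size of 2MiB; 2-level
    	 * super page supports both 2MiB and 1GiB. */
    	if (domain->iommu_superpage == 1)
    		bitmap |= SZ_2M;
    	else if (domain->iommu_superpage == 2)
    		bitmap |= SZ_2M | SZ_1G;

    	return bitmap;
    }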
747 static void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap() argument
749 domain_update_iommu_coherency(domain); in domain_update_iommu_cap()
750 domain->iommu_snooping = domain_update_iommu_snooping(NULL); in domain_update_iommu_cap()
751 domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL); in domain_update_iommu_cap()
757 if (domain->nid == NUMA_NO_NODE) in domain_update_iommu_cap()
758 domain->nid = domain_update_device_node(domain); in domain_update_iommu_cap()
767 if (domain_use_first_level(domain)) in domain_update_iommu_cap()
768 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); in domain_update_iommu_cap()
770 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); in domain_update_iommu_cap()
772 domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); in domain_update_iommu_cap()
773 domain_update_iotlb(domain); in domain_update_iommu_cap()
966 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache() argument
969 if (!domain->iommu_coherency) in domain_flush_cache()
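domain_flush_cache() is a two-line helper: when any attached IOMMU is not coherent with the CPU caches, page-table updates must be written back explicitly before the hardware walks them. A sketch, assuming clflush_cache_range() on the elided line 970:

    static void domain_flush_cache(struct dmar_domain *domain,
    			       void *addr, int size)
    {
    	/* Non-coherent hardware snoops nothing: flush the CPU cache
    	 * lines covering the just-modified page-table bytes. */
    	if (!domain->iommu_coherency)
    		clflush_cache_range(addr, size);
    }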
1016 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte() argument
1020 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
1023 BUG_ON(!domain->pgd); in pfn_to_dma_pte()
1025 if (!domain_pfn_supported(domain, pfn)) in pfn_to_dma_pte()
1029 parent = domain->pgd; in pfn_to_dma_pte()
1044 tmp_page = alloc_pgtable_page(domain->nid); in pfn_to_dma_pte()
1049 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); in pfn_to_dma_pte()
1051 if (domain_use_first_level(domain)) in pfn_to_dma_pte()
1058 domain_flush_cache(domain, pte, sizeof(*pte)); in pfn_to_dma_pte()
1074 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte() argument
1079 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
1082 parent = domain->pgd; in dma_pfn_level_pte()
1106 static void dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range() argument
1113 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_clear_range()
1114 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_clear_range()
1120 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range()
1131 domain_flush_cache(domain, first_pte, in dma_pte_clear_range()
1137 static void dma_pte_free_level(struct dmar_domain *domain, int level, in dma_pte_free_level() argument
1156 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
1168 domain_flush_cache(domain, pte, sizeof(*pte)); in dma_pte_free_level()
1180 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable() argument
1185 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_free_pagetable()
1186 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_free_pagetable()
1189 dma_pte_clear_range(domain, start_pfn, last_pfn); in dma_pte_free_pagetable()
1192 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
1193 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
1196 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
1197 free_pgtable_page(domain->pgd); in dma_pte_free_pagetable()
1198 domain->pgd = NULL; in dma_pte_free_pagetable()
1208 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, in dma_pte_list_pagetables() argument
1224 freelist = dma_pte_list_pagetables(domain, level - 1, in dma_pte_list_pagetables()
1232 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, in dma_pte_clear_level() argument
1255 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
1263 freelist = dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
1273 domain_flush_cache(domain, first_pte, in dma_pte_clear_level()
1282 static struct page *domain_unmap(struct dmar_domain *domain, in domain_unmap() argument
1287 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in domain_unmap()
1288 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in domain_unmap()
1292 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
1293 domain->pgd, 0, start_pfn, last_pfn, in domain_unmap()
1297 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
1298 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
1302 domain->pgd = NULL; in domain_unmap()
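Note the pattern in lines 1208-1302: domain_unmap() does not free page-table pages immediately. dma_pte_list_pagetables() and dma_pte_clear_level() chain them through page->freelist so the caller can release them only after the IOTLB has been invalidated, closing the window in which the hardware could still walk freed memory. A sketch of the consumer, assuming a dma_free_pagelist() helper as used elsewhere in this driver:

    static void dma_free_pagelist(struct page *freelist)
    {
    	struct page *pg;

    	/* Walk the chain built through page->freelist and return each
    	 * former page-table page to the allocator. Safe only after
    	 * the relevant IOTLB entries have been flushed. */
    	while ((pg = freelist)) {
    		freelist = pg->freelist;
    		free_pgtable_page(page_address(pg));
    	}
    }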
1477 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1487 list_for_each_entry(info, &domain->devices, link) in iommu_support_dev_iotlb()
1498 static void domain_update_iotlb(struct dmar_domain *domain) in domain_update_iotlb() argument
1505 list_for_each_entry(info, &domain->devices, link) in domain_update_iotlb()
1514 list_for_each_entry(sinfo, &domain->subdevices, link_domain) { in domain_update_iotlb()
1523 domain->has_iotlb_device = has_iotlb_device; in domain_update_iotlb()
1568 domain_update_iotlb(info->domain); in iommu_enable_dev_iotlb()
1587 domain_update_iotlb(info->domain); in iommu_disable_dev_iotlb()
1615 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, in iommu_flush_dev_iotlb() argument
1622 if (!domain->has_iotlb_device) in iommu_flush_dev_iotlb()
1626 list_for_each_entry(info, &domain->devices, link) in iommu_flush_dev_iotlb()
1629 list_for_each_entry(sinfo, &domain->subdevices, link_domain) { in iommu_flush_dev_iotlb()
1637 struct dmar_domain *domain, in domain_flush_piotlb() argument
1640 u16 did = domain->iommu_did[iommu->seq_id]; in domain_flush_piotlb()
1642 if (domain->default_pasid) in domain_flush_piotlb()
1643 qi_flush_piotlb(iommu, did, domain->default_pasid, in domain_flush_piotlb()
1646 if (!list_empty(&domain->devices)) in domain_flush_piotlb()
1651 struct dmar_domain *domain, in iommu_flush_iotlb_psi() argument
1658 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1665 if (domain_use_first_level(domain)) { in iommu_flush_iotlb_psi()
1666 domain_flush_piotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1707 iommu_flush_dev_iotlb(domain, addr, mask); in iommu_flush_iotlb_psi()
1712 struct dmar_domain *domain, in __mapping_notify_one() argument
1719 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1720 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
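Lines 2712-2720 show the caching-mode corner of __mapping_notify_one(): a non-present-to-present transition needs no IOTLB flush on real hardware, but virtualized VT-d (caching mode) may cache non-present second-level entries, so those must be flushed; first-level tables are exempt. A sketch of the full helper, assuming the iommu_flush_write_buffer() fallback on the elided else branch:

    static void __mapping_notify_one(struct intel_iommu *iommu,
    				 struct dmar_domain *domain,
    				 unsigned long pfn, unsigned int pages)
    {
    	/* Non-present to present mapping: only flush under caching
    	 * mode with second-level translation. */
    	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
    		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
    	else
    		iommu_flush_write_buffer(iommu);
    }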
1725 static void intel_flush_iotlb_all(struct iommu_domain *domain) in intel_flush_iotlb_all() argument
1727 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_flush_iotlb_all()
1870 if (!info->dev || !info->domain) in disable_dmar_iommu()
1936 struct dmar_domain *domain; in alloc_domain() local
1938 domain = alloc_domain_mem(); in alloc_domain()
1939 if (!domain) in alloc_domain()
1942 memset(domain, 0, sizeof(*domain)); in alloc_domain()
1943 domain->nid = NUMA_NO_NODE; in alloc_domain()
1945 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; in alloc_domain()
1946 domain->has_iotlb_device = false; in alloc_domain()
1947 INIT_LIST_HEAD(&domain->devices); in alloc_domain()
1948 INIT_LIST_HEAD(&domain->subdevices); in alloc_domain()
1950 return domain; in alloc_domain()
1954 static int domain_attach_iommu(struct dmar_domain *domain, in domain_attach_iommu() argument
1963 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1964 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1970 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1975 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1977 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1978 domain->nid = iommu->node; in domain_attach_iommu()
1980 domain_update_iommu_cap(domain); in domain_attach_iommu()
1986 static void domain_detach_iommu(struct dmar_domain *domain, in domain_detach_iommu() argument
1994 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
1995 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
1996 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
2000 domain_update_iommu_cap(domain); in domain_detach_iommu()
2001 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
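domain_attach_iommu() keeps a per-IOMMU reference count: a hardware domain ID (DID) is allocated from iommu->domain_ids only on the 0-to-1 transition and released by domain_detach_iommu() on the 1-to-0 transition, so a domain spanning devices behind several IOMMUs holds one DID per IOMMU. A condensed sketch of the attach path, with the elided bitmap handling filled in as it plausibly reads in this version (device_domain_lock and iommu->lock are assumed held; cap_ndoms() and find_first_zero_bit() are assumptions from the surrounding driver):

    static int domain_attach_iommu(struct dmar_domain *domain,
    			       struct intel_iommu *iommu)
    {
    	unsigned long ndomains;
    	int num;

    	domain->iommu_refcnt[iommu->seq_id] += 1;
    	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
    		/* First device behind this IOMMU: allocate a DID. */
    		ndomains = cap_ndoms(iommu->cap);
    		num = find_first_zero_bit(iommu->domain_ids, ndomains);
    		if (num >= ndomains) {
    			pr_err("%s: No free domain ids\n", iommu->name);
    			domain->iommu_refcnt[iommu->seq_id] -= 1;
    			return -ENOSPC;
    		}

    		set_bit(num, iommu->domain_ids);
    		set_iommu_domain(iommu, num, domain);

    		domain->iommu_did[iommu->seq_id] = num;
    		domain->nid = iommu->node;

    		domain_update_iommu_cap(domain);
    	}

    	return 0;
    }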
2019 static void domain_exit(struct dmar_domain *domain) in domain_exit() argument
2023 domain_remove_dev_info(domain); in domain_exit()
2025 if (domain->pgd) { in domain_exit()
2028 freelist = domain_unmap(domain, 0, in domain_exit()
2029 DOMAIN_MAX_PFN(domain->gaw), NULL); in domain_exit()
2033 free_domain_mem(domain); in domain_exit()
2085 static int domain_context_mapping_one(struct dmar_domain *domain, in domain_context_mapping_one() argument
2090 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
2099 if (hw_pass_through && domain_type_is_si(domain)) in domain_context_mapping_one()
2105 BUG_ON(!domain->pgd); in domain_context_mapping_one()
2162 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2168 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
2178 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2185 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2237 struct dmar_domain *domain; member
2247 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2253 domain_context_mapping(struct dmar_domain *domain, struct device *dev) in domain_context_mapping() argument
2267 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2270 data.domain = domain; in domain_context_mapping()
2311 static inline int hardware_largepage_caps(struct dmar_domain *domain, in hardware_largepage_caps() argument
2319 support = domain->iommu_superpage; in hardware_largepage_caps()
2343 static void switch_to_super_page(struct dmar_domain *domain, in switch_to_super_page() argument
2353 pte = pfn_to_dma_pte(domain, start_pfn, &level); in switch_to_super_page()
2356 dma_pte_free_pagetable(domain, start_pfn, in switch_to_super_page()
2360 for_each_domain_iommu(i, domain) in switch_to_super_page()
2361 iommu_flush_iotlb_psi(g_iommus[i], domain, in switch_to_super_page()
2374 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping() argument
2383 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); in __domain_mapping()
2390 if (domain_use_first_level(domain)) { in __domain_mapping()
2402 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, in __domain_mapping()
2405 pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); in __domain_mapping()
2416 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl); in __domain_mapping()
2461 domain_flush_cache(domain, first_pte, in __domain_mapping()
2488 if (hw_pass_through && domain_type_is_si(info->domain)) in domain_context_clear_one()
2491 did_old = info->domain->iommu_did[iommu->seq_id]; in domain_context_clear_one()
2526 static void domain_remove_dev_info(struct dmar_domain *domain) in domain_remove_dev_info() argument
2532 list_for_each_entry_safe(info, tmp, &domain->devices, link) in domain_remove_dev_info()
2550 return info->domain; in find_domain()
2569 struct dmar_domain *domain, in domain_setup_first_level() argument
2573 struct dma_pte *pgd = domain->pgd; in domain_setup_first_level()
2581 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2596 if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED) in domain_setup_first_level()
2600 domain->iommu_did[iommu->seq_id], in domain_setup_first_level()
2613 struct dmar_domain *domain) in dmar_insert_one_dev_info() argument
2640 info->domain = domain; in dmar_insert_one_dev_info()
2676 found = info2->domain; in dmar_insert_one_dev_info()
2689 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2698 list_add(&info->link, &domain->devices); in dmar_insert_one_dev_info()
2715 if (hw_pass_through && domain_type_is_si(domain)) in dmar_insert_one_dev_info()
2716 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2718 else if (domain_use_first_level(domain)) in dmar_insert_one_dev_info()
2719 ret = domain_setup_first_level(iommu, domain, dev, in dmar_insert_one_dev_info()
2722 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2732 if (dev && domain_context_mapping(domain, dev)) { in dmar_insert_one_dev_info()
2738 return domain; in dmar_insert_one_dev_info()
2741 static int iommu_domain_identity_map(struct dmar_domain *domain, in iommu_domain_identity_map() argument
2749 dma_pte_clear_range(domain, first_vpfn, last_vpfn); in iommu_domain_identity_map()
2751 return __domain_mapping(domain, first_vpfn, in iommu_domain_identity_map()
2756 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2815 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) in domain_add_dev_info() argument
2825 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2826 if (ndomain != domain) in domain_add_dev_info()
4477 struct dmar_domain *domain; in __dmar_remove_one_dev_info() local
4487 domain = info->domain; in __dmar_remove_one_dev_info()
4502 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
4520 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init() argument
4525 domain->gaw = guest_width; in md_domain_init()
4527 domain->agaw = width_to_agaw(adjust_width); in md_domain_init()
4529 domain->iommu_coherency = false; in md_domain_init()
4530 domain->iommu_snooping = false; in md_domain_init()
4531 domain->iommu_superpage = 0; in md_domain_init()
4532 domain->max_addr = 0; in md_domain_init()
4535 domain->pgd = alloc_pgtable_page(domain->nid); in md_domain_init()
4536 if (!domain->pgd) in md_domain_init()
4538 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in md_domain_init()
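md_domain_init() sizes the domain from the requested guest address width and allocates the top-level page directory. The elided line 4526 rounds guest_width to something the hardware AGAW can express; a sketch of the whole function, assuming guestwidth_to_adjustwidth() as used elsewhere in the driver:

    static int md_domain_init(struct dmar_domain *domain, int guest_width)
    {
    	int adjust_width;

    	/* Calculate AGAW: round the requested guest width to a width
    	 * the hardware can express, then derive the table depth. */
    	domain->gaw = guest_width;
    	adjust_width = guestwidth_to_adjustwidth(guest_width);
    	domain->agaw = width_to_agaw(adjust_width);

    	domain->iommu_coherency = false;
    	domain->iommu_snooping = false;
    	domain->iommu_superpage = 0;
    	domain->max_addr = 0;

    	/* Always allocate the top-level page directory. */
    	domain->pgd = alloc_pgtable_page(domain->nid);
    	if (!domain->pgd)
    		return -ENOMEM;
    	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
    	return 0;
    }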
4545 struct iommu_domain *domain; in intel_iommu_domain_alloc() local
4562 domain = &dmar_domain->domain; in intel_iommu_domain_alloc()
4563 domain->geometry.aperture_start = 0; in intel_iommu_domain_alloc()
4564 domain->geometry.aperture_end = in intel_iommu_domain_alloc()
4566 domain->geometry.force_aperture = true; in intel_iommu_domain_alloc()
4568 return domain; in intel_iommu_domain_alloc()
4570 return &si_domain->domain; in intel_iommu_domain_alloc()
4578 static void intel_iommu_domain_free(struct iommu_domain *domain) in intel_iommu_domain_free() argument
4580 if (domain != &si_domain->domain) in intel_iommu_domain_free()
4581 domain_exit(to_dmar_domain(domain)); in intel_iommu_domain_free()
4589 is_aux_domain(struct device *dev, struct iommu_domain *domain) in is_aux_domain() argument
4594 domain->type == IOMMU_DOMAIN_UNMANAGED; in is_aux_domain()
4598 lookup_subdev_info(struct dmar_domain *domain, struct device *dev) in lookup_subdev_info() argument
4602 if (!list_empty(&domain->subdevices)) { in lookup_subdev_info()
4603 list_for_each_entry(sinfo, &domain->subdevices, link_domain) { in lookup_subdev_info()
4612 static int auxiliary_link_device(struct dmar_domain *domain, in auxiliary_link_device() argument
4616 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); in auxiliary_link_device()
4626 sinfo->domain = domain; in auxiliary_link_device()
4629 list_add(&sinfo->link_domain, &domain->subdevices); in auxiliary_link_device()
4635 static int auxiliary_unlink_device(struct dmar_domain *domain, in auxiliary_unlink_device() argument
4639 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); in auxiliary_unlink_device()
4656 static int aux_domain_add_dev(struct dmar_domain *domain, in aux_domain_add_dev() argument
4667 if (domain->default_pasid <= 0) { in aux_domain_add_dev()
4678 domain->default_pasid = pasid; in aux_domain_add_dev()
4682 ret = auxiliary_link_device(domain, dev); in aux_domain_add_dev()
4700 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
4705 if (domain_use_first_level(domain)) in aux_domain_add_dev()
4706 ret = domain_setup_first_level(iommu, domain, dev, in aux_domain_add_dev()
4707 domain->default_pasid); in aux_domain_add_dev()
4709 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
4710 domain->default_pasid); in aux_domain_add_dev()
4721 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
4724 auxiliary_unlink_device(domain, dev); in aux_domain_add_dev()
4727 if (list_empty(&domain->subdevices) && domain->default_pasid > 0) in aux_domain_add_dev()
4728 ioasid_put(domain->default_pasid); in aux_domain_add_dev()
4733 static void aux_domain_remove_dev(struct dmar_domain *domain, in aux_domain_remove_dev() argument
4740 if (!is_aux_domain(dev, &domain->domain)) in aux_domain_remove_dev()
4747 if (!auxiliary_unlink_device(domain, dev)) { in aux_domain_remove_dev()
4750 domain->default_pasid, false); in aux_domain_remove_dev()
4751 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
4757 if (list_empty(&domain->subdevices) && domain->default_pasid > 0) in aux_domain_remove_dev()
4758 ioasid_put(domain->default_pasid); in aux_domain_remove_dev()
4761 static int prepare_domain_attach_device(struct iommu_domain *domain, in prepare_domain_attach_device() argument
4764 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in prepare_domain_attach_device()
4809 static int intel_iommu_attach_device(struct iommu_domain *domain, in intel_iommu_attach_device() argument
4814 if (domain->type == IOMMU_DOMAIN_UNMANAGED && in intel_iommu_attach_device()
4820 if (is_aux_domain(dev, domain)) in intel_iommu_attach_device()
4832 ret = prepare_domain_attach_device(domain, dev); in intel_iommu_attach_device()
4836 return domain_add_dev_info(to_dmar_domain(domain), dev); in intel_iommu_attach_device()
4839 static int intel_iommu_aux_attach_device(struct iommu_domain *domain, in intel_iommu_aux_attach_device() argument
4844 if (!is_aux_domain(dev, domain)) in intel_iommu_aux_attach_device()
4847 ret = prepare_domain_attach_device(domain, dev); in intel_iommu_aux_attach_device()
4851 return aux_domain_add_dev(to_dmar_domain(domain), dev); in intel_iommu_aux_attach_device()
4854 static void intel_iommu_detach_device(struct iommu_domain *domain, in intel_iommu_detach_device() argument
4860 static void intel_iommu_aux_detach_device(struct iommu_domain *domain, in intel_iommu_aux_detach_device() argument
4863 aux_domain_remove_dev(to_dmar_domain(domain), dev); in intel_iommu_aux_detach_device()
4914 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, in intel_iommu_sva_invalidate() argument
4917 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_sva_invalidate()
5045 static int intel_iommu_map(struct iommu_domain *domain, in intel_iommu_map() argument
5049 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_map()
5081 static int intel_iommu_map_pages(struct iommu_domain *domain, in intel_iommu_map_pages() argument
5096 ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp); in intel_iommu_map_pages()
5103 static size_t intel_iommu_unmap(struct iommu_domain *domain, in intel_iommu_unmap() argument
5107 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_unmap()
5132 iommu_iotlb_gather_add_page(domain, gather, iova, size); in intel_iommu_unmap()
5137 static size_t intel_iommu_unmap_pages(struct iommu_domain *domain, in intel_iommu_unmap_pages() argument
5145 return intel_iommu_unmap(domain, iova, size, gather); in intel_iommu_unmap_pages()
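intel_iommu_unmap_pages() collapses the pgsize/pgcount pair into a byte count before delegating to intel_iommu_unmap(); a sketch, assuming the usual __ffs() shift trick on a power-of-two pgsize:

    static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
    				      unsigned long iova, size_t pgsize,
    				      size_t pgcount,
    				      struct iommu_iotlb_gather *gather)
    {
    	unsigned long pgshift = __ffs(pgsize);	/* pgsize is a power of two */
    	size_t size = pgcount << pgshift;

    	return intel_iommu_unmap(domain, iova, size, gather);
    }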
5148 static void intel_iommu_tlb_sync(struct iommu_domain *domain, in intel_iommu_tlb_sync() argument
5151 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_tlb_sync()
5168 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, in intel_iommu_iova_to_phys() argument
5171 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_iova_to_phys()
5289 struct dmar_domain *domain; in intel_iommu_enable_pasid() local
5294 domain = find_domain(dev); in intel_iommu_enable_pasid()
5295 if (!domain) in intel_iommu_enable_pasid()
5317 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5539 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) in intel_iommu_aux_get_pasid() argument
5541 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_aux_get_pasid()
5547 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, in intel_iommu_is_attach_deferred() argument
5554 intel_iommu_enable_nesting(struct iommu_domain *domain) in intel_iommu_enable_nesting() argument
5556 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_enable_nesting()
5588 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain, in intel_iommu_iotlb_sync_map() argument
5591 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_iotlb_sync_map()