Lines matching refs: domain (cross-reference hits for the identifier "domain" in the Linux Intel VT-d IOMMU driver, drivers/iommu/intel-iommu.c; the leading number on each hit is that file's own line number, and the trailing annotation gives the symbol's role or enclosing function)
374 struct dmar_domain *domain; /* pointer to domain */ member
408 struct dmar_domain *domain[HIGH_WATER_MARK]; member
423 static void domain_exit(struct dmar_domain *domain);
424 static void domain_remove_dev_info(struct dmar_domain *domain);
425 static void domain_remove_one_dev_info(struct dmar_domain *domain,
429 static int domain_detach_iommu(struct dmar_domain *domain,
542 static inline int domain_type_is_vm(struct dmar_domain *domain) in domain_type_is_vm() argument
544 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; in domain_type_is_vm()
547 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain) in domain_type_is_vm_or_si() argument
549 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE | in domain_type_is_vm_or_si()
553 static inline int domain_pfn_supported(struct dmar_domain *domain, in domain_pfn_supported() argument
556 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
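The three inline helpers above appear only as fragments. Pieced together (the final return statement of domain_pfn_supported() is not visible in the hits and is taken, as an assumption, from the upstream source of this kernel generation), they amount to:

    static inline int domain_type_is_vm(struct dmar_domain *domain)
    {
            return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
    }

    static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
    {
            return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
                                    DOMAIN_FLAG_STATIC_IDENTITY);
    }

    static inline int domain_pfn_supported(struct dmar_domain *domain,
                                           unsigned long pfn)
    {
            int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

            /* assumed body: the pfn fits iff no bits above addr_width are set */
            return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
    }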
595 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) in domain_get_iommu() argument
600 BUG_ON(domain_type_is_vm_or_si(domain)); in domain_get_iommu()
601 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus); in domain_get_iommu()
608 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency() argument
614 domain->iommu_coherency = 1; in domain_update_iommu_coherency()
616 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { in domain_update_iommu_coherency()
619 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
630 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
682 static void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap() argument
684 domain_update_iommu_coherency(domain); in domain_update_iommu_cap()
685 domain->iommu_snooping = domain_update_iommu_snooping(NULL); in domain_update_iommu_cap()
686 domain->iommu_superpage = domain_update_iommu_superpage(NULL); in domain_update_iommu_cap()
754 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache() argument
757 if (!domain->iommu_coherency) in domain_flush_cache()
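domain_flush_cache() is the guard used throughout this file: page-table memory is only clflushed when the IOMMU cannot snoop CPU caches. A minimal sketch of the whole function (the clflush_cache_range() call is not in the hits and is assumed from upstream):

    static void domain_flush_cache(struct dmar_domain *domain,
                                   void *addr, int size)
    {
            if (!domain->iommu_coherency)
                    clflush_cache_range(addr, size);
    }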
850 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte() argument
854 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
857 BUG_ON(!domain->pgd); in pfn_to_dma_pte()
859 if (!domain_pfn_supported(domain, pfn)) in pfn_to_dma_pte()
863 parent = domain->pgd; in pfn_to_dma_pte()
878 tmp_page = alloc_pgtable_page(domain->nid); in pfn_to_dma_pte()
883 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); in pfn_to_dma_pte()
889 domain_flush_cache(domain, pte, sizeof(*pte)); in pfn_to_dma_pte()
906 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte() argument
911 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
914 parent = domain->pgd; in dma_pfn_level_pte()
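pfn_to_dma_pte() is the allocating page-table walker; dma_pfn_level_pte() is its lookup-only sibling. A condensed sketch of the descent loop, assembled from the hits above with superpage handling elided; pfn_level_offset(), dma_pte_present(), virt_to_dma_pfn(), dma_pte_addr() and the cmpxchg64() race handling are assumptions drawn from the upstream source:

    parent = domain->pgd;
    while (level > 0) {
            pte = &parent[pfn_level_offset(pfn, level)];
            if (level == *target_level)
                    break;

            if (!dma_pte_present(pte)) {
                    /* allocate a missing intermediate table */
                    tmp_page = alloc_pgtable_page(domain->nid);
                    if (!tmp_page)
                            return NULL;
                    domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                    pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT)
                             | DMA_PTE_READ | DMA_PTE_WRITE;
                    if (cmpxchg64(&pte->val, 0ULL, pteval))
                            free_pgtable_page(tmp_page);  /* lost the race */
                    else
                            domain_flush_cache(domain, pte, sizeof(*pte));
            }
            parent = phys_to_virt(dma_pte_addr(pte));
            level--;
    }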
938 static void dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range() argument
945 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_clear_range()
946 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_clear_range()
952 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range()
963 domain_flush_cache(domain, first_pte, in dma_pte_clear_range()
969 static void dma_pte_free_level(struct dmar_domain *domain, int level, in dma_pte_free_level() argument
987 dma_pte_free_level(domain, level - 1, level_pte, in dma_pte_free_level()
994 domain_flush_cache(domain, pte, sizeof(*pte)); in dma_pte_free_level()
1003 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable() argument
1007 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_free_pagetable()
1008 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_free_pagetable()
1011 dma_pte_clear_range(domain, start_pfn, last_pfn); in dma_pte_free_pagetable()
1014 dma_pte_free_level(domain, agaw_to_level(domain->agaw), in dma_pte_free_pagetable()
1015 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
1018 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
1019 free_pgtable_page(domain->pgd); in dma_pte_free_pagetable()
1020 domain->pgd = NULL; in dma_pte_free_pagetable()
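dma_pte_free_pagetable() is almost fully visible in the hits; completed here (the start/last ordering BUG_ON and the comments are assumptions from upstream):

    static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                       unsigned long start_pfn,
                                       unsigned long last_pfn)
    {
            BUG_ON(!domain_pfn_supported(domain, start_pfn));
            BUG_ON(!domain_pfn_supported(domain, last_pfn));
            BUG_ON(start_pfn > last_pfn);

            /* clear the leaf PTEs first ... */
            dma_pte_clear_range(domain, start_pfn, last_pfn);

            /* ... then free fully-covered intermediate tables */
            dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                               domain->pgd, 0, start_pfn, last_pfn);

            /* if the entire address space was freed, drop the root too */
            if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                    free_pgtable_page(domain->pgd);
                    domain->pgd = NULL;
            }
    }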
1030 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, in dma_pte_list_pagetables() argument
1046 freelist = dma_pte_list_pagetables(domain, level - 1, in dma_pte_list_pagetables()
1054 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, in dma_pte_clear_level() argument
1079 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
1087 freelist = dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
1097 domain_flush_cache(domain, first_pte, in dma_pte_clear_level()
1106 struct page *domain_unmap(struct dmar_domain *domain, in domain_unmap() argument
1112 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in domain_unmap()
1113 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in domain_unmap()
1117 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
1118 domain->pgd, 0, start_pfn, last_pfn, NULL); in domain_unmap()
1121 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
1122 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
1126 domain->pgd = NULL; in domain_unmap()
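domain_unmap() uses the freelist idiom: page-table pages cannot be freed immediately (a not-yet-flushed IOTLB may still reference them), so they are chained through page->freelist and the chain is returned for the caller to free after the flush. The tail of the function, with the chaining lines (not visible in the hits) assumed from upstream:

    freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
                                   domain->pgd, 0, start_pfn, last_pfn, NULL);

    /* freeing everything? then the root table goes on the list too */
    if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
            struct page *pgd_page = virt_to_page(domain->pgd);

            pgd_page->freelist = freelist;   /* assumed chaining */
            freelist = pgd_page;
            domain->pgd = NULL;
    }

    return freelist;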
1291 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1306 list_for_each_entry(info, &domain->devices, link) in iommu_support_dev_iotlb()
1345 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, in iommu_flush_dev_iotlb() argument
1353 list_for_each_entry(info, &domain->devices, link) { in iommu_flush_dev_iotlb()
1492 struct dmar_domain *domain; in free_dmar_iommu() local
1504 domain = iommu->domains[i]; in free_dmar_iommu()
1506 if (domain_detach_iommu(domain, iommu) == 0 && in free_dmar_iommu()
1507 !domain_type_is_vm(domain)) in free_dmar_iommu()
1508 domain_exit(domain); in free_dmar_iommu()
1530 struct dmar_domain *domain; in alloc_domain() local
1532 domain = alloc_domain_mem(); in alloc_domain()
1533 if (!domain) in alloc_domain()
1536 memset(domain, 0, sizeof(*domain)); in alloc_domain()
1537 domain->nid = -1; in alloc_domain()
1538 domain->flags = flags; in alloc_domain()
1539 spin_lock_init(&domain->iommu_lock); in alloc_domain()
1540 INIT_LIST_HEAD(&domain->devices); in alloc_domain()
1542 domain->id = atomic_inc_return(&vm_domid); in alloc_domain()
1544 return domain; in alloc_domain()
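alloc_domain() is nearly complete in the hits; the one piece not shown is that the atomic vm_domid counter is consulted for virtual-machine domains only (hardware domains receive their id later, from iommu_attach_domain()), which is the upstream behaviour and assumed here:

    static struct dmar_domain *alloc_domain(int flags)
    {
            struct dmar_domain *domain;

            domain = alloc_domain_mem();
            if (!domain)
                    return NULL;

            memset(domain, 0, sizeof(*domain));
            domain->nid = -1;                 /* no NUMA node yet */
            domain->flags = flags;
            spin_lock_init(&domain->iommu_lock);
            INIT_LIST_HEAD(&domain->devices);
            if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)   /* assumed guard */
                    domain->id = atomic_inc_return(&vm_domid);

            return domain;
    }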
1547 static int __iommu_attach_domain(struct dmar_domain *domain, in __iommu_attach_domain() argument
1557 iommu->domains[num] = domain; in __iommu_attach_domain()
1565 static int iommu_attach_domain(struct dmar_domain *domain, in iommu_attach_domain() argument
1572 num = __iommu_attach_domain(domain, iommu); in iommu_attach_domain()
1580 static int iommu_attach_vm_domain(struct dmar_domain *domain, in iommu_attach_vm_domain() argument
1588 if (iommu->domains[num] == domain) in iommu_attach_vm_domain()
1591 return __iommu_attach_domain(domain, iommu); in iommu_attach_vm_domain()
1594 static void iommu_detach_domain(struct dmar_domain *domain, in iommu_detach_domain() argument
1601 if (domain_type_is_vm_or_si(domain)) { in iommu_detach_domain()
1604 if (iommu->domains[num] == domain) { in iommu_detach_domain()
1611 clear_bit(domain->id, iommu->domain_ids); in iommu_detach_domain()
1612 iommu->domains[domain->id] = NULL; in iommu_detach_domain()
1617 static void domain_attach_iommu(struct dmar_domain *domain, in domain_attach_iommu() argument
1622 spin_lock_irqsave(&domain->iommu_lock, flags); in domain_attach_iommu()
1623 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_attach_iommu()
1624 domain->iommu_count++; in domain_attach_iommu()
1625 if (domain->iommu_count == 1) in domain_attach_iommu()
1626 domain->nid = iommu->node; in domain_attach_iommu()
1627 domain_update_iommu_cap(domain); in domain_attach_iommu()
1629 spin_unlock_irqrestore(&domain->iommu_lock, flags); in domain_attach_iommu()
1632 static int domain_detach_iommu(struct dmar_domain *domain, in domain_detach_iommu() argument
1638 spin_lock_irqsave(&domain->iommu_lock, flags); in domain_detach_iommu()
1639 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_detach_iommu()
1640 count = --domain->iommu_count; in domain_detach_iommu()
1641 domain_update_iommu_cap(domain); in domain_detach_iommu()
1643 spin_unlock_irqrestore(&domain->iommu_lock, flags); in domain_detach_iommu()
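domain_attach_iommu()/domain_detach_iommu() maintain a per-domain bitmap of attached IOMMUs (iommu_bmp, keyed by seq_id) plus a reference count, recomputing the domain's capabilities whenever membership changes. free_dmar_iommu() above relies on the detach return value: when it drops to zero on a non-VM domain, the domain is destroyed. The detach side, reconstructed (the INT_MAX initial value and the return are assumptions from upstream):

    static int domain_detach_iommu(struct dmar_domain *domain,
                                   struct intel_iommu *iommu)
    {
            unsigned long flags;
            int count = INT_MAX;   /* "still attached elsewhere" if bit unset */

            spin_lock_irqsave(&domain->iommu_lock, flags);
            if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
                    count = --domain->iommu_count;
                    domain_update_iommu_cap(domain);
            }
            spin_unlock_irqrestore(&domain->iommu_lock, flags);

            return count;
    }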
1690 static void domain_reserve_special_ranges(struct dmar_domain *domain) in domain_reserve_special_ranges() argument
1692 copy_reserved_iova(&reserved_iova_list, &domain->iovad); in domain_reserve_special_ranges()
1709 static int domain_init(struct dmar_domain *domain, int guest_width) in domain_init() argument
1715 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); in domain_init()
1716 domain_reserve_special_ranges(domain); in domain_init()
1719 iommu = domain_get_iommu(domain); in domain_init()
1722 domain->gaw = guest_width; in domain_init()
1733 domain->agaw = agaw; in domain_init()
1736 domain->iommu_coherency = 1; in domain_init()
1738 domain->iommu_coherency = 0; in domain_init()
1741 domain->iommu_snooping = 1; in domain_init()
1743 domain->iommu_snooping = 0; in domain_init()
1746 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1748 domain->iommu_superpage = 0; in domain_init()
1750 domain->nid = iommu->node; in domain_init()
1753 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in domain_init()
1754 if (!domain->pgd) in domain_init()
1756 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1760 static void domain_exit(struct dmar_domain *domain) in domain_exit() argument
1766 if (!domain) in domain_exit()
1774 domain_remove_dev_info(domain); in domain_exit()
1777 put_iova_domain(&domain->iovad); in domain_exit()
1779 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in domain_exit()
1783 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) in domain_exit()
1784 iommu_detach_domain(domain, g_iommus[i]); in domain_exit()
1789 free_domain_mem(domain); in domain_exit()
1792 static int domain_context_mapping_one(struct dmar_domain *domain, in domain_context_mapping_one() argument
1806 BUG_ON(!domain->pgd); in domain_context_mapping_one()
1819 id = domain->id; in domain_context_mapping_one()
1820 pgd = domain->pgd; in domain_context_mapping_one()
1822 if (domain_type_is_vm_or_si(domain)) { in domain_context_mapping_one()
1823 if (domain_type_is_vm(domain)) { in domain_context_mapping_one()
1824 id = iommu_attach_vm_domain(domain, iommu); in domain_context_mapping_one()
1837 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1850 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
1868 domain_flush_cache(domain, context, sizeof(*context)); in domain_context_mapping_one()
1888 domain_attach_iommu(domain, iommu); in domain_context_mapping_one()
1894 struct dmar_domain *domain; member
1904 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
1910 domain_context_mapping(struct dmar_domain *domain, struct device *dev, in domain_context_mapping() argument
1922 return domain_context_mapping_one(domain, iommu, bus, devfn, in domain_context_mapping()
1925 data.domain = domain; in domain_context_mapping()
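domain_context_mapping() programs one context entry for a plain device, but a PCI device must be covered under every DMA alias; hence the small domain_context_mapping_data struct (hit at 1894) and the callback. A sketch of the dispatch, with device_to_iommu() and the remaining data fields assumed from upstream:

    iommu = device_to_iommu(dev, &bus, &devfn);
    if (!iommu)
            return -ENODEV;

    if (!dev_is_pci(dev))
            return domain_context_mapping_one(domain, iommu, bus, devfn,
                                              translation);

    data.domain = domain;
    data.iommu = iommu;
    data.translation = translation;

    return pci_for_each_dma_alias(to_pci_dev(dev),
                                  &domain_context_mapping_cb, &data);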
1966 static inline int hardware_largepage_caps(struct dmar_domain *domain, in hardware_largepage_caps() argument
1974 support = domain->iommu_superpage; in hardware_largepage_caps()
1993 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping() argument
2003 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); in __domain_mapping()
2029 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); in __domain_mapping()
2031 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); in __domain_mapping()
2043 dma_pte_free_pagetable(domain, iov_pfn, in __domain_mapping()
2090 domain_flush_cache(domain, first_pte, in __domain_mapping()
2101 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_sg_mapping() argument
2105 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); in domain_sg_mapping()
2108 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_pfn_mapping() argument
2112 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); in domain_pfn_mapping()
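Both mapping entry points are thin wrappers over the single core, __domain_mapping(): one feeds it a scatterlist, the other a contiguous physical range. Their bodies are fully visible above; the parameter lists are completed from upstream:

    static inline int domain_sg_mapping(struct dmar_domain *domain,
                                        unsigned long iov_pfn,
                                        struct scatterlist *sg,
                                        unsigned long nr_pages, int prot)
    {
            return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
    }

    static inline int domain_pfn_mapping(struct dmar_domain *domain,
                                         unsigned long iov_pfn,
                                         unsigned long phys_pfn,
                                         unsigned long nr_pages, int prot)
    {
            return __domain_mapping(domain, iov_pfn, NULL, phys_pfn,
                                    nr_pages, prot);
    }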
2135 static void domain_remove_dev_info(struct dmar_domain *domain) in domain_remove_dev_info() argument
2141 list_for_each_entry_safe(info, tmp, &domain->devices, link) { in domain_remove_dev_info()
2148 if (domain_type_is_vm(domain)) { in domain_remove_dev_info()
2150 domain_detach_iommu(domain, info->iommu); in domain_remove_dev_info()
2170 return info->domain; in find_domain()
2190 struct dmar_domain *domain) in dmar_insert_dev_info() argument
2203 info->domain = domain; in dmar_insert_dev_info()
2213 found = info2->domain; in dmar_insert_dev_info()
2222 list_add(&info->link, &domain->devices); in dmar_insert_dev_info()
2228 return domain; in dmar_insert_dev_info()
2240 struct dmar_domain *domain, *tmp; in get_domain_for_dev() local
2247 domain = find_domain(dev); in get_domain_for_dev()
2248 if (domain) in get_domain_for_dev()
2249 return domain; in get_domain_for_dev()
2266 domain = info->domain; in get_domain_for_dev()
2276 domain = alloc_domain(0); in get_domain_for_dev()
2277 if (!domain) in get_domain_for_dev()
2279 domain->id = iommu_attach_domain(domain, iommu); in get_domain_for_dev()
2280 if (domain->id < 0) { in get_domain_for_dev()
2281 free_domain_mem(domain); in get_domain_for_dev()
2284 domain_attach_iommu(domain, iommu); in get_domain_for_dev()
2285 if (domain_init(domain, gaw)) { in get_domain_for_dev()
2286 domain_exit(domain); in get_domain_for_dev()
2293 dma_alias & 0xff, NULL, domain); in get_domain_for_dev()
2295 if (!tmp || tmp != domain) { in get_domain_for_dev()
2296 domain_exit(domain); in get_domain_for_dev()
2297 domain = tmp; in get_domain_for_dev()
2300 if (!domain) in get_domain_for_dev()
2305 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); in get_domain_for_dev()
2307 if (!tmp || tmp != domain) { in get_domain_for_dev()
2308 domain_exit(domain); in get_domain_for_dev()
2309 domain = tmp; in get_domain_for_dev()
2312 return domain; in get_domain_for_dev()
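The hits above trace the whole of get_domain_for_dev(). Stripped of locking and error paths, the control flow looks roughly like this (a sketch, not the verbatim function):

    domain = find_domain(dev);
    if (domain)
            return domain;                  /* fast path: already set up */

    /* ... a PCI DMA alias may already own a domain: reuse info->domain ... */

    domain = alloc_domain(0);
    domain->id = iommu_attach_domain(domain, iommu);  /* claim an id */
    domain_attach_iommu(domain, iommu);               /* bitmap + caps */
    if (domain_init(domain, gaw))                     /* iova + pgd */
            domain_exit(domain);

    /* register under the DMA alias, then under the device itself; if a
     * racing thread inserted first, adopt its domain and drop ours */
    tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
    if (!tmp || tmp != domain) {
            domain_exit(domain);
            domain = tmp;
    }

    return domain;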
2320 static int iommu_domain_identity_map(struct dmar_domain *domain, in iommu_domain_identity_map() argument
2327 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), in iommu_domain_identity_map()
2334 start, end, domain->id); in iommu_domain_identity_map()
2339 dma_pte_clear_range(domain, first_vpfn, last_vpfn); in iommu_domain_identity_map()
2341 return domain_pfn_mapping(domain, first_vpfn, first_vpfn, in iommu_domain_identity_map()
2350 struct dmar_domain *domain; in iommu_prepare_identity_map() local
2353 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_prepare_identity_map()
2354 if (!domain) in iommu_prepare_identity_map()
2361 if (domain == si_domain && hw_pass_through) { in iommu_prepare_identity_map()
2381 if (end >> agaw_to_width(domain->agaw)) { in iommu_prepare_identity_map()
2384 agaw_to_width(domain->agaw), in iommu_prepare_identity_map()
2392 ret = iommu_domain_identity_map(domain, start, end); in iommu_prepare_identity_map()
2397 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); in iommu_prepare_identity_map()
2404 domain_exit(domain); in iommu_prepare_identity_map()
2443 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2506 return (info->domain == si_domain); in identity_mapping()
2511 static int domain_add_dev_info(struct dmar_domain *domain, in domain_add_dev_info() argument
2523 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2524 if (ndomain != domain) in domain_add_dev_info()
2527 ret = domain_context_mapping(domain, dev, translation); in domain_add_dev_info()
2529 domain_remove_one_dev_info(domain, dev); in domain_add_dev_info()
2932 struct dmar_domain *domain, in intel_alloc_iova() argument
2938 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); in intel_alloc_iova()
2946 iova = alloc_iova(&domain->iovad, nrpages, in intel_alloc_iova()
2951 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); in intel_alloc_iova()
2963 struct dmar_domain *domain; in __get_valid_domain_for_dev() local
2966 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); in __get_valid_domain_for_dev()
2967 if (!domain) { in __get_valid_domain_for_dev()
2975 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); in __get_valid_domain_for_dev()
2983 return domain; in __get_valid_domain_for_dev()
2993 return info->domain; in get_valid_domain_for_dev()
3053 struct dmar_domain *domain; in __intel_map_single() local
3066 domain = get_valid_domain_for_dev(dev); in __intel_map_single()
3067 if (!domain) in __intel_map_single()
3070 iommu = domain_get_iommu(domain); in __intel_map_single()
3073 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); in __intel_map_single()
3092 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), in __intel_map_single()
3099 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); in __intel_map_single()
3109 __free_iova(&domain->iovad, iova); in __intel_map_single()
3146 struct dmar_domain *domain = deferred_flush[i].domain[j]; in flush_unmaps() local
3150 iommu_flush_iotlb_psi(iommu, domain->id, in flush_unmaps()
3155 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], in flush_unmaps()
3158 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); in flush_unmaps()
3191 deferred_flush[iommu_id].domain[next] = dom; in add_unmap()
3206 struct dmar_domain *domain; in intel_unmap() local
3215 domain = find_domain(dev); in intel_unmap()
3216 BUG_ON(!domain); in intel_unmap()
3218 iommu = domain_get_iommu(domain); in intel_unmap()
3220 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); in intel_unmap()
3231 freelist = domain_unmap(domain, start_pfn, last_pfn); in intel_unmap()
3234 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap()
3237 __free_iova(&domain->iovad, iova); in intel_unmap()
3240 add_unmap(domain, iova, freelist); in intel_unmap()
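intel_unmap() shows the two reclamation policies. In strict mode the IOTLB is flushed synchronously and the iova plus the freelist of page-table pages are released on the spot; otherwise add_unmap() parks them in the per-IOMMU deferred_flush[] rings (bounded by HIGH_WATER_MARK, see the member at 408) until flush_unmaps() drains them. The tail, with the flush arguments and dma_free_pagelist() assumed from upstream:

    freelist = domain_unmap(domain, start_pfn, last_pfn);

    if (intel_iommu_strict) {
            iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                                  last_pfn - start_pfn + 1, !freelist, 0);
            dma_free_pagelist(freelist);      /* safe: IOTLB is clean */
            __free_iova(&domain->iovad, iova);
    } else {
            add_unmap(domain, iova, freelist);
            /* iova + tables are reclaimed when flush_unmaps() runs */
    }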
3341 struct dmar_domain *domain; in intel_map_sg() local
3354 domain = get_valid_domain_for_dev(dev); in intel_map_sg()
3355 if (!domain) in intel_map_sg()
3358 iommu = domain_get_iommu(domain); in intel_map_sg()
3363 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), in intel_map_sg()
3382 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); in intel_map_sg()
3384 dma_pte_free_pagetable(domain, start_vpfn, in intel_map_sg()
3386 __free_iova(&domain->iovad, iova); in intel_map_sg()
3392 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); in intel_map_sg()
3880 struct dmar_domain *domain; in device_notifier() local
3896 domain = find_domain(dev); in device_notifier()
3897 if (!domain) in device_notifier()
3901 domain_remove_one_dev_info(domain, dev); in device_notifier()
3902 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) in device_notifier()
3903 domain_exit(domain); in device_notifier()
4155 static void domain_remove_one_dev_info(struct dmar_domain *domain, in domain_remove_one_dev_info() argument
4169 list_for_each_entry_safe(info, tmp, &domain->devices, link) { in domain_remove_one_dev_info()
4199 domain_detach_iommu(domain, iommu); in domain_remove_one_dev_info()
4200 if (!domain_type_is_vm_or_si(domain)) in domain_remove_one_dev_info()
4201 iommu_detach_domain(domain, iommu); in domain_remove_one_dev_info()
4205 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init() argument
4209 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); in md_domain_init()
4210 domain_reserve_special_ranges(domain); in md_domain_init()
4213 domain->gaw = guest_width; in md_domain_init()
4215 domain->agaw = width_to_agaw(adjust_width); in md_domain_init()
4217 domain->iommu_coherency = 0; in md_domain_init()
4218 domain->iommu_snooping = 0; in md_domain_init()
4219 domain->iommu_superpage = 0; in md_domain_init()
4220 domain->max_addr = 0; in md_domain_init()
4223 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in md_domain_init()
4224 if (!domain->pgd) in md_domain_init()
4226 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in md_domain_init()
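md_domain_init() is the slimmed-down twin of domain_init() used for iommu-API (VM) domains: no hardware capabilities are consulted and everything is set pessimistically. Completed from the hits (the adjust_width computation and the return values are assumptions from upstream):

    static int md_domain_init(struct dmar_domain *domain, int guest_width)
    {
            int adjust_width;

            init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
            domain_reserve_special_ranges(domain);

            domain->gaw = guest_width;
            adjust_width = guestwidth_to_adjustwidth(guest_width);
            domain->agaw = width_to_agaw(adjust_width);

            domain->iommu_coherency = 0;      /* assume the worst */
            domain->iommu_snooping = 0;
            domain->iommu_superpage = 0;
            domain->max_addr = 0;

            /* always allocate the top pgd */
            domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
            if (!domain->pgd)
                    return -ENOMEM;
            domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
            return 0;
    }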
4230 static int intel_iommu_domain_init(struct iommu_domain *domain) in intel_iommu_domain_init() argument
4247 domain->priv = dmar_domain; in intel_iommu_domain_init()
4249 domain->geometry.aperture_start = 0; in intel_iommu_domain_init()
4250 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); in intel_iommu_domain_init()
4251 domain->geometry.force_aperture = true; in intel_iommu_domain_init()
4256 static void intel_iommu_domain_destroy(struct iommu_domain *domain) in intel_iommu_domain_destroy() argument
4258 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_domain_destroy()
4260 domain->priv = NULL; in intel_iommu_domain_destroy()
4264 static int intel_iommu_attach_device(struct iommu_domain *domain, in intel_iommu_attach_device() argument
4267 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_attach_device()
4329 static void intel_iommu_detach_device(struct iommu_domain *domain, in intel_iommu_detach_device() argument
4332 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_detach_device()
4337 static int intel_iommu_map(struct iommu_domain *domain, in intel_iommu_map() argument
4341 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_map()
4375 static size_t intel_iommu_unmap(struct iommu_domain *domain, in intel_iommu_unmap() argument
4378 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_unmap()
4423 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, in intel_iommu_iova_to_phys() argument
4426 struct dmar_domain *dmar_domain = domain->priv; in intel_iommu_iova_to_phys()
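These last callbacks are the driver's iommu-API surface. In this kernel generation (struct iommu_domain still carries a priv pointer and per-driver domain_init/domain_destroy hooks), they would be registered roughly as below; member names beyond the functions listed above are assumptions:

    static const struct iommu_ops intel_iommu_ops = {
            .domain_init    = intel_iommu_domain_init,
            .domain_destroy = intel_iommu_domain_destroy,
            .attach_dev     = intel_iommu_attach_device,
            .detach_dev     = intel_iommu_detach_device,
            .map            = intel_iommu_map,
            .unmap          = intel_iommu_unmap,
            .iova_to_phys   = intel_iommu_iova_to_phys,
            /* .add_device / .remove_device / .pgsize_bitmap elided */
    };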