Lines Matching refs:domain (each entry: source-file line number, matching line, enclosing function; the identifiers are from the Linux Intel VT-d driver, apparently drivers/iommu/intel-iommu.c of the v5.4 era)
310 #define for_each_domain_iommu(idx, domain) \ argument
312 if (domain->iommu_refcnt[idx])
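Only the first and last lines of this iterator survive in the listing; reconstructed from them, the macro in this era of the driver walks every possible IOMMU sequence ID and enters the body only for units the domain holds a reference on (g_num_of_iommus is the driver's global unit count):

    #define for_each_domain_iommu(idx, domain)              \
            for (idx = 0; idx < g_num_of_iommus; idx++)     \
                    if (domain->iommu_refcnt[idx])
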
340 static void domain_exit(struct dmar_domain *domain);
341 static void domain_remove_dev_info(struct dmar_domain *domain);
346 static int domain_detach_iommu(struct dmar_domain *domain,
349 static int intel_iommu_attach_device(struct iommu_domain *domain,
351 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
434 return container_of(dom, struct dmar_domain, domain); in to_dmar_domain()
497 struct dmar_domain *domain) in set_iommu_domain() argument
511 domains[did & 0xff] = domain; in set_iommu_domain()
550 static inline int domain_type_is_si(struct dmar_domain *domain) in domain_type_is_si() argument
552 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY; in domain_type_is_si()
555 static inline int domain_pfn_supported(struct dmar_domain *domain, in domain_pfn_supported() argument
558 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
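A compilable model of the check at line 555: a PFN is addressable by the domain's page tables only if it has no bits above the adjusted guest address width (AGAW) minus the 12-bit page offset. The AGAW-to-width mapping below (30 bits plus 9 per extra level, capped at 64) mirrors the driver's agaw_to_width(); everything else is a stand-in, and the shift assumes a 64-bit unsigned long:

    #include <stdbool.h>

    #define VTD_PAGE_SHIFT  12
    #define LEVEL_STRIDE    9          /* page-table bits per level */
    #define MAX_AGAW_WIDTH  64

    static inline int agaw_to_width(int agaw)
    {
            int w = 30 + agaw * LEVEL_STRIDE;
            return w < MAX_AGAW_WIDTH ? w : MAX_AGAW_WIDTH;
    }

    /* True iff pfn is representable in the domain's tables. */
    static inline bool domain_pfn_supported(int agaw, unsigned long pfn)
    {
            int addr_width = agaw_to_width(agaw) - VTD_PAGE_SHIFT;
            return !(pfn >> addr_width);
    }
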
597 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) in domain_get_iommu() argument
602 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA)) in domain_get_iommu()
605 for_each_domain_iommu(iommu_id, domain) in domain_get_iommu()
614 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency() argument
621 domain->iommu_coherency = 1; in domain_update_iommu_coherency()
623 for_each_domain_iommu(i, domain) { in domain_update_iommu_coherency()
626 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
637 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
689 static void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap() argument
691 domain_update_iommu_coherency(domain); in domain_update_iommu_cap()
692 domain->iommu_snooping = domain_update_iommu_snooping(NULL); in domain_update_iommu_cap()
693 domain->iommu_superpage = domain_update_iommu_superpage(NULL); in domain_update_iommu_cap()
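domain_update_iommu_cap() recomputes cached capability bits every time an IOMMU is attached or detached: coherency is effectively the AND over all attached units, so a single non-coherent unit forces explicit cache flushing of page-table writes for the whole domain (exactly what domain_flush_cache() at line 831 tests). A minimal compilable model of that reduction with simplified stand-in types; the driver consults ecap_coherent() and falls back to scanning all active units when none is attached:

    #include <stdbool.h>
    #include <stddef.h>

    /* Domain-wide coherency = AND of each attached unit's capability. */
    static bool domain_coherent(const bool *unit_coherent, size_t nunits)
    {
            for (size_t i = 0; i < nunits; i++)
                    if (!unit_coherent[i])
                            return false;
            return true;
    }
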
831 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache() argument
834 if (!domain->iommu_coherency) in domain_flush_cache()
881 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte() argument
885 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
888 BUG_ON(!domain->pgd); in pfn_to_dma_pte()
890 if (!domain_pfn_supported(domain, pfn)) in pfn_to_dma_pte()
894 parent = domain->pgd; in pfn_to_dma_pte()
909 tmp_page = alloc_pgtable_page(domain->nid); in pfn_to_dma_pte()
914 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); in pfn_to_dma_pte()
920 domain_flush_cache(domain, pte, sizeof(*pte)); in pfn_to_dma_pte()
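pfn_to_dma_pte() (line 881) is a classic radix-tree descent: each level consumes 9 bits of the PFN to index a 512-entry table, and missing intermediate tables are allocated on first touch (the alloc_pgtable_page() call at line 909). A compilable model with pointer-only entries; the real entries also carry the physical address and permission bits, and the real allocation is raced against concurrent walkers with cmpxchg:

    #include <stdlib.h>

    #define LEVEL_STRIDE 9
    #define LEVEL_MASK   ((1UL << LEVEL_STRIDE) - 1)

    struct pt { struct pt *slot[1 << LEVEL_STRIDE]; };

    /* Index of pfn within the table at a given level (level 1 = leaf). */
    static unsigned long level_offset(unsigned long pfn, int level)
    {
            return (pfn >> ((level - 1) * LEVEL_STRIDE)) & LEVEL_MASK;
    }

    /* Descend from the root, allocating missing tables, and return the
     * leaf table covering pfn (NULL on allocation failure). */
    static struct pt *walk_to_leaf(struct pt *root, unsigned long pfn,
                                   int total_level)
    {
            struct pt *parent = root;
            for (int level = total_level; level > 1; level--) {
                    unsigned long off = level_offset(pfn, level);
                    if (!parent->slot[off]) {
                            parent->slot[off] = calloc(1, sizeof(struct pt));
                            if (!parent->slot[off])
                                    return NULL;
                    }
                    parent = parent->slot[off];
            }
            return parent;
    }
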
936 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte() argument
941 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
944 parent = domain->pgd; in dma_pfn_level_pte()
968 static void dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range() argument
975 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_clear_range()
976 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_clear_range()
982 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range()
993 domain_flush_cache(domain, first_pte, in dma_pte_clear_range()
999 static void dma_pte_free_level(struct dmar_domain *domain, int level, in dma_pte_free_level() argument
1018 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
1030 domain_flush_cache(domain, pte, sizeof(*pte)); in dma_pte_free_level()
1042 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable() argument
1047 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_free_pagetable()
1048 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_free_pagetable()
1051 dma_pte_clear_range(domain, start_pfn, last_pfn); in dma_pte_free_pagetable()
1054 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
1055 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
1058 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
1059 free_pgtable_page(domain->pgd); in dma_pte_free_pagetable()
1060 domain->pgd = NULL; in dma_pte_free_pagetable()
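dma_pte_free_pagetable() (line 1042) tears down in two steps: dma_pte_clear_range() first wipes the leaf PTEs, then dma_pte_free_level() frees intermediate tables bottom-up, and the root page itself goes only when the cleared range spans the whole address space (the start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN test at line 1058). A whole-tree variant of the recursive free, reusing struct pt from the model above; the driver's version additionally restricts itself to [start_pfn, last_pfn] and keeps levels at or above retain_level:

    /* Post-order free: children first, then the table itself. */
    static void free_levels(struct pt *t, int level)
    {
            if (!t)
                    return;
            if (level > 1)
                    for (unsigned long i = 0; i <= LEVEL_MASK; i++)
                            free_levels(t->slot[i], level - 1);
            free(t);
    }
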
1070 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, in dma_pte_list_pagetables() argument
1086 freelist = dma_pte_list_pagetables(domain, level - 1, in dma_pte_list_pagetables()
1094 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, in dma_pte_clear_level() argument
1119 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
1127 freelist = dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
1137 domain_flush_cache(domain, first_pte, in dma_pte_clear_level()
1146 static struct page *domain_unmap(struct dmar_domain *domain, in domain_unmap() argument
1152 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in domain_unmap()
1153 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in domain_unmap()
1157 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
1158 domain->pgd, 0, start_pfn, last_pfn, NULL); in domain_unmap()
1161 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
1162 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
1166 domain->pgd = NULL; in domain_unmap()
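domain_unmap() (line 1146) must not free page-table pages immediately: until the IOTLB is invalidated, the hardware may still walk them. So dma_pte_clear_level()/dma_pte_list_pagetables() unlink the pages and chain them onto a freelist that the caller releases only after the flush (the driver threads the list through struct page's freelist member). In the model below, reusing struct pt from above, the link is threaded through slot[0] of each detached table, which is safe only because the table is already unlinked from the tree:

    /* Detach a subtree onto an intrusive freelist; returns the new head. */
    static struct pt *collect_pagetables(struct pt *t, int level,
                                         struct pt *head)
    {
            if (!t)
                    return head;
            if (level > 1)
                    for (unsigned long i = 0; i <= LEVEL_MASK; i++)
                            head = collect_pagetables(t->slot[i],
                                                      level - 1, head);
            t->slot[0] = head;      /* intrusive next-link, model only */
            return t;
    }

    /* Called only after the IOTLB flush (cf. dma_free_pagelist()). */
    static void free_pagelist(struct pt *head)
    {
            while (head) {
                    struct pt *next = head->slot[0];
                    free(head);
                    head = next;
            }
    }
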
1343 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1353 list_for_each_entry(info, &domain->devices, link) in iommu_support_dev_iotlb()
1364 static void domain_update_iotlb(struct dmar_domain *domain) in domain_update_iotlb() argument
1371 list_for_each_entry(info, &domain->devices, link) { in domain_update_iotlb()
1384 domain->has_iotlb_device = has_iotlb_device; in domain_update_iotlb()
1430 domain_update_iotlb(info->domain); in iommu_enable_dev_iotlb()
1449 domain_update_iotlb(info->domain); in iommu_disable_dev_iotlb()
1463 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, in iommu_flush_dev_iotlb() argument
1470 if (!domain->has_iotlb_device) in iommu_flush_dev_iotlb()
1474 list_for_each_entry(info, &domain->devices, link) { in iommu_flush_dev_iotlb()
1487 struct dmar_domain *domain, in iommu_flush_iotlb_psi() argument
1493 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1517 iommu_flush_dev_iotlb(domain, addr, mask); in iommu_flush_iotlb_psi()
1522 struct dmar_domain *domain, in __mapping_notify_one() argument
1527 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1534 struct dmar_domain *domain; in iommu_flush_iova() local
1537 domain = container_of(iovad, struct dmar_domain, iovad); in iommu_flush_iova()
1539 for_each_domain_iommu(idx, domain) { in iommu_flush_iova()
1541 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iova()
1674 if (!info->dev || !info->domain) in disable_dmar_iommu()
1714 struct dmar_domain *domain; in alloc_domain() local
1716 domain = alloc_domain_mem(); in alloc_domain()
1717 if (!domain) in alloc_domain()
1720 memset(domain, 0, sizeof(*domain)); in alloc_domain()
1721 domain->nid = NUMA_NO_NODE; in alloc_domain()
1722 domain->flags = flags; in alloc_domain()
1723 domain->has_iotlb_device = false; in alloc_domain()
1724 INIT_LIST_HEAD(&domain->devices); in alloc_domain()
1726 return domain; in alloc_domain()
1730 static int domain_attach_iommu(struct dmar_domain *domain, in domain_attach_iommu() argument
1739 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1740 domain->iommu_count += 1; in domain_attach_iommu()
1741 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1747 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1748 domain->iommu_count -= 1; in domain_attach_iommu()
1753 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1755 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1756 domain->nid = iommu->node; in domain_attach_iommu()
1758 domain_update_iommu_cap(domain); in domain_attach_iommu()
1764 static int domain_detach_iommu(struct dmar_domain *domain, in domain_detach_iommu() argument
1772 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
1773 count = --domain->iommu_count; in domain_detach_iommu()
1774 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
1775 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
1779 domain_update_iommu_cap(domain); in domain_detach_iommu()
1780 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
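domain_attach_iommu()/domain_detach_iommu() (lines 1730/1764) keep a per-unit reference count: the first device attached through a given IOMMU allocates a domain ID (DID) on that unit and caches it in iommu_did[], the last detach releases it, and domain_update_iommu_cap() is rerun on both edges. A compilable model of those refcount edges; alloc_did()/free_did() are toy stand-ins for the driver's per-unit DID allocator:

    #define MAX_UNITS 64                     /* stand-in bound */

    struct model_domain {
            int refcnt[MAX_UNITS];           /* devices per unit */
            int did[MAX_UNITS];              /* DID held on each unit */
            int count;                       /* devices overall */
    };

    static int next_did[MAX_UNITS];
    static int alloc_did(int unit) { return ++next_did[unit]; }  /* toy */
    static void free_did(int unit, int did) { (void)unit; (void)did; }

    static int attach(struct model_domain *d, int unit)
    {
            d->refcnt[unit]++;
            d->count++;
            if (d->refcnt[unit] == 1) {      /* first attach: claim a DID */
                    int did = alloc_did(unit);
                    if (did < 0) {           /* unwind, as the driver does */
                            d->refcnt[unit]--;
                            d->count--;
                            return -1;
                    }
                    d->did[unit] = did;
            }
            return 0;
    }

    static void detach(struct model_domain *d, int unit)
    {
            d->count--;
            if (--d->refcnt[unit] == 0) {    /* last detach: release DID */
                    free_did(unit, d->did[unit]);
                    d->did[unit] = 0;
            }
    }
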
1828 static void domain_reserve_special_ranges(struct dmar_domain *domain) in domain_reserve_special_ranges() argument
1830 copy_reserved_iova(&reserved_iova_list, &domain->iovad); in domain_reserve_special_ranges()
1847 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, in domain_init() argument
1854 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); in domain_init()
1856 err = init_iova_flush_queue(&domain->iovad, in domain_init()
1861 domain_reserve_special_ranges(domain); in domain_init()
1866 domain->gaw = guest_width; in domain_init()
1877 domain->agaw = agaw; in domain_init()
1880 domain->iommu_coherency = 1; in domain_init()
1882 domain->iommu_coherency = 0; in domain_init()
1885 domain->iommu_snooping = 1; in domain_init()
1887 domain->iommu_snooping = 0; in domain_init()
1890 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1892 domain->iommu_superpage = 0; in domain_init()
1894 domain->nid = iommu->node; in domain_init()
1897 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in domain_init()
1898 if (!domain->pgd) in domain_init()
1900 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
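The width arithmetic in domain_init() (line 1847): the caller's guest address width is first rounded up to a whole number of 9-bit levels above the 12-bit page offset, then converted to the AGAW index the hardware understands (0 = 30-bit/2-level; each step adds one level and 9 bits, so width 48 gives AGAW 2, a 4-level table). These two helpers follow the driver's guestwidth_to_adjustwidth() and width_to_agaw():

    /* Round up to a multiple of the 9-bit stride, capped at 64 bits. */
    static int guestwidth_to_adjustwidth(int gaw)
    {
            int r = (gaw - 12) % 9;
            int adj = (r == 0) ? gaw : gaw + 9 - r;
            return adj > 64 ? 64 : adj;
    }

    /* Levels beyond the 2-level, 30-bit baseline (rounding up). */
    static int width_to_agaw(int width)
    {
            return (width - 30 + 8) / 9;
    }

domain_init() then probes cap_sagaw() for the next AGAW the unit actually supports if the computed one is not offered.
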
1904 static void domain_exit(struct dmar_domain *domain) in domain_exit() argument
1908 domain_remove_dev_info(domain); in domain_exit()
1911 put_iova_domain(&domain->iovad); in domain_exit()
1913 if (domain->pgd) { in domain_exit()
1916 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in domain_exit()
1920 free_domain_mem(domain); in domain_exit()
1973 static int domain_context_mapping_one(struct dmar_domain *domain, in domain_context_mapping_one() argument
1978 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
1987 if (hw_pass_through && domain_type_is_si(domain)) in domain_context_mapping_one()
1993 BUG_ON(!domain->pgd); in domain_context_mapping_one()
2048 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2054 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
2064 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2071 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2093 domain_flush_cache(domain, context, sizeof(*context)); in domain_context_mapping_one()
2122 struct dmar_domain *domain; member
2132 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2138 domain_context_mapping(struct dmar_domain *domain, struct device *dev) in domain_context_mapping() argument
2152 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2155 data.domain = domain; in domain_context_mapping()
2196 static inline int hardware_largepage_caps(struct dmar_domain *domain, in hardware_largepage_caps() argument
2204 support = domain->iommu_superpage; in hardware_largepage_caps()
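hardware_largepage_caps() (line 2196) decides how large a PTE __domain_mapping() may use: a level-N superpage needs the IOVA PFN and the physical PFN to share 9*(N-1) zero low bits and at least that many pages still to map. OR-ing the two PFNs tests both alignments at once, which is the trick the driver uses; support is the cached domain->iommu_superpage count. A compilable rendering of that loop:

    #define VTD_STRIDE_SHIFT 9

    /* Returns the level to map at: 1 = 4KiB, 2 = 2MiB, 3 = 1GiB. */
    static int largepage_level(unsigned long iov_pfn, unsigned long phys_pfn,
                               unsigned long pages, int support)
    {
            unsigned long merged = iov_pfn | phys_pfn; /* both alignments */
            int level = 1;

            while (support && !(merged & ((1UL << VTD_STRIDE_SHIFT) - 1))) {
                    pages >>= VTD_STRIDE_SHIFT;
                    if (!pages)
                            break;
                    merged >>= VTD_STRIDE_SHIFT;
                    level++;
                    support--;
            }
            return level;
    }
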
2223 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping() argument
2233 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); in __domain_mapping()
2259 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); in __domain_mapping()
2261 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); in __domain_mapping()
2280 dma_pte_free_pagetable(domain, iov_pfn, end_pfn, in __domain_mapping()
2327 domain_flush_cache(domain, first_pte, in __domain_mapping()
2338 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_mapping() argument
2346 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot); in domain_mapping()
2350 for_each_domain_iommu(iommu_id, domain) { in domain_mapping()
2352 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); in domain_mapping()
2358 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_sg_mapping() argument
2362 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); in domain_sg_mapping()
2365 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_pfn_mapping() argument
2369 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); in domain_pfn_mapping()
2412 static void domain_remove_dev_info(struct dmar_domain *domain) in domain_remove_dev_info() argument
2418 list_for_each_entry_safe(info, tmp, &domain->devices, link) in domain_remove_dev_info()
2432 struct iommu_domain *domain; in find_domain() local
2435 domain = iommu_get_domain_for_dev(dev); in find_domain()
2436 if (domain) in find_domain()
2437 intel_iommu_attach_device(domain, dev); in find_domain()
2444 return info->domain; in find_domain()
2464 struct dmar_domain *domain) in dmar_insert_one_dev_info() argument
2481 info->domain = domain; in dmar_insert_one_dev_info()
2518 found = info2->domain; in dmar_insert_one_dev_info()
2531 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2540 list_add(&info->link, &domain->devices); in dmar_insert_one_dev_info()
2557 if (hw_pass_through && domain_type_is_si(domain)) in dmar_insert_one_dev_info()
2558 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2561 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2571 if (dev && domain_context_mapping(domain, dev)) { in dmar_insert_one_dev_info()
2577 return domain; in dmar_insert_one_dev_info()
2589 struct dmar_domain *domain = NULL; in find_or_alloc_domain() local
2610 domain = info->domain; in find_or_alloc_domain()
2620 domain = alloc_domain(0); in find_or_alloc_domain()
2621 if (!domain) in find_or_alloc_domain()
2623 if (domain_init(domain, iommu, gaw)) { in find_or_alloc_domain()
2624 domain_exit(domain); in find_or_alloc_domain()
2629 return domain; in find_or_alloc_domain()
2633 struct dmar_domain *domain) in set_domain_for_dev() argument
2654 dma_alias & 0xff, NULL, domain); in set_domain_for_dev()
2656 if (!tmp || tmp != domain) in set_domain_for_dev()
2661 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in set_domain_for_dev()
2662 if (!tmp || tmp != domain) in set_domain_for_dev()
2665 return domain; in set_domain_for_dev()
2668 static int iommu_domain_identity_map(struct dmar_domain *domain, in iommu_domain_identity_map() argument
2675 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), in iommu_domain_identity_map()
2686 dma_pte_clear_range(domain, first_vpfn, last_vpfn); in iommu_domain_identity_map()
2688 return __domain_mapping(domain, first_vpfn, NULL, in iommu_domain_identity_map()
2694 struct dmar_domain *domain, in domain_prepare_identity_map() argument
2702 if (domain == si_domain && hw_pass_through) { in domain_prepare_identity_map()
2719 if (end >> agaw_to_width(domain->agaw)) { in domain_prepare_identity_map()
2722 agaw_to_width(domain->agaw), in domain_prepare_identity_map()
2729 return iommu_domain_identity_map(domain, start, end); in domain_prepare_identity_map()
2732 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2798 return (info->domain == si_domain); in identity_mapping()
2803 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) in domain_add_dev_info() argument
2813 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2814 if (ndomain != domain) in domain_add_dev_info()
3380 struct dmar_domain *domain, in intel_alloc_iova() argument
3386 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); in intel_alloc_iova()
3396 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, in intel_alloc_iova()
3401 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, in intel_alloc_iova()
3413 struct dmar_domain *domain, *tmp; in get_private_domain_for_dev() local
3419 domain = find_domain(dev); in get_private_domain_for_dev()
3420 if (domain) in get_private_domain_for_dev()
3423 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); in get_private_domain_for_dev()
3424 if (!domain) in get_private_domain_for_dev()
3435 ret = domain_prepare_identity_map(dev, domain, in get_private_domain_for_dev()
3444 tmp = set_domain_for_dev(dev, domain); in get_private_domain_for_dev()
3445 if (!tmp || domain != tmp) { in get_private_domain_for_dev()
3446 domain_exit(domain); in get_private_domain_for_dev()
3447 domain = tmp; in get_private_domain_for_dev()
3451 if (!domain) in get_private_domain_for_dev()
3454 domain->domain.type = IOMMU_DOMAIN_DMA; in get_private_domain_for_dev()
3456 return domain; in get_private_domain_for_dev()
3484 struct iommu_domain *domain; in iommu_need_mapping() local
3487 domain = iommu_get_domain_for_dev(dev); in iommu_need_mapping()
3488 if (domain) { in iommu_need_mapping()
3489 dmar_domain = to_dmar_domain(domain); in iommu_need_mapping()
3505 struct dmar_domain *domain; in __intel_map_single() local
3515 domain = find_domain(dev); in __intel_map_single()
3516 if (!domain) in __intel_map_single()
3519 iommu = domain_get_iommu(domain); in __intel_map_single()
3522 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); in __intel_map_single()
3541 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), in __intel_map_single()
3555 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); in __intel_map_single()
3584 struct dmar_domain *domain; in intel_unmap() local
3592 domain = find_domain(dev); in intel_unmap()
3593 BUG_ON(!domain); in intel_unmap()
3595 iommu = domain_get_iommu(domain); in intel_unmap()
3606 freelist = domain_unmap(domain, start_pfn, last_pfn); in intel_unmap()
3608 !has_iova_flush_queue(&domain->iovad)) { in intel_unmap()
3609 iommu_flush_iotlb_psi(iommu, domain, start_pfn, in intel_unmap()
3612 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); in intel_unmap()
3615 queue_iova(&domain->iovad, iova_pfn, nrpages, in intel_unmap()
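The tail of intel_unmap() (lines 3606-3615) picks between two release strategies for the freelist domain_unmap() handed back: with strict invalidation (or no flush queue) the IOTLB is flushed synchronously and the IOVA and page-table pages are freed on the spot; otherwise everything goes to queue_iova() and is released in a batch when the deferred flush runs. A compilable decision model; the three callbacks are hypothetical stand-ins for iommu_flush_iotlb_psi(), dma_free_pagelist() plus free_iova_fast(), and queue_iova():

    #include <stdbool.h>

    struct unmap_ops {
            void (*flush_sync)(unsigned long pfn, unsigned long pages);
            void (*release_now)(void *freelist, unsigned long pfn,
                                unsigned long pages);
            void (*queue_deferred)(void *freelist, unsigned long pfn,
                                   unsigned long pages);
    };

    static void finish_unmap(const struct unmap_ops *ops, bool strict,
                             bool have_flush_queue, void *freelist,
                             unsigned long pfn, unsigned long pages)
    {
            if (strict || !have_flush_queue) {
                    ops->flush_sync(pfn, pages);     /* invalidate first... */
                    ops->release_now(freelist, pfn, pages); /* ...then free */
            } else {
                    /* batched: freed when the queued invalidation runs */
                    ops->queue_deferred(freelist, pfn, pages);
            }
    }
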
3722 struct dmar_domain *domain; in intel_map_sg() local
3735 domain = find_domain(dev); in intel_map_sg()
3736 if (!domain) in intel_map_sg()
3739 iommu = domain_get_iommu(domain); in intel_map_sg()
3744 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), in intel_map_sg()
3763 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); in intel_map_sg()
3765 dma_pte_free_pagetable(domain, start_vpfn, in intel_map_sg()
3767 agaw_to_level(domain->agaw) + 1); in intel_map_sg()
3768 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); in intel_map_sg()
3804 struct dmar_domain *domain; in bounce_sync_single() local
3807 domain = find_domain(dev); in bounce_sync_single()
3808 if (WARN_ON(!domain)) in bounce_sync_single()
3811 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr); in bounce_sync_single()
3822 struct dmar_domain *domain; in bounce_map_single() local
3830 domain = find_domain(dev); in bounce_map_single()
3831 if (WARN_ON(dir == DMA_NONE || !domain)) in bounce_map_single()
3834 iommu = domain_get_iommu(domain); in bounce_map_single()
3839 iova_pfn = intel_alloc_iova(dev, domain, in bounce_map_single()
3882 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), in bounce_map_single()
3896 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); in bounce_map_single()
3908 struct dmar_domain *domain; in bounce_unmap_single() local
3911 domain = find_domain(dev); in bounce_unmap_single()
3912 if (WARN_ON(!domain)) in bounce_unmap_single()
3915 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr); in bounce_unmap_single()
4731 struct dmar_domain *domain; in free_all_cpu_cached_iovas() local
4738 domain = get_iommu_domain(iommu, (u16)did); in free_all_cpu_cached_iovas()
4740 if (!domain) in free_all_cpu_cached_iovas()
4742 free_cpu_cached_iovas(cpu, &domain->iovad); in free_all_cpu_cached_iovas()
5091 struct dmar_domain *domain; in __dmar_remove_one_dev_info() local
5101 domain = info->domain; in __dmar_remove_one_dev_info()
5116 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
5120 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN && in __dmar_remove_one_dev_info()
5121 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && in __dmar_remove_one_dev_info()
5122 list_empty(&domain->devices)) in __dmar_remove_one_dev_info()
5123 domain_exit(info->domain); in __dmar_remove_one_dev_info()
5141 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init() argument
5145 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); in md_domain_init()
5146 domain_reserve_special_ranges(domain); in md_domain_init()
5149 domain->gaw = guest_width; in md_domain_init()
5151 domain->agaw = width_to_agaw(adjust_width); in md_domain_init()
5153 domain->iommu_coherency = 0; in md_domain_init()
5154 domain->iommu_snooping = 0; in md_domain_init()
5155 domain->iommu_superpage = 0; in md_domain_init()
5156 domain->max_addr = 0; in md_domain_init()
5159 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in md_domain_init()
5160 if (!domain->pgd) in md_domain_init()
5162 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in md_domain_init()
5169 struct iommu_domain *domain; in intel_iommu_domain_alloc() local
5195 domain = &dmar_domain->domain; in intel_iommu_domain_alloc()
5196 domain->geometry.aperture_start = 0; in intel_iommu_domain_alloc()
5197 domain->geometry.aperture_end = in intel_iommu_domain_alloc()
5199 domain->geometry.force_aperture = true; in intel_iommu_domain_alloc()
5201 return domain; in intel_iommu_domain_alloc()
5203 return &si_domain->domain; in intel_iommu_domain_alloc()
5211 static void intel_iommu_domain_free(struct iommu_domain *domain) in intel_iommu_domain_free() argument
5213 if (domain != &si_domain->domain) in intel_iommu_domain_free()
5214 domain_exit(to_dmar_domain(domain)); in intel_iommu_domain_free()
5222 is_aux_domain(struct device *dev, struct iommu_domain *domain) in is_aux_domain() argument
5227 domain->type == IOMMU_DOMAIN_UNMANAGED; in is_aux_domain()
5230 static void auxiliary_link_device(struct dmar_domain *domain, in auxiliary_link_device() argument
5239 domain->auxd_refcnt++; in auxiliary_link_device()
5240 list_add(&domain->auxd, &info->auxiliary_domains); in auxiliary_link_device()
5243 static void auxiliary_unlink_device(struct dmar_domain *domain, in auxiliary_unlink_device() argument
5252 list_del(&domain->auxd); in auxiliary_unlink_device()
5253 domain->auxd_refcnt--; in auxiliary_unlink_device()
5255 if (!domain->auxd_refcnt && domain->default_pasid > 0) in auxiliary_unlink_device()
5256 intel_pasid_free_id(domain->default_pasid); in auxiliary_unlink_device()
5259 static int aux_domain_add_dev(struct dmar_domain *domain, in aux_domain_add_dev() argument
5271 if (domain->default_pasid <= 0) { in aux_domain_add_dev()
5274 pasid = intel_pasid_alloc_id(domain, PASID_MIN, in aux_domain_add_dev()
5281 domain->default_pasid = pasid; in aux_domain_add_dev()
5290 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
5295 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
5296 domain->default_pasid); in aux_domain_add_dev()
5301 auxiliary_link_device(domain, dev); in aux_domain_add_dev()
5308 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
5312 if (!domain->auxd_refcnt && domain->default_pasid > 0) in aux_domain_add_dev()
5313 intel_pasid_free_id(domain->default_pasid); in aux_domain_add_dev()
5318 static void aux_domain_remove_dev(struct dmar_domain *domain, in aux_domain_remove_dev() argument
5325 if (!is_aux_domain(dev, &domain->domain)) in aux_domain_remove_dev()
5332 auxiliary_unlink_device(domain, dev); in aux_domain_remove_dev()
5335 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid); in aux_domain_remove_dev()
5336 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
5342 static int prepare_domain_attach_device(struct iommu_domain *domain, in prepare_domain_attach_device() argument
5345 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in prepare_domain_attach_device()
5385 static int intel_iommu_attach_device(struct iommu_domain *domain, in intel_iommu_attach_device() argument
5390 if (domain->type == IOMMU_DOMAIN_UNMANAGED && in intel_iommu_attach_device()
5396 if (is_aux_domain(dev, domain)) in intel_iommu_attach_device()
5408 ret = prepare_domain_attach_device(domain, dev); in intel_iommu_attach_device()
5412 return domain_add_dev_info(to_dmar_domain(domain), dev); in intel_iommu_attach_device()
5415 static int intel_iommu_aux_attach_device(struct iommu_domain *domain, in intel_iommu_aux_attach_device() argument
5420 if (!is_aux_domain(dev, domain)) in intel_iommu_aux_attach_device()
5423 ret = prepare_domain_attach_device(domain, dev); in intel_iommu_aux_attach_device()
5427 return aux_domain_add_dev(to_dmar_domain(domain), dev); in intel_iommu_aux_attach_device()
5430 static void intel_iommu_detach_device(struct iommu_domain *domain, in intel_iommu_detach_device() argument
5436 static void intel_iommu_aux_detach_device(struct iommu_domain *domain, in intel_iommu_aux_detach_device() argument
5439 aux_domain_remove_dev(to_dmar_domain(domain), dev); in intel_iommu_aux_detach_device()
5442 static int intel_iommu_map(struct iommu_domain *domain, in intel_iommu_map() argument
5446 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_map()
5480 static size_t intel_iommu_unmap(struct iommu_domain *domain, in intel_iommu_unmap() argument
5484 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_unmap()
5516 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, in intel_iommu_iova_to_phys() argument
5519 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_iova_to_phys()
5580 struct iommu_domain *domain; in intel_iommu_add_device() local
5604 domain = iommu_get_domain_for_dev(dev); in intel_iommu_add_device()
5605 dmar_domain = to_dmar_domain(domain); in intel_iommu_add_device()
5606 if (domain->type == IOMMU_DOMAIN_DMA) { in intel_iommu_add_device()
5737 struct dmar_domain *domain; in intel_iommu_enable_pasid() local
5742 domain = find_domain(dev); in intel_iommu_enable_pasid()
5743 if (!domain) in intel_iommu_enable_pasid()
5765 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5785 struct iommu_domain *domain, in intel_iommu_apply_resv_region() argument
5788 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_apply_resv_region()
5945 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) in intel_iommu_aux_get_pasid() argument
5947 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_aux_get_pasid()
5953 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, in intel_iommu_is_attach_deferred() argument