Lines Matching full:iommu
33 #include <linux/iommu.h>
34 #include <linux/intel-iommu.h>
47 #include <asm/iommu.h>
90 * to the IOMMU core, which will then use this information to split
94 * Traditionally the IOMMU core just handed us the mappings directly,
101 * If at some point we'd like to utilize the IOMMU core's new behavior,
171 /* global iommu list, set NULL for ignored DMAR units */
293 * 2. It maps to each iommu if successful.
294 * 3. Each iommu maps to this domain if successful.
414 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
416 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
419 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
421 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
424 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
428 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
430 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
440 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
444 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
458 pr_info("Intel-IOMMU: scalable mode supported\n"); in intel_iommu_setup()
461 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
464 pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n"); in intel_iommu_setup()
479 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
484 domains = iommu->domains[idx]; in get_iommu_domain()
491 static void set_iommu_domain(struct intel_iommu *iommu, u16 did, in set_iommu_domain() argument
497 if (!iommu->domains[idx]) { in set_iommu_domain()
499 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); in set_iommu_domain()
502 domains = iommu->domains[idx]; in set_iommu_domain()
564 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
568 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) in __iommu_calculate_sagaw() argument
572 fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
573 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
576 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
580 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
586 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
591 sagaw = __iommu_calculate_sagaw(iommu); in __iommu_calculate_agaw()
601 * Calculate max SAGAW for each iommu.
603 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
605 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
609 * calculate agaw for each iommu.
613 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
615 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
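The __iommu_calculate_sagaw()/__iommu_calculate_agaw() lines above pick the deepest page-table depth (AGAW) the unit supports for a given guest address width. As a rough illustration only, here is a self-contained userspace sketch of that selection, assuming the usual VT-d encoding in which SAGAW bit n corresponds to an address width of 30 + 9 * n bits (bit 2 = 48-bit/4-level, bit 3 = 57-bit/5-level); it is an illustrative model, not the kernel implementation.

/* Illustrative sketch, not part of intel-iommu.c. */
#include <stdio.h>

#define LEVEL_STRIDE 9

static int width_to_agaw(int width)
{
	return (width - 30 + LEVEL_STRIDE - 1) / LEVEL_STRIDE;
}

/* Pick the largest supported AGAW not exceeding max_gaw, or -1 if none. */
static int calculate_agaw(unsigned long sagaw, int max_gaw)
{
	int agaw;

	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1UL << agaw))
			return agaw;
	return -1;
}

int main(void)
{
	unsigned long sagaw = (1UL << 2) | (1UL << 3);	/* 4- and 5-level supported */

	printf("agaw for 48-bit gaw: %d\n", calculate_agaw(sagaw, 48));	/* 2 */
	printf("agaw for 57-bit gaw: %d\n", calculate_agaw(sagaw, 57));	/* 3 */
	return 0;
}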
618 /* This function only returns a single iommu in a domain */
636 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
638 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
639 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
645 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
663 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
664 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
675 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
679 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
680 if (iommu != skip) { in domain_update_iommu_snooping()
687 if (!sm_supported(iommu) && in domain_update_iommu_snooping()
688 !ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
703 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
712 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
713 if (iommu != skip) { in domain_update_iommu_superpage()
715 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
718 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
785 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
788 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
793 if (sm_supported(iommu)) { in iommu_context_addr()
807 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
811 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
814 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
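iommu_context_addr() above walks a two-level structure: a 256-entry root table indexed by PCI bus number, whose entries point at lazily allocated per-bus context tables indexed by devfn (scalable mode additionally splits each root entry into lower/upper halves). As an illustration only, a minimal userspace model of that lookup follows; the real code also flushes the cache lines it touches, as the matches show.

/* Illustrative sketch, not part of intel-iommu.c. */
#include <stdio.h>
#include <stdlib.h>

struct context_entry { unsigned long long lo, hi; };

static struct context_entry *context_tables[256];	/* indexed by bus */

static struct context_entry *context_addr(unsigned char bus,
					  unsigned char devfn, int alloc)
{
	if (!context_tables[bus]) {
		if (!alloc)
			return NULL;
		context_tables[bus] = calloc(256, sizeof(struct context_entry));
		if (!context_tables[bus])
			return NULL;
	}
	return &context_tables[bus][devfn];
}

int main(void)
{
	/* Device 00:1f.3 -> bus 0x00, devfn = (0x1f << 3) | 0x3 */
	struct context_entry *ce = context_addr(0x00, (0x1f << 3) | 0x3, 1);

	printf("context entry %s\n", ce ? "allocated" : "not present");
	return 0;
}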
857 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
858 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
859 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
870 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
881 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
883 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
902 struct intel_iommu *iommu; in device_to_iommu() local
916 * the PF instead to find the IOMMU. */ in device_to_iommu()
924 for_each_iommu(iommu, drhd) { in device_to_iommu()
932 * which we used for the IOMMU lookup. Strictly speaking in device_to_iommu()
958 iommu = NULL; in device_to_iommu()
960 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
961 iommu = NULL; in device_to_iommu()
965 return iommu; in device_to_iommu()
975 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
981 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
982 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
985 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
989 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
995 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
996 if (!iommu->root_entry) { in free_context_table()
1000 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
1004 if (!sm_supported(iommu)) in free_context_table()
1007 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
1012 free_pgtable_page(iommu->root_entry); in free_context_table()
1013 iommu->root_entry = NULL; in free_context_table()
1015 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
1028 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
1285 /* We can't just free the pages because the IOMMU may still be walking
1331 /* iommu handling */
1332 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1337 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1340 iommu->name); in iommu_alloc_root_entry()
1344 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1346 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1347 iommu->root_entry = root; in iommu_alloc_root_entry()
1348 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1353 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1359 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1360 if (sm_supported(iommu)) in iommu_set_root_entry()
1363 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1364 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1366 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1369 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1372 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1374 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1375 if (sm_supported(iommu)) in iommu_set_root_entry()
1376 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1377 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1380 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1385 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1388 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1389 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1392 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1395 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1399 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1422 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1423 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1426 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1429 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1433 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1436 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1462 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1465 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1468 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1471 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1472 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1475 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1478 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1490 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1497 if (!iommu->qi) in iommu_support_dev_iotlb()
1501 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
1544 /* For IOMMU that supports device IOTLB throttling (DIT), we assign in iommu_enable_dev_iotlb()
1545 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge in iommu_enable_dev_iotlb()
1549 if (!ecap_dit(info->iommu->ecap)) in iommu_enable_dev_iotlb()
1626 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in iommu_flush_dev_iotlb()
1632 static void domain_flush_piotlb(struct intel_iommu *iommu, in domain_flush_piotlb() argument
1636 u16 did = domain->iommu_did[iommu->seq_id]; in domain_flush_piotlb()
1639 qi_flush_piotlb(iommu, did, domain->default_pasid, in domain_flush_piotlb()
1643 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); in domain_flush_piotlb()
1646 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1654 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1662 domain_flush_piotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1689 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1690 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1691 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1694 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1702 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1707 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1715 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1716 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1718 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1729 struct intel_iommu *iommu = g_iommus[idx]; in iommu_flush_iova() local
1730 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iova()
1733 domain_flush_piotlb(iommu, domain, 0, -1, 0); in iommu_flush_iova()
1735 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iova()
1738 if (!cap_caching_mode(iommu->cap)) in iommu_flush_iova()
1739 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), in iommu_flush_iova()
1744 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1749 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1752 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1753 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1755 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1758 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1761 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1764 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1769 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1770 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1771 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1774 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1777 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1780 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1785 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1786 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1789 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1790 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1791 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1794 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1797 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
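iommu_set_root_entry(), iommu_flush_write_buffer() and the enable/disable-translation helpers above all follow the same handshake: take register_lock, write a command bit to DMAR_GCMD_REG, then spin on DMAR_GSTS_REG until the hardware reports the new state (what IOMMU_WAIT_OP() does, panicking on timeout). A toy model of that write-then-poll pattern, against a fake status register rather than real MMIO, purely for illustration:

/* Illustrative sketch, not part of intel-iommu.c. */
#include <stdio.h>
#include <stdint.h>

#define DMA_GSTS_TES (1u << 31)	/* translation-enable status bit, per the VT-d spec */

static volatile uint32_t fake_gsts;	/* stands in for readl(iommu->reg + DMAR_GSTS_REG) */

static int wait_op(uint32_t bit, int want_set)
{
	for (long tries = 0; tries < 1000000; tries++) {
		uint32_t sts = fake_gsts;

		if (want_set ? (sts & bit) : !(sts & bit))
			return 0;
	}
	return -1;	/* the kernel panics on timeout instead of returning */
}

int main(void)
{
	fake_gsts |= DMA_GSTS_TES;	/* "hardware" acknowledges immediately in this model */
	printf("enable handshake: %s\n", wait_op(DMA_GSTS_TES, 1) ? "timeout" : "ok");
	return 0;
}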
1800 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1805 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1807 iommu->name, ndomains); in iommu_init_domains()
1810 spin_lock_init(&iommu->lock); in iommu_init_domains()
1812 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1813 if (!iommu->domain_ids) { in iommu_init_domains()
1815 iommu->name); in iommu_init_domains()
1820 iommu->domains = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1822 if (iommu->domains) { in iommu_init_domains()
1824 iommu->domains[0] = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1827 if (!iommu->domains || !iommu->domains[0]) { in iommu_init_domains()
1829 iommu->name); in iommu_init_domains()
1830 kfree(iommu->domain_ids); in iommu_init_domains()
1831 kfree(iommu->domains); in iommu_init_domains()
1832 iommu->domain_ids = NULL; in iommu_init_domains()
1833 iommu->domains = NULL; in iommu_init_domains()
1843 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1852 if (sm_supported(iommu)) in iommu_init_domains()
1853 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1858 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1863 if (!iommu->domains || !iommu->domain_ids) in disable_dmar_iommu()
1868 if (info->iommu != iommu) in disable_dmar_iommu()
1878 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1879 iommu_disable_translation(iommu); in disable_dmar_iommu()
1882 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1884 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1885 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; in free_dmar_iommu()
1889 kfree(iommu->domains[i]); in free_dmar_iommu()
1890 kfree(iommu->domains); in free_dmar_iommu()
1891 kfree(iommu->domain_ids); in free_dmar_iommu()
1892 iommu->domains = NULL; in free_dmar_iommu()
1893 iommu->domain_ids = NULL; in free_dmar_iommu()
1896 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1899 free_context_table(iommu); in free_dmar_iommu()
1902 if (pasid_supported(iommu)) { in free_dmar_iommu()
1903 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1904 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1906 if (vccap_pasid(iommu->vccap)) in free_dmar_iommu()
1907 ioasid_unregister_allocator(&iommu->pasid_allocator); in free_dmar_iommu()
1919 struct intel_iommu *iommu; in first_level_by_default() local
1928 for_each_active_iommu(iommu, drhd) { in first_level_by_default()
1929 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) { in first_level_by_default()
1958 /* Must be called with iommu->lock */
1960 struct intel_iommu *iommu) in domain_attach_iommu() argument
1966 assert_spin_locked(&iommu->lock); in domain_attach_iommu()
1968 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1970 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1971 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1972 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1975 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1976 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1981 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1982 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1984 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1985 domain->nid = iommu->node; in domain_attach_iommu()
1994 struct intel_iommu *iommu) in domain_detach_iommu() argument
1999 assert_spin_locked(&iommu->lock); in domain_detach_iommu()
2001 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
2003 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
2004 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
2005 clear_bit(num, iommu->domain_ids); in domain_detach_iommu()
2006 set_iommu_domain(iommu, num, NULL); in domain_detach_iommu()
2009 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
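domain_attach_iommu()/domain_detach_iommu() above keep a per-unit reference count for each domain and hand out domain IDs from the unit's domain_ids bitmap, releasing the ID when the last reference goes away. The userspace sketch below models only that bookkeeping (a first attach allocates an ID, the last detach frees it); it ignores the locking and the iommu->domains[] lookup table seen in the other matches.

/* Illustrative sketch, not part of intel-iommu.c. */
#include <stdio.h>

#define NDOMAINS 64	/* cap_ndoms() supplies the real per-unit count */

static unsigned long long domain_ids;	/* bit n set => domain ID n in use */
static int refcnt[NDOMAINS];

static int attach(int *did)
{
	for (int id = 0; id < NDOMAINS; id++) {
		if (!(domain_ids & (1ULL << id))) {
			domain_ids |= 1ULL << id;
			refcnt[id] = 1;
			*did = id;
			return 0;
		}
	}
	return -1;	/* "No free domain ids" */
}

static void detach(int did)
{
	if (--refcnt[did] == 0)
		domain_ids &= ~(1ULL << did);
}

int main(void)
{
	int did;

	domain_ids |= 1ULL << 0;	/* ID 0 is reserved, as in iommu_init_domains() */
	if (attach(&did) == 0)
		printf("allocated domain id %d\n", did);	/* prints 1 */
	detach(did);
	return 0;
}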
2110 * IOMMU hardware will use the PASID value set in this field for
2141 struct intel_iommu *iommu, in domain_context_mapping_one() argument
2145 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
2163 spin_lock(&iommu->lock); in domain_context_mapping_one()
2166 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
2186 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
2187 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
2191 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
2198 if (sm_supported(iommu)) { in domain_context_mapping_one()
2215 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2228 * Skip top levels of page tables for iommu which has in domain_context_mapping_one()
2231 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2238 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2252 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2260 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2269 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2270 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2274 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2276 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2283 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2291 struct intel_iommu *iommu; member
2300 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2310 struct intel_iommu *iommu; in domain_context_mapping() local
2313 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2314 if (!iommu) in domain_context_mapping()
2320 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2324 data.iommu = iommu; in domain_context_mapping()
2334 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2336 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2341 struct intel_iommu *iommu; in domain_context_mapped() local
2344 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2345 if (!iommu) in domain_context_mapped()
2349 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2352 domain_context_mapped_cb, iommu); in domain_context_mapped()
2522 struct intel_iommu *iommu; in domain_mapping() local
2530 iommu = g_iommus[iommu_id]; in domain_mapping()
2531 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); in domain_mapping()
2551 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_context_clear_one() argument
2557 if (!iommu) in domain_context_clear_one()
2560 spin_lock_irqsave(&iommu->lock, flags); in domain_context_clear_one()
2561 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2563 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2568 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2569 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2570 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2576 if (sm_supported(iommu)) in domain_context_clear_one()
2577 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2579 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2610 if (unlikely(!dev || !dev->iommu)) in find_domain()
2647 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2657 * Skip top levels of page tables for iommu which has in domain_setup_first_level()
2660 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2678 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2679 domain->iommu_did[iommu->seq_id], in domain_setup_first_level()
2689 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, in dmar_insert_one_dev_info() argument
2706 info->segment = iommu->segment; in dmar_insert_one_dev_info()
2720 info->iommu = iommu; in dmar_insert_one_dev_info()
2728 if (ecap_dev_iotlb_support(iommu->ecap) && in dmar_insert_one_dev_info()
2733 if (sm_supported(iommu)) { in dmar_insert_one_dev_info()
2734 if (pasid_supported(iommu)) { in dmar_insert_one_dev_info()
2740 if (info->ats_supported && ecap_prs(iommu->ecap) && in dmar_insert_one_dev_info()
2767 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2768 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2769 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2784 if (dev && dev_is_pci(dev) && sm_supported(iommu)) { in dmar_insert_one_dev_info()
2793 spin_lock_irqsave(&iommu->lock, flags); in dmar_insert_one_dev_info()
2795 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2798 ret = domain_setup_first_level(iommu, domain, dev, in dmar_insert_one_dev_info()
2801 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2803 spin_unlock_irqrestore(&iommu->lock, flags); in dmar_insert_one_dev_info()
2897 struct intel_iommu *iommu; in domain_add_dev_info() local
2900 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2901 if (!iommu) in domain_add_dev_info()
2904 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2970 * The second is use of the device through the IOMMU API. This interface
2974 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2976 * the IOMMU API, which eliminates them from device assignment.
3025 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
3028 * Start from the sane iommu hardware state. in intel_iommu_init_qi()
3033 if (!iommu->qi) { in intel_iommu_init_qi()
3037 dmar_fault(-1, iommu); in intel_iommu_init_qi()
3042 dmar_disable_qi(iommu); in intel_iommu_init_qi()
3045 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
3049 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
3050 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
3052 iommu->name); in intel_iommu_init_qi()
3054 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
3055 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
3056 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
3060 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
3082 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
3112 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
3126 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
3127 set_bit(did, iommu->domain_ids); in copy_context_table()
3153 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
3162 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
3173 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
3175 new_ext = !!ecap_ecs(iommu->ecap); in copy_translation_tables()
3202 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
3206 iommu->name, bus); in copy_translation_tables()
3211 spin_lock_irqsave(&iommu->lock, flags); in copy_translation_tables()
3220 iommu->root_entry[bus].lo = val; in copy_translation_tables()
3227 iommu->root_entry[bus].hi = val; in copy_translation_tables()
3230 spin_unlock_irqrestore(&iommu->lock, flags); in copy_translation_tables()
3234 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
3247 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_alloc() local
3250 if (!iommu) in intel_vcmd_ioasid_alloc()
3260 if (vcmd_alloc_pasid(iommu, &ioasid)) in intel_vcmd_ioasid_alloc()
3268 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_free() local
3270 if (!iommu) in intel_vcmd_ioasid_free()
3280 vcmd_free_pasid(iommu, ioasid); in intel_vcmd_ioasid_free()
3283 static void register_pasid_allocator(struct intel_iommu *iommu) in register_pasid_allocator() argument
3289 if (!cap_caching_mode(iommu->cap)) in register_pasid_allocator()
3292 if (!sm_supported(iommu)) { in register_pasid_allocator()
3304 if (!vccap_pasid(iommu->vccap)) in register_pasid_allocator()
3308 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc; in register_pasid_allocator()
3309 iommu->pasid_allocator.free = intel_vcmd_ioasid_free; in register_pasid_allocator()
3310 iommu->pasid_allocator.pdata = (void *)iommu; in register_pasid_allocator()
3311 if (ioasid_register_allocator(&iommu->pasid_allocator)) { in register_pasid_allocator()
3314 * Disable scalable mode on this IOMMU if there in register_pasid_allocator()
3326 struct intel_iommu *iommu; in init_dmars() local
3348 /* Preallocate enough resources for IOMMU hot-addition */ in init_dmars()
3355 pr_err("Allocating global iommu array failed\n"); in init_dmars()
3360 for_each_iommu(iommu, drhd) { in init_dmars()
3362 iommu_disable_translation(iommu); in init_dmars()
3367 * Find the max pasid size of all IOMMU's in the system. in init_dmars()
3371 if (pasid_supported(iommu)) { in init_dmars()
3372 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
3378 g_iommus[iommu->seq_id] = iommu; in init_dmars()
3380 intel_iommu_init_qi(iommu); in init_dmars()
3382 ret = iommu_init_domains(iommu); in init_dmars()
3386 init_translation_status(iommu); in init_dmars()
3388 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
3389 iommu_disable_translation(iommu); in init_dmars()
3390 clear_translation_pre_enabled(iommu); in init_dmars()
3392 iommu->name); in init_dmars()
3398 * among all IOMMU's. Need to Split it later. in init_dmars()
3400 ret = iommu_alloc_root_entry(iommu); in init_dmars()
3404 if (translation_pre_enabled(iommu)) { in init_dmars()
3407 ret = copy_translation_tables(iommu); in init_dmars()
3410 * We found the IOMMU with translation in init_dmars()
3419 iommu->name); in init_dmars()
3420 iommu_disable_translation(iommu); in init_dmars()
3421 clear_translation_pre_enabled(iommu); in init_dmars()
3424 iommu->name); in init_dmars()
3428 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
3431 if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { in init_dmars()
3435 intel_svm_check(iommu); in init_dmars()
3443 for_each_active_iommu(iommu, drhd) { in init_dmars()
3444 iommu_flush_write_buffer(iommu); in init_dmars()
3446 register_pasid_allocator(iommu); in init_dmars()
3448 iommu_set_root_entry(iommu); in init_dmars()
3471 for_each_iommu(iommu, drhd) { in init_dmars()
3478 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3482 iommu_flush_write_buffer(iommu); in init_dmars()
3485 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3491 ret = intel_svm_enable_prq(iommu); in init_dmars()
3497 ret = dmar_set_interrupt(iommu); in init_dmars()
3505 for_each_active_iommu(iommu, drhd) { in init_dmars()
3506 disable_dmar_iommu(iommu); in init_dmars()
3507 free_dmar_iommu(iommu); in init_dmars()
3528 * Restrict dma_mask to the width that the iommu can handle. in intel_alloc_iova()
3575 struct intel_iommu *iommu; in __intel_map_single() local
3587 iommu = domain_get_iommu(domain); in __intel_map_single()
3599 !cap_zlr(iommu->cap)) in __intel_map_single()
3651 struct intel_iommu *iommu; in intel_unmap() local
3658 iommu = domain_get_iommu(domain); in intel_unmap()
3672 iommu_flush_iotlb_psi(iommu, domain, start_pfn, in intel_unmap()
3782 struct intel_iommu *iommu; in intel_map_sg() local
3793 iommu = domain_get_iommu(domain); in intel_map_sg()
3810 !cap_zlr(iommu->cap)) in intel_map_sg()
3877 struct intel_iommu *iommu; in bounce_map_single() local
3892 iommu = domain_get_iommu(domain); in bounce_map_single()
3893 if (WARN_ON(!iommu)) in bounce_map_single()
3907 !cap_zlr(iommu->cap)) in bounce_map_single()
4200 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
4212 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
4214 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
4215 if (iommu->qi) in init_iommu_hw()
4216 dmar_reenable_qi(iommu); in init_iommu_hw()
4218 for_each_iommu(iommu, drhd) { in init_iommu_hw()
4225 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
4229 iommu_flush_write_buffer(iommu); in init_iommu_hw()
4230 iommu_set_root_entry(iommu); in init_iommu_hw()
4231 iommu_enable_translation(iommu); in init_iommu_hw()
4232 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
4241 struct intel_iommu *iommu; in iommu_flush_all() local
4243 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
4244 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
4246 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
4254 struct intel_iommu *iommu = NULL; in iommu_suspend() local
4257 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
4258 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), in iommu_suspend()
4260 if (!iommu->iommu_state) in iommu_suspend()
4266 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
4267 iommu_disable_translation(iommu); in iommu_suspend()
4269 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
4271 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
4272 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
4273 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
4274 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
4275 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
4276 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
4277 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
4278 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
4280 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
4285 for_each_active_iommu(iommu, drhd) in iommu_suspend()
4286 kfree(iommu->iommu_state); in iommu_suspend()
4294 struct intel_iommu *iommu = NULL; in iommu_resume() local
4299 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
4301 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
4305 for_each_active_iommu(iommu, drhd) { in iommu_resume()
4307 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
4309 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
4310 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
4311 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
4312 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
4313 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
4314 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
4315 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
4316 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
4318 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
4321 for_each_active_iommu(iommu, drhd) in iommu_resume()
4322 kfree(iommu->iommu_state); in iommu_resume()
4496 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
4498 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
4501 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
4503 iommu->name); in intel_iommu_add()
4506 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
4507 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
4509 iommu->name); in intel_iommu_add()
4512 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
4513 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
4515 iommu->name); in intel_iommu_add()
4522 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
4523 iommu_disable_translation(iommu); in intel_iommu_add()
4525 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
4526 ret = iommu_init_domains(iommu); in intel_iommu_add()
4528 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
4532 intel_svm_check(iommu); in intel_iommu_add()
4539 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4543 intel_iommu_init_qi(iommu); in intel_iommu_add()
4544 iommu_flush_write_buffer(iommu); in intel_iommu_add()
4547 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
4548 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
4553 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
4557 iommu_set_root_entry(iommu); in intel_iommu_add()
4558 iommu_enable_translation(iommu); in intel_iommu_add()
4560 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4564 disable_dmar_iommu(iommu); in intel_iommu_add()
4566 free_dmar_iommu(iommu); in intel_iommu_add()
4573 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
4577 if (iommu == NULL) in dmar_iommu_hotplug()
4583 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
4584 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4724 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4731 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4732 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
4754 struct intel_iommu *iommu = g_iommus[i]; in free_all_cpu_cached_iovas() local
4758 if (!iommu) in free_all_cpu_cached_iovas()
4761 for (did = 0; did < cap_ndoms(iommu->cap); did++) { in free_all_cpu_cached_iovas()
4762 domain = get_iommu_domain(iommu, (u16)did); in free_all_cpu_cached_iovas()
4780 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
4783 for_each_iommu(iommu, drhd) in intel_disable_iommus()
4784 iommu_disable_translation(iommu); in intel_disable_iommus()
4790 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
4798 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
4799 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
4811 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
4818 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_version() local
4819 u32 ver = readl(iommu->reg + DMAR_VER_REG); in intel_iommu_show_version()
4829 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_address() local
4830 return sprintf(buf, "%llx\n", iommu->reg_phys); in intel_iommu_show_address()
4838 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_cap() local
4839 return sprintf(buf, "%llx\n", iommu->cap); in intel_iommu_show_cap()
4847 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ecap() local
4848 return sprintf(buf, "%llx\n", iommu->ecap); in intel_iommu_show_ecap()
4856 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ndoms() local
4857 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in intel_iommu_show_ndoms()
4865 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ndoms_used() local
4866 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in intel_iommu_show_ndoms_used()
4867 cap_ndoms(iommu->cap))); in intel_iommu_show_ndoms_used()
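The sysfs show functions above expose the raw VERSION/CAP/ECAP registers and domain counts for each DMAR unit; on systems where the attributes are present they appear under /sys/class/iommu/<unit>/intel-iommu/ (per the intel-iommu sysfs ABI; the unit name, e.g. dmar0, varies). A small reader that decodes the domain count from the capability register follows, as an illustration only; the 2^(4 + 2 * ND) formula mirrors cap_ndoms() and the VT-d capability register layout.

/* Illustrative sketch, not part of intel-iommu.c. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/cap", "r");
	unsigned long long cap;

	if (!f) {
		perror("open cap");
		return 1;
	}
	if (fscanf(f, "%llx", &cap) != 1) {
		fclose(f);
		fprintf(stderr, "could not parse cap\n");
		return 1;
	}
	fclose(f);

	printf("cap = %#llx, %llu domain IDs supported\n",
	       cap, 1ULL << (4 + 2 * (unsigned)(cap & 0x7)));
	return 0;
}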
4882 .name = "intel-iommu",
4910 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
4913 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
4929 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
4933 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
4972 struct intel_iommu *iommu; in intel_iommu_init() local
4975 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
4983 panic("tboot: Failed to initialize iommu memory\n"); in intel_iommu_init()
5015 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
5016 * mempool aren't setup, which means that the IOMMU's PMRs in intel_iommu_init()
5023 for_each_iommu(iommu, drhd) in intel_iommu_init()
5024 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
5044 panic("tboot: Failed to reserve iommu ranges\n"); in intel_iommu_init()
5065 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
5066 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
5068 "%s", iommu->name); in intel_iommu_init()
5069 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); in intel_iommu_init()
5070 iommu_device_register(&iommu->iommu); in intel_iommu_init()
5077 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL, in intel_iommu_init()
5085 for_each_iommu(iommu, drhd) { in intel_iommu_init()
5086 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
5087 iommu_enable_translation(iommu); in intel_iommu_init()
5089 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
5110 struct intel_iommu *iommu = opaque; in domain_context_clear_one_cb() local
5112 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_clear_one_cb()
5117 * NB - intel-iommu lacks any sort of reference counting for the users of
5122 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) in domain_context_clear() argument
5124 if (!iommu || !dev || !dev_is_pci(dev)) in domain_context_clear()
5127 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); in domain_context_clear()
5133 struct intel_iommu *iommu; in __dmar_remove_one_dev_info() local
5141 iommu = info->iommu; in __dmar_remove_one_dev_info()
5145 if (dev_is_pci(info->dev) && sm_supported(iommu)) in __dmar_remove_one_dev_info()
5146 intel_pasid_tear_down_entry(iommu, info->dev, in __dmar_remove_one_dev_info()
5151 domain_context_clear(iommu, info->dev); in __dmar_remove_one_dev_info()
5157 spin_lock_irqsave(&iommu->lock, flags); in __dmar_remove_one_dev_info()
5158 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
5159 spin_unlock_irqrestore(&iommu->lock, flags); in __dmar_remove_one_dev_info()
5300 struct intel_iommu *iommu; in aux_domain_add_dev() local
5302 iommu = device_to_iommu(dev, NULL, NULL); in aux_domain_add_dev()
5303 if (!iommu) in aux_domain_add_dev()
5322 * iommu->lock must be held to attach domain to iommu and setup the in aux_domain_add_dev()
5325 spin_lock(&iommu->lock); in aux_domain_add_dev()
5326 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
5332 ret = domain_setup_first_level(iommu, domain, dev, in aux_domain_add_dev()
5335 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
5339 spin_unlock(&iommu->lock); in aux_domain_add_dev()
5348 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
5350 spin_unlock(&iommu->lock); in aux_domain_add_dev()
5362 struct intel_iommu *iommu; in aux_domain_remove_dev() local
5370 iommu = info->iommu; in aux_domain_remove_dev()
5374 spin_lock(&iommu->lock); in aux_domain_remove_dev()
5375 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false); in aux_domain_remove_dev()
5376 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
5377 spin_unlock(&iommu->lock); in aux_domain_remove_dev()
5386 struct intel_iommu *iommu; in prepare_domain_attach_device() local
5389 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
5390 if (!iommu) in prepare_domain_attach_device()
5393 /* check if this iommu agaw is sufficient for max mapped address */ in prepare_domain_attach_device()
5394 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
5395 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
5396 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
5399 dev_err(dev, "%s: iommu width (%d) is not " in prepare_domain_attach_device()
5409 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
5431 …dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Con… in intel_iommu_attach_device()
5483 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
5494 * X: indexed by iommu cache type
5522 * IOMMU cache invalidate API passes granu_size in bytes, and number of in to_vtd_size()
5534 struct intel_iommu *iommu; in intel_iommu_sva_invalidate() local
5548 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_sva_invalidate()
5549 if (!iommu) in intel_iommu_sva_invalidate()
5556 spin_lock(&iommu->lock); in intel_iommu_sva_invalidate()
5562 did = dmar_domain->iommu_did[iommu->seq_id]; in intel_iommu_sva_invalidate()
5609 qi_flush_piotlb(iommu, did, pasid, in intel_iommu_sva_invalidate()
5639 qi_flush_dev_iotlb_pasid(iommu, sid, in intel_iommu_sva_invalidate()
5647 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n", in intel_iommu_sva_invalidate()
5653 spin_unlock(&iommu->lock); in intel_iommu_sva_invalidate()
5683 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
5754 struct intel_iommu *iommu; in scalable_mode_support() local
5758 for_each_active_iommu(iommu, drhd) { in scalable_mode_support()
5759 if (!sm_supported(iommu)) { in scalable_mode_support()
5772 struct intel_iommu *iommu; in iommu_pasid_support() local
5776 for_each_active_iommu(iommu, drhd) { in iommu_pasid_support()
5777 if (!pasid_supported(iommu)) { in iommu_pasid_support()
5790 struct intel_iommu *iommu; in nested_mode_support() local
5794 for_each_active_iommu(iommu, drhd) { in nested_mode_support()
5795 if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) { in nested_mode_support()
5817 struct intel_iommu *iommu; in intel_iommu_probe_device() local
5819 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_probe_device()
5820 if (!iommu) in intel_iommu_probe_device()
5823 if (translation_pre_enabled(iommu)) in intel_iommu_probe_device()
5826 return &iommu->iommu; in intel_iommu_probe_device()
5831 struct intel_iommu *iommu; in intel_iommu_release_device() local
5833 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_release_device()
5834 if (!iommu) in intel_iommu_release_device()
5912 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) in intel_iommu_enable_pasid() argument
5926 spin_lock(&iommu->lock); in intel_iommu_enable_pasid()
5933 context = iommu_context_addr(iommu, info->bus, info->devfn, 0); in intel_iommu_enable_pasid()
5943 iommu->flush.flush_context(iommu, in intel_iommu_enable_pasid()
5944 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5957 spin_unlock(&iommu->lock); in intel_iommu_enable_pasid()
5986 struct intel_iommu *iommu; in intel_iommu_enable_auxd() local
5990 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_enable_auxd()
5991 if (!iommu || dmar_disabled) in intel_iommu_enable_auxd()
5994 if (!sm_supported(iommu) || !pasid_supported(iommu)) in intel_iommu_enable_auxd()
5997 ret = intel_iommu_enable_pasid(iommu, dev); in intel_iommu_enable_auxd()
6071 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) && in intel_iommu_dev_has_feat()
6091 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) in intel_iommu_dev_enable_feat()
6167 * thus not be able to bypass the IOMMU restrictions.
6173 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
6224 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
6305 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
6334 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()