Lines Matching refs:iommu
344 static void domain_context_clear(struct intel_iommu *iommu,
347 struct intel_iommu *iommu);
412 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
414 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
417 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
419 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
422 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
426 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
428 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
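The three helpers above keep a single software latch of whether firmware or a previous kernel left DMA translation enabled: init_translation_status() samples the global status register and, if translation is on, sets VTD_FLAG_TRANS_PRE_ENABLED, which translation_pre_enabled() tests and clear_translation_pre_enabled() clears. A minimal standalone sketch of that latch follows; it assumes the usual VT-d bit assignment (TES as bit 31 of DMAR_GSTS_REG, not quoted above) and replaces readl() with a simulated register value.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel definitions; TES is bit 31 of the Global
 * Status Register in the VT-d spec (assumed here). */
#define DMA_GSTS_TES               (1u << 31)
#define VTD_FLAG_TRANS_PRE_ENABLED (1u << 0)

struct fake_iommu {
	uint32_t gsts;   /* simulated DMAR_GSTS_REG contents */
	uint32_t flags;
};

/* Mirrors init_translation_status(): latch the hardware state in a flag. */
static void init_translation_status(struct fake_iommu *iommu)
{
	if (iommu->gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

static int translation_pre_enabled(const struct fake_iommu *iommu)
{
	return !!(iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct fake_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

int main(void)
{
	struct fake_iommu iommu = { .gsts = DMA_GSTS_TES, .flags = 0 };

	init_translation_status(&iommu);
	printf("pre-enabled: %d\n", translation_pre_enabled(&iommu));  /* 1 */
	clear_translation_pre_enabled(&iommu);
	printf("pre-enabled: %d\n", translation_pre_enabled(&iommu));  /* 0 */
	return 0;
}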
484 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
489 domains = iommu->domains[idx]; in get_iommu_domain()
496 static void set_iommu_domain(struct intel_iommu *iommu, u16 did, in set_iommu_domain() argument
502 if (!iommu->domains[idx]) { in set_iommu_domain()
504 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); in set_iommu_domain()
507 domains = iommu->domains[idx]; in set_iommu_domain()
563 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
568 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
581 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
583 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
591 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
593 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
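__iommu_calculate_agaw() and its two wrappers select the widest adjusted guest address width (AGAW) that both the caller's limit and the SAGAW capability field allow; the wrappers differ only in the cap they pass (MAX_AGAW_WIDTH vs. DEFAULT_DOMAIN_ADDRESS_WIDTH). Below is a standalone sketch of that selection, assuming the standard VT-d encoding where AGAW level n covers 30 + 9*n address bits and SAGAW is a bitmask of supported levels.

#include <stdio.h>

/* AGAW level n corresponds to a page-table depth covering 30 + 9*n
 * address bits (39, 48, 57 for n = 1, 2, 3). */
#define LEVEL_STRIDE 9

static int width_to_agaw(int width)
{
	return (width - 30 + LEVEL_STRIDE - 1) / LEVEL_STRIDE;
}

/* Sketch of __iommu_calculate_agaw(): walk down from the level implied
 * by max_gaw and return the first one the SAGAW mask supports, or -1. */
static int calculate_agaw(unsigned int sagaw, int max_gaw)
{
	int agaw;

	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
		if (sagaw & (1u << agaw))
			break;
	}
	return agaw;
}

int main(void)
{
	unsigned int sagaw = 0x4;  /* hypothetical unit: only 4-level (48-bit) tables */

	printf("agaw for 64-bit limit: %d\n", calculate_agaw(sagaw, 64));  /* 2 */
	printf("agaw for 39-bit limit: %d\n", calculate_agaw(sagaw, 39));  /* -1 */
	return 0;
}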
617 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
635 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
636 if (!ecap_coherent(iommu->ecap)) { in domain_update_iommu_coherency()
647 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
651 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
652 if (iommu != skip) { in domain_update_iommu_snooping()
653 if (!ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
667 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
676 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
677 if (iommu != skip) { in domain_update_iommu_superpage()
678 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
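domain_update_iommu_coherency(), _snooping() and _superpage() recompute domain-wide properties as the intersection of what every active unit offers: coherency and snoop control survive only if no unit lacks them, and the supported superpage sizes are AND-ed together, with an optional unit to skip while it is being hot-added. A sketch of that intersection over an array of per-unit capability summaries; the struct fields are hypothetical stand-ins for the cap/ecap accessors.

#include <stdio.h>

/* Hypothetical per-unit feature summary used only for this sketch. */
struct unit_caps {
	int coherent;            /* stands in for ecap_coherent()      */
	int snoop_ctl;           /* stands in for ecap_sc_support()    */
	unsigned int super_page; /* stands in for cap_super_page_val() */
};

struct domain_caps {
	int iommu_coherency;
	int iommu_snooping;
	unsigned int superpage_mask;
};

/* Intersect capabilities across all units, optionally skipping one
 * (the unit being hot-added, as the skip argument above suggests). */
static void update_domain_caps(struct domain_caps *d,
			       const struct unit_caps *units, int n, int skip)
{
	int i;

	d->iommu_coherency = 1;
	d->iommu_snooping = 1;
	d->superpage_mask = ~0u;

	for (i = 0; i < n; i++) {
		if (i == skip)
			continue;
		if (!units[i].coherent)
			d->iommu_coherency = 0;
		if (!units[i].snoop_ctl)
			d->iommu_snooping = 0;
		d->superpage_mask &= units[i].super_page;
	}
}

int main(void)
{
	struct unit_caps units[] = {
		{ .coherent = 1, .snoop_ctl = 1, .super_page = 0x3 }, /* 2M + 1G */
		{ .coherent = 1, .snoop_ctl = 0, .super_page = 0x1 }, /* 2M only */
	};
	struct domain_caps d;

	update_domain_caps(&d, units, 2, -1);
	printf("coherent=%d snooping=%d superpages=0x%x\n",
	       d.iommu_coherency, d.iommu_snooping, d.superpage_mask);
	return 0;
}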
696 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
699 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
704 if (sm_supported(iommu)) { in iommu_context_addr()
718 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
722 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
725 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
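iommu_context_addr() walks from the per-bus root entry to the context entry for a devfn, allocating and cache-flushing the context table on demand. In scalable mode the root entry is split into a lower and an upper half at devfn 0x80 (which is why free_context_table() below also walks iommu_context_addr(iommu, i, 0x80, 0)), and each context entry occupies two legacy-sized slots, so the index is doubled. A simplified sketch of just that index math, under those assumptions from the VT-d spec:

#include <stdio.h>

/*
 * Sketch of the index math in iommu_context_addr(), assuming the layout
 * from the VT-d spec: one root entry per bus; in scalable mode the root
 * entry's low half covers devfn 0x00-0x7f and the high half 0x80-0xff,
 * and scalable context entries are twice the legacy width.
 */
struct ctx_index {
	int use_high_half; /* which half of the root entry to follow */
	int slot;          /* index into the context table, in legacy-sized slots */
};

static struct ctx_index context_index(int scalable_mode, unsigned int devfn)
{
	struct ctx_index idx = { 0, (int)devfn };

	if (scalable_mode) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			idx.use_high_half = 1;
		}
		idx.slot = (int)(devfn * 2);  /* 256-bit entries */
	}
	return idx;
}

int main(void)
{
	struct ctx_index legacy = context_index(0, 0x85);
	struct ctx_index sm = context_index(1, 0x85);

	printf("legacy:   half=%d slot=%d\n", legacy.use_high_half, legacy.slot);
	printf("scalable: half=%d slot=%d\n", sm.use_high_half, sm.slot);
	return 0;
}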
732 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; in iommu_dummy()
765 struct intel_iommu *iommu; in device_to_iommu() local
794 for_each_active_iommu(iommu, drhd) { in device_to_iommu()
824 iommu = NULL; in device_to_iommu()
828 return iommu; in device_to_iommu()
838 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
844 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
845 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
848 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
852 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
858 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
859 if (!iommu->root_entry) { in free_context_table()
863 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
867 if (!sm_supported(iommu)) in free_context_table()
870 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
875 free_pgtable_page(iommu->root_entry); in free_context_table()
876 iommu->root_entry = NULL; in free_context_table()
878 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
1190 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1195 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1198 iommu->name); in iommu_alloc_root_entry()
1202 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1204 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1205 iommu->root_entry = root; in iommu_alloc_root_entry()
1206 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1211 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1217 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1218 if (sm_supported(iommu)) in iommu_set_root_entry()
1221 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1222 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1224 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1227 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1230 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
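iommu_set_root_entry() uses the register-programming pattern that recurs throughout this file: take register_lock with interrupts off, write the new value (here the root-table address plus the SRTP command bit in GCMD), then spin via IOMMU_WAIT_OP() until the matching status bit appears in DMAR_GSTS_REG. A userspace-only sketch of that write-then-poll idiom follows, with a simulated status register and a retry budget standing in for the real time-based timeout; RTPS as bit 30 is taken from the VT-d spec, not from the lines above.

#include <stdio.h>

/* Simulated "hardware": the status register settles after a few reads. */
static unsigned int fake_gsts;
static unsigned int fake_gsts_after;
static int fake_read_count;

static unsigned int read_gsts(void)
{
	if (++fake_read_count >= 3)
		fake_gsts = fake_gsts_after;
	return fake_gsts;
}

/*
 * Sketch of the IOMMU_WAIT_OP() idiom: poll a status register until a
 * condition holds or the budget runs out (the real macro uses a
 * time-based timeout with cpu_relax() and runs under register_lock).
 */
static int wait_op(unsigned int mask, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (read_gsts() & mask)
			return 0;   /* status bit observed */
	}
	return -1;                  /* timed out */
}

#define DMA_GSTS_RTPS (1u << 30)  /* Root Table Pointer Status, per spec */

int main(void)
{
	/* "Program" the root pointer, then wait for the hardware to ack. */
	fake_gsts_after = DMA_GSTS_RTPS;
	printf("set root entry: %s\n",
	       wait_op(DMA_GSTS_RTPS, 10) == 0 ? "acknowledged" : "timeout");
	return 0;
}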
1233 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1238 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1241 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1242 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1245 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1248 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1252 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1275 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1276 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1279 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1282 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1286 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1289 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1315 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1318 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1321 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1324 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1325 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1328 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1331 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1343 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1350 if (!iommu->qi) in iommu_support_dev_iotlb()
1354 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
1402 if (!ecap_dit(info->iommu->ecap)) in iommu_enable_dev_iotlb()
1480 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in iommu_flush_dev_iotlb()
1486 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1493 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1505 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1506 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1509 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1516 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1521 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1526 if (cap_caching_mode(iommu->cap)) in __mapping_notify_one()
1527 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1529 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
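iommu_flush_iotlb_psi() turns a (pfn, pages) range into a page-selective invalidation: the page count is rounded up to a power of two and converted to an address-mask order, and if the hardware cannot invalidate at that order (no cap_pgsel_inv, or the order exceeds cap_max_amask_val) it falls back to a domain-selective flush. __mapping_notify_one() adds the caching-mode rule: with Caching Mode set, even brand-new mappings need an IOTLB flush, otherwise flushing the write buffer suffices. A sketch of those two decisions with the capability fields reduced to plain parameters:

#include <stdio.h>

/* Round a page count up to a power of two and return its order,
 * i.e. the address mask used by a page-selective invalidation. */
static unsigned int pages_to_mask_order(unsigned long pages)
{
	unsigned int order = 0;

	while ((1ul << order) < pages)
		order++;
	return order;
}

/* Decide the invalidation granularity, as in iommu_flush_iotlb_psi(). */
static const char *choose_iotlb_flush(int pgsel_inv, unsigned int max_amask,
				      unsigned long pages)
{
	unsigned int mask = pages_to_mask_order(pages);

	if (!pgsel_inv || mask > max_amask)
		return "domain-selective flush";
	return "page-selective flush";
}

/* Decide what a newly created mapping needs, as in __mapping_notify_one(). */
static const char *choose_map_notify(int caching_mode)
{
	return caching_mode ? "IOTLB flush (caching mode)"
			    : "write-buffer flush only";
}

int main(void)
{
	printf("512 pages, max mask 6:  %s\n", choose_iotlb_flush(1, 6, 512));
	printf("512 pages, max mask 18: %s\n", choose_iotlb_flush(1, 18, 512));
	printf("new mapping, CM=1: %s\n", choose_map_notify(1));
	printf("new mapping, CM=0: %s\n", choose_map_notify(0));
	return 0;
}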
1540 struct intel_iommu *iommu = g_iommus[idx]; in iommu_flush_iova() local
1541 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iova()
1543 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in iommu_flush_iova()
1545 if (!cap_caching_mode(iommu->cap)) in iommu_flush_iova()
1546 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), in iommu_flush_iova()
1551 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1556 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1559 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1560 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1562 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1565 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1568 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1571 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1576 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1577 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1578 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1581 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1584 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1587 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1592 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1593 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1594 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1597 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1600 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1603 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1608 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1610 iommu->name, ndomains); in iommu_init_domains()
1613 spin_lock_init(&iommu->lock); in iommu_init_domains()
1615 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1616 if (!iommu->domain_ids) { in iommu_init_domains()
1618 iommu->name); in iommu_init_domains()
1623 iommu->domains = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1625 if (iommu->domains) { in iommu_init_domains()
1627 iommu->domains[0] = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1630 if (!iommu->domains || !iommu->domains[0]) { in iommu_init_domains()
1632 iommu->name); in iommu_init_domains()
1633 kfree(iommu->domain_ids); in iommu_init_domains()
1634 kfree(iommu->domains); in iommu_init_domains()
1635 iommu->domain_ids = NULL; in iommu_init_domains()
1636 iommu->domains = NULL; in iommu_init_domains()
1646 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1655 if (sm_supported(iommu)) in iommu_init_domains()
1656 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
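iommu_init_domains() sizes everything from cap_ndoms(): a domain_ids bitmap with one bit per hardware domain ID, plus a two-level domains array whose first level holds one pointer per block of 256 domains (only block 0 is allocated up front; set_iommu_domain() above fills in further blocks lazily, and free_dmar_iommu() frees them using the same ALIGN(..., 256) >> 8 split). Domain ID 0 is reserved, and with scalable mode FLPT_DEFAULT_DID is reserved as well. A sketch of that bookkeeping with calloc() in place of the kernel allocators; the FLPT_DEFAULT_DID value of 1 used in main() is purely illustrative.

#include <stdio.h>
#include <stdlib.h>

#define DOMAINS_PER_BLOCK 256

struct fake_iommu {
	unsigned int ndomains;
	unsigned char *domain_ids; /* one byte per ID here; the kernel uses a bitmap */
	void ***domains;           /* [block][index within block] -> domain pointer */
};

static int init_domains(struct fake_iommu *iommu, unsigned int ndomains,
			int scalable, unsigned int flpt_default_did)
{
	unsigned int nblocks = (ndomains + DOMAINS_PER_BLOCK - 1) / DOMAINS_PER_BLOCK;

	iommu->ndomains = ndomains;
	iommu->domain_ids = calloc(ndomains, 1);
	iommu->domains = calloc(nblocks, sizeof(*iommu->domains));
	if (!iommu->domain_ids || !iommu->domains)
		return -1;

	/* Only block 0 is populated up front; the rest come on demand. */
	iommu->domains[0] = calloc(DOMAINS_PER_BLOCK, sizeof(void *));
	if (!iommu->domains[0])
		return -1;

	/* Reserve the IDs the driver never hands out. */
	iommu->domain_ids[0] = 1;
	if (scalable && flpt_default_did < ndomains)
		iommu->domain_ids[flpt_default_did] = 1;
	return 0;
}

int main(void)
{
	struct fake_iommu iommu;

	/* 65536 domain IDs; FLPT_DEFAULT_DID passed as 1 purely for illustration. */
	if (init_domains(&iommu, 65536, 1, 1) == 0)
		printf("reserved: did 0 -> %d, did 1 -> %d, did 2 -> %d\n",
		       iommu.domain_ids[0], iommu.domain_ids[1], iommu.domain_ids[2]);
	return 0;
}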
1661 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1666 if (!iommu->domains || !iommu->domain_ids) in disable_dmar_iommu()
1671 if (info->iommu != iommu) in disable_dmar_iommu()
1681 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1682 iommu_disable_translation(iommu); in disable_dmar_iommu()
1685 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1687 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1688 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; in free_dmar_iommu()
1692 kfree(iommu->domains[i]); in free_dmar_iommu()
1693 kfree(iommu->domains); in free_dmar_iommu()
1694 kfree(iommu->domain_ids); in free_dmar_iommu()
1695 iommu->domains = NULL; in free_dmar_iommu()
1696 iommu->domain_ids = NULL; in free_dmar_iommu()
1699 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1702 free_context_table(iommu); in free_dmar_iommu()
1705 if (pasid_supported(iommu)) { in free_dmar_iommu()
1706 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1707 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1731 struct intel_iommu *iommu) in domain_attach_iommu() argument
1737 assert_spin_locked(&iommu->lock); in domain_attach_iommu()
1739 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1741 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1742 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1743 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1746 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1747 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1752 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1753 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1755 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1756 domain->nid = iommu->node; in domain_attach_iommu()
1765 struct intel_iommu *iommu) in domain_detach_iommu() argument
1770 assert_spin_locked(&iommu->lock); in domain_detach_iommu()
1772 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
1774 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
1775 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
1776 clear_bit(num, iommu->domain_ids); in domain_detach_iommu()
1777 set_iommu_domain(iommu, num, NULL); in domain_detach_iommu()
1780 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
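domain_attach_iommu() and domain_detach_iommu() maintain a per-(domain, unit) reference count: the first attach of a domain behind a given unit allocates a free domain ID from that unit's bitmap with find_first_zero_bit() and stores it in domain->iommu_did[], and the last detach clears the bit so the ID can be reused. A small sketch of that allocate-on-first-attach / release-on-last-detach logic (locking omitted, byte array in place of the bitmap):

#include <stdio.h>

#define NDOMAINS 16
#define MAX_UNITS 4

struct unit {
	unsigned char domain_ids[NDOMAINS];	/* 0 = free, 1 = in use */
};

struct domain {
	int refcnt[MAX_UNITS];	/* devices of this domain behind each unit */
	int did[MAX_UNITS];	/* hardware domain ID on each unit, 0 = none */
};

static int attach(struct domain *d, struct unit *u, int unit_id)
{
	if (++d->refcnt[unit_id] == 1) {
		int num;

		/* First device behind this unit: grab a free domain ID. */
		for (num = 0; num < NDOMAINS && u->domain_ids[num]; num++)
			;
		if (num == NDOMAINS) {
			d->refcnt[unit_id]--;
			return -1;	/* no free domain ids */
		}
		u->domain_ids[num] = 1;
		d->did[unit_id] = num;
	}
	return 0;
}

static void detach(struct domain *d, struct unit *u, int unit_id)
{
	if (--d->refcnt[unit_id] == 0) {
		/* Last device gone: release the ID for reuse. */
		u->domain_ids[d->did[unit_id]] = 0;
		d->did[unit_id] = 0;
	}
}

int main(void)
{
	struct unit u = { .domain_ids = { 1 } };  /* ID 0 reserved, as in iommu_init_domains() */
	struct domain d = { 0 };

	attach(&d, &u, 0);
	attach(&d, &u, 0);
	printf("did on unit 0: %d (refcnt %d)\n", d.did[0], d.refcnt[0]);
	detach(&d, &u, 0);
	detach(&d, &u, 0);
	printf("after detach: did %d, refcnt %d\n", d.did[0], d.refcnt[0]);
	return 0;
}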
1847 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, in domain_init() argument
1864 if (guest_width > cap_mgaw(iommu->cap)) in domain_init()
1865 guest_width = cap_mgaw(iommu->cap); in domain_init()
1869 sagaw = cap_sagaw(iommu->cap); in domain_init()
1879 if (ecap_coherent(iommu->ecap)) in domain_init()
1884 if (ecap_sc_support(iommu->ecap)) in domain_init()
1890 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1894 domain->nid = iommu->node; in domain_init()
1900 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1974 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1978 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
1996 spin_lock(&iommu->lock); in domain_context_mapping_one()
1999 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
2019 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
2020 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
2024 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
2031 if (sm_supported(iommu)) { in domain_context_mapping_one()
2048 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2064 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2071 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2085 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2101 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2102 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2106 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2108 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2115 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2123 struct intel_iommu *iommu; member
2132 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2142 struct intel_iommu *iommu; in domain_context_mapping() local
2145 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2146 if (!iommu) in domain_context_mapping()
2152 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2156 data.iommu = iommu; in domain_context_mapping()
2166 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2168 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2173 struct intel_iommu *iommu; in domain_context_mapped() local
2176 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2177 if (!iommu) in domain_context_mapped()
2181 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2184 domain_context_mapped_cb, iommu); in domain_context_mapped()
2343 struct intel_iommu *iommu; in domain_mapping() local
2351 iommu = g_iommus[iommu_id]; in domain_mapping()
2352 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); in domain_mapping()
2372 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_context_clear_one() argument
2378 if (!iommu) in domain_context_clear_one()
2381 spin_lock_irqsave(&iommu->lock, flags); in domain_context_clear_one()
2382 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2384 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2389 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2390 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2391 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2396 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2409 info->dev->archdata.iommu = NULL; in unlink_domain_info()
2431 if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) { in find_domain()
2434 dev->archdata.iommu = NULL; in find_domain()
2441 info = dev->archdata.iommu; in find_domain()
2454 if (info->iommu->segment == segment && info->bus == bus && in dmar_search_domain_by_dev_info()
2461 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, in dmar_insert_one_dev_info() argument
2482 info->iommu = iommu; in dmar_insert_one_dev_info()
2492 ecap_dev_iotlb_support(iommu->ecap) && in dmar_insert_one_dev_info()
2497 if (sm_supported(iommu)) { in dmar_insert_one_dev_info()
2498 if (pasid_supported(iommu)) { in dmar_insert_one_dev_info()
2504 if (info->ats_supported && ecap_prs(iommu->ecap) && in dmar_insert_one_dev_info()
2516 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); in dmar_insert_one_dev_info()
2530 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2531 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2532 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2543 dev->archdata.iommu = info; in dmar_insert_one_dev_info()
2547 if (dev && dev_is_pci(dev) && sm_supported(iommu)) { in dmar_insert_one_dev_info()
2556 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2558 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2561 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2563 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2590 struct intel_iommu *iommu; in find_or_alloc_domain() local
2595 iommu = device_to_iommu(dev, &bus, &devfn); in find_or_alloc_domain()
2596 if (!iommu) in find_or_alloc_domain()
2609 iommu = info->iommu; in find_or_alloc_domain()
2623 if (domain_init(domain, iommu, gaw)) { in find_or_alloc_domain()
2635 struct intel_iommu *iommu; in set_domain_for_dev() local
2640 iommu = device_to_iommu(dev, &bus, &devfn); in set_domain_for_dev()
2641 if (!iommu) in set_domain_for_dev()
2653 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), in set_domain_for_dev()
2661 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in set_domain_for_dev()
2796 info = dev->archdata.iommu; in identity_mapping()
2806 struct intel_iommu *iommu; in domain_add_dev_info() local
2809 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2810 if (!iommu) in domain_add_dev_info()
2813 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2966 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2974 if (!iommu->qi) { in intel_iommu_init_qi()
2978 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2983 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2986 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2990 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2991 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2993 iommu->name); in intel_iommu_init_qi()
2995 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2996 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2997 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
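intel_iommu_init_qi() decides once per unit how invalidations will be issued and installs the result as function pointers: if queued invalidation cannot be enabled it falls back to the register-based __iommu_flush_context()/__iommu_flush_iotlb(), otherwise it wires in qi_flush_context()/qi_flush_iotlb(), so every later caller simply goes through iommu->flush.* without knowing which mode is active. A sketch of that dispatch pattern:

#include <stdio.h>

/* Per-unit flush operations, chosen once at init time. */
struct flush_ops {
	void (*flush_context)(const char *who);
	void (*flush_iotlb)(const char *who);
};

static void reg_flush_context(const char *who) { printf("%s: register-based context flush\n", who); }
static void reg_flush_iotlb(const char *who)   { printf("%s: register-based IOTLB flush\n", who); }
static void qi_flush_context(const char *who)  { printf("%s: queued context invalidation\n", who); }
static void qi_flush_iotlb(const char *who)    { printf("%s: queued IOTLB invalidation\n", who); }

/* Mirrors the decision in intel_iommu_init_qi(): prefer queued
 * invalidation, fall back to register-based flushing otherwise. */
static void init_qi(struct flush_ops *ops, int qi_enabled)
{
	if (!qi_enabled) {
		ops->flush_context = reg_flush_context;
		ops->flush_iotlb = reg_flush_iotlb;
	} else {
		ops->flush_context = qi_flush_context;
		ops->flush_iotlb = qi_flush_iotlb;
	}
}

int main(void)
{
	struct flush_ops ops;

	init_qi(&ops, 1);
	ops.flush_context("dmar0");
	ops.flush_iotlb("dmar0");

	init_qi(&ops, 0);
	ops.flush_context("dmar1");
	ops.flush_iotlb("dmar1");
	return 0;
}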
3001 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
3023 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
3053 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
3067 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
3068 set_bit(did, iommu->domain_ids); in copy_context_table()
3094 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
3103 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
3114 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
3116 new_ext = !!ecap_ecs(iommu->ecap); in copy_translation_tables()
3143 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
3147 iommu->name, bus); in copy_translation_tables()
3152 spin_lock_irqsave(&iommu->lock, flags); in copy_translation_tables()
3161 iommu->root_entry[bus].lo = val; in copy_translation_tables()
3168 iommu->root_entry[bus].hi = val; in copy_translation_tables()
3171 spin_unlock_irqrestore(&iommu->lock, flags); in copy_translation_tables()
3175 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
3188 struct intel_iommu *iommu; in init_dmars() local
3222 for_each_iommu(iommu, drhd) { in init_dmars()
3224 iommu_disable_translation(iommu); in init_dmars()
3233 if (pasid_supported(iommu)) { in init_dmars()
3234 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
3240 g_iommus[iommu->seq_id] = iommu; in init_dmars()
3242 intel_iommu_init_qi(iommu); in init_dmars()
3244 ret = iommu_init_domains(iommu); in init_dmars()
3248 init_translation_status(iommu); in init_dmars()
3250 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
3251 iommu_disable_translation(iommu); in init_dmars()
3252 clear_translation_pre_enabled(iommu); in init_dmars()
3254 iommu->name); in init_dmars()
3262 ret = iommu_alloc_root_entry(iommu); in init_dmars()
3266 if (translation_pre_enabled(iommu)) { in init_dmars()
3269 ret = copy_translation_tables(iommu); in init_dmars()
3281 iommu->name); in init_dmars()
3282 iommu_disable_translation(iommu); in init_dmars()
3283 clear_translation_pre_enabled(iommu); in init_dmars()
3286 iommu->name); in init_dmars()
3290 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
3293 if (pasid_supported(iommu)) in init_dmars()
3294 intel_svm_init(iommu); in init_dmars()
3303 for_each_active_iommu(iommu, drhd) { in init_dmars()
3304 iommu_flush_write_buffer(iommu); in init_dmars()
3305 iommu_set_root_entry(iommu); in init_dmars()
3306 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in init_dmars()
3307 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_dmars()
3333 for_each_iommu(iommu, drhd) { in init_dmars()
3340 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3344 iommu_flush_write_buffer(iommu); in init_dmars()
3347 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3353 ret = intel_svm_enable_prq(iommu); in init_dmars()
3359 ret = dmar_set_interrupt(iommu); in init_dmars()
3367 for_each_active_iommu(iommu, drhd) { in init_dmars()
3368 disable_dmar_iommu(iommu); in init_dmars()
3369 free_dmar_iommu(iommu); in init_dmars()
3510 struct intel_iommu *iommu; in __intel_map_single() local
3519 iommu = domain_get_iommu(domain); in __intel_map_single()
3531 !cap_zlr(iommu->cap)) in __intel_map_single()
3588 struct intel_iommu *iommu; in intel_unmap() local
3595 iommu = domain_get_iommu(domain); in intel_unmap()
3609 iommu_flush_iotlb_psi(iommu, domain, start_pfn, in intel_unmap()
3729 struct intel_iommu *iommu; in intel_map_sg() local
3739 iommu = domain_get_iommu(domain); in intel_map_sg()
3756 !cap_zlr(iommu->cap)) in intel_map_sg()
3823 struct intel_iommu *iommu; in bounce_map_single() local
3834 iommu = domain_get_iommu(domain); in bounce_map_single()
3835 if (WARN_ON(!iommu)) in bounce_map_single()
3849 !cap_zlr(iommu->cap)) in bounce_map_single()
4134 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in quirk_ioat_snb_local_iommu()
4172 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in init_no_remapping_devices()
4181 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
4183 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
4184 if (iommu->qi) in init_iommu_hw()
4185 dmar_reenable_qi(iommu); in init_iommu_hw()
4187 for_each_iommu(iommu, drhd) { in init_iommu_hw()
4194 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
4198 iommu_flush_write_buffer(iommu); in init_iommu_hw()
4200 iommu_set_root_entry(iommu); in init_iommu_hw()
4202 iommu->flush.flush_context(iommu, 0, 0, 0, in init_iommu_hw()
4204 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_iommu_hw()
4205 iommu_enable_translation(iommu); in init_iommu_hw()
4206 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
4215 struct intel_iommu *iommu; in iommu_flush_all() local
4217 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
4218 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
4220 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
4228 struct intel_iommu *iommu = NULL; in iommu_suspend() local
4231 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
4232 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), in iommu_suspend()
4234 if (!iommu->iommu_state) in iommu_suspend()
4240 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
4241 iommu_disable_translation(iommu); in iommu_suspend()
4243 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
4245 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
4246 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
4247 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
4248 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
4249 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
4250 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
4251 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
4252 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
4254 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
4259 for_each_active_iommu(iommu, drhd) in iommu_suspend()
4260 kfree(iommu->iommu_state); in iommu_suspend()
4268 struct intel_iommu *iommu = NULL; in iommu_resume() local
4279 for_each_active_iommu(iommu, drhd) { in iommu_resume()
4281 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
4283 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
4284 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
4285 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
4286 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
4287 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
4288 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
4289 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
4290 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
4292 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
4295 for_each_active_iommu(iommu, drhd) in iommu_resume()
4296 kfree(iommu->iommu_state); in iommu_resume()
4446 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
4448 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
4451 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
4453 iommu->name); in intel_iommu_add()
4456 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
4457 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
4459 iommu->name); in intel_iommu_add()
4462 sp = domain_update_iommu_superpage(iommu) - 1; in intel_iommu_add()
4463 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
4465 iommu->name); in intel_iommu_add()
4472 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
4473 iommu_disable_translation(iommu); in intel_iommu_add()
4475 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
4476 ret = iommu_init_domains(iommu); in intel_iommu_add()
4478 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
4483 if (pasid_supported(iommu)) in intel_iommu_add()
4484 intel_svm_init(iommu); in intel_iommu_add()
4492 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4496 intel_iommu_init_qi(iommu); in intel_iommu_add()
4497 iommu_flush_write_buffer(iommu); in intel_iommu_add()
4500 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
4501 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
4506 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
4510 iommu_set_root_entry(iommu); in intel_iommu_add()
4511 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in intel_iommu_add()
4512 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in intel_iommu_add()
4513 iommu_enable_translation(iommu); in intel_iommu_add()
4515 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4519 disable_dmar_iommu(iommu); in intel_iommu_add()
4521 free_dmar_iommu(iommu); in intel_iommu_add()
4528 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
4532 if (iommu == NULL) in dmar_iommu_hotplug()
4538 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
4539 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4682 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4704 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4705 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
4730 struct intel_iommu *iommu = g_iommus[i]; in free_all_cpu_cached_iovas() local
4734 if (!iommu) in free_all_cpu_cached_iovas()
4737 for (did = 0; did < cap_ndoms(iommu->cap); did++) { in free_all_cpu_cached_iovas()
4738 domain = get_iommu_domain(iommu, (u16)did); in free_all_cpu_cached_iovas()
4755 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
4758 for_each_iommu(iommu, drhd) in intel_disable_iommus()
4759 iommu_disable_translation(iommu); in intel_disable_iommus()
4766 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
4773 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_version() local
4774 u32 ver = readl(iommu->reg + DMAR_VER_REG); in intel_iommu_show_version()
4784 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_address() local
4785 return sprintf(buf, "%llx\n", iommu->reg_phys); in intel_iommu_show_address()
4793 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_cap() local
4794 return sprintf(buf, "%llx\n", iommu->cap); in intel_iommu_show_cap()
4802 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ecap() local
4803 return sprintf(buf, "%llx\n", iommu->ecap); in intel_iommu_show_ecap()
4811 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ndoms() local
4812 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in intel_iommu_show_ndoms()
4820 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ndoms_used() local
4821 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in intel_iommu_show_ndoms_used()
4822 cap_ndoms(iommu->cap))); in intel_iommu_show_ndoms_used()
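The sysfs attributes above expose each unit's raw capability and extended-capability registers along with the derived domain counts. cap_ndoms(), used both here and when sizing the domain-ID bitmap, decodes the 3-bit Number-of-Domains field as 2^(4 + 2*ND) per the VT-d spec. A small sketch of that decode on a sample raw value; the value itself is hypothetical, standing in for what intel_iommu_show_cap() would print.

#include <stdint.h>
#include <stdio.h>

/* Decode of the Number-of-Domains field, matching cap_ndoms(): ND is
 * bits 2:0 of the capability register and the supported domain count
 * is 2^(4 + 2*ND). */
static unsigned long cap_ndoms(uint64_t cap)
{
	return 1ul << (4 + 2 * (cap & 0x7));
}

int main(void)
{
	/* Hypothetical raw value, e.g. as shown by the per-unit "cap"
	 * sysfs attribute exported above. */
	uint64_t cap = 0xd2008c40660462ull;

	printf("cap = %llx, ndoms = %lu\n",
	       (unsigned long long)cap, cap_ndoms(cap));
	return 0;
}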
4882 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
4886 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
4925 struct intel_iommu *iommu; in intel_iommu_init() local
4972 for_each_iommu(iommu, drhd) in intel_iommu_init()
4973 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
5025 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
5026 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
5028 "%s", iommu->name); in intel_iommu_init()
5029 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); in intel_iommu_init()
5030 iommu_device_register(&iommu->iommu); in intel_iommu_init()
5045 for_each_iommu(iommu, drhd) { in intel_iommu_init()
5046 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
5047 iommu_enable_translation(iommu); in intel_iommu_init()
5049 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
5069 struct intel_iommu *iommu = opaque; in domain_context_clear_one_cb() local
5071 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_clear_one_cb()
5081 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) in domain_context_clear() argument
5083 if (!iommu || !dev || !dev_is_pci(dev)) in domain_context_clear()
5086 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); in domain_context_clear()
5092 struct intel_iommu *iommu; in __dmar_remove_one_dev_info() local
5100 iommu = info->iommu; in __dmar_remove_one_dev_info()
5104 if (dev_is_pci(info->dev) && sm_supported(iommu)) in __dmar_remove_one_dev_info()
5105 intel_pasid_tear_down_entry(iommu, info->dev, in __dmar_remove_one_dev_info()
5109 domain_context_clear(iommu, info->dev); in __dmar_remove_one_dev_info()
5115 spin_lock_irqsave(&iommu->lock, flags); in __dmar_remove_one_dev_info()
5116 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
5117 spin_unlock_irqrestore(&iommu->lock, flags); in __dmar_remove_one_dev_info()
5134 info = dev->archdata.iommu; in dmar_remove_one_dev_info()
5224 struct device_domain_info *info = dev->archdata.iommu; in is_aux_domain()
5233 struct device_domain_info *info = dev->archdata.iommu; in auxiliary_link_device()
5246 struct device_domain_info *info = dev->archdata.iommu; in auxiliary_unlink_device()
5265 struct intel_iommu *iommu; in aux_domain_add_dev() local
5267 iommu = device_to_iommu(dev, &bus, &devfn); in aux_domain_add_dev()
5268 if (!iommu) in aux_domain_add_dev()
5289 spin_lock(&iommu->lock); in aux_domain_add_dev()
5290 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
5295 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
5299 spin_unlock(&iommu->lock); in aux_domain_add_dev()
5308 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
5310 spin_unlock(&iommu->lock); in aux_domain_add_dev()
5322 struct intel_iommu *iommu; in aux_domain_remove_dev() local
5329 info = dev->archdata.iommu; in aux_domain_remove_dev()
5330 iommu = info->iommu; in aux_domain_remove_dev()
5334 spin_lock(&iommu->lock); in aux_domain_remove_dev()
5335 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid); in aux_domain_remove_dev()
5336 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
5337 spin_unlock(&iommu->lock); in aux_domain_remove_dev()
5346 struct intel_iommu *iommu; in prepare_domain_attach_device() local
5350 iommu = device_to_iommu(dev, &bus, &devfn); in prepare_domain_attach_device()
5351 if (!iommu) in prepare_domain_attach_device()
5355 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
5356 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
5357 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
5370 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
5534 struct intel_iommu *iommu; in scalable_mode_support() local
5538 for_each_active_iommu(iommu, drhd) { in scalable_mode_support()
5539 if (!sm_supported(iommu)) { in scalable_mode_support()
5552 struct intel_iommu *iommu; in iommu_pasid_support() local
5556 for_each_active_iommu(iommu, drhd) { in iommu_pasid_support()
5557 if (!pasid_supported(iommu)) { in iommu_pasid_support()
5581 struct intel_iommu *iommu; in intel_iommu_add_device() local
5586 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_add_device()
5587 if (!iommu) in intel_iommu_add_device()
5590 iommu_device_link(&iommu->iommu, dev); in intel_iommu_add_device()
5592 if (translation_pre_enabled(iommu)) in intel_iommu_add_device()
5593 dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO; in intel_iommu_add_device()
5644 iommu_device_unlink(&iommu->iommu, dev); in intel_iommu_add_device()
5650 struct intel_iommu *iommu; in intel_iommu_remove_device() local
5653 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_remove_device()
5654 if (!iommu) in intel_iommu_remove_device()
5661 iommu_device_unlink(&iommu->iommu, dev); in intel_iommu_remove_device()
5733 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) in intel_iommu_enable_pasid() argument
5747 spin_lock(&iommu->lock); in intel_iommu_enable_pasid()
5750 info = dev->archdata.iommu; in intel_iommu_enable_pasid()
5754 context = iommu_context_addr(iommu, info->bus, info->devfn, 0); in intel_iommu_enable_pasid()
5764 iommu->flush.flush_context(iommu, in intel_iommu_enable_pasid()
5765 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5778 spin_unlock(&iommu->lock); in intel_iommu_enable_pasid()
5807 struct intel_iommu *iommu; in intel_svm_device_to_iommu() local
5816 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_device_to_iommu()
5817 if ((!iommu)) { in intel_svm_device_to_iommu()
5822 return iommu; in intel_svm_device_to_iommu()
5829 struct intel_iommu *iommu; in intel_iommu_enable_auxd() local
5834 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_enable_auxd()
5835 if (!iommu || dmar_disabled) in intel_iommu_enable_auxd()
5838 if (!sm_supported(iommu) || !pasid_supported(iommu)) in intel_iommu_enable_auxd()
5841 ret = intel_iommu_enable_pasid(iommu, dev); in intel_iommu_enable_auxd()
5846 info = dev->archdata.iommu; in intel_iommu_enable_auxd()
5859 info = dev->archdata.iommu; in intel_iommu_disable_auxd()
5936 struct device_domain_info *info = dev->archdata.iommu; in intel_iommu_dev_feat_enabled()
5956 return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; in intel_iommu_is_attach_deferred()