Lines Matching refs:iommu

403 	struct intel_iommu *iommu; /* IOMMU used by this device */  member
542 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
547 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
560 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
562 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
570 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
572 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
624 struct intel_iommu *iommu = NULL; in domain_update_iommu_superpage() local
633 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
634 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
665 return drhd->iommu; in device_to_iommu()
670 return drhd->iommu; in device_to_iommu()
674 return drhd->iommu; in device_to_iommu()
688 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, in device_to_context_entry() argument
696 spin_lock_irqsave(&iommu->lock, flags); in device_to_context_entry()
697 root = &iommu->root_entry[bus]; in device_to_context_entry()
701 alloc_pgtable_page(iommu->node); in device_to_context_entry()
703 spin_unlock_irqrestore(&iommu->lock, flags); in device_to_context_entry()
706 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in device_to_context_entry()
710 __iommu_flush_cache(iommu, root, sizeof(*root)); in device_to_context_entry()
712 spin_unlock_irqrestore(&iommu->lock, flags); in device_to_context_entry()
716 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
723 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
724 root = &iommu->root_entry[bus]; in device_context_mapped()
732 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
736 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_table() argument
742 spin_lock_irqsave(&iommu->lock, flags); in clear_context_table()
743 root = &iommu->root_entry[bus]; in clear_context_table()
747 __iommu_flush_cache(iommu, &context[devfn], \ in clear_context_table()
750 spin_unlock_irqrestore(&iommu->lock, flags); in clear_context_table()
753 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
760 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
761 if (!iommu->root_entry) { in free_context_table()
765 root = &iommu->root_entry[i]; in free_context_table()
770 free_pgtable_page(iommu->root_entry); in free_context_table()
771 iommu->root_entry = NULL; in free_context_table()
773 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
951 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
956 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
960 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
962 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
963 iommu->root_entry = root; in iommu_alloc_root_entry()
964 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
969 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
975 addr = iommu->root_entry; in iommu_set_root_entry()
977 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
978 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); in iommu_set_root_entry()
980 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
983 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
986 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
989 static void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
994 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
997 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
998 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1001 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1004 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1008 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1031 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1032 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1035 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1038 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1042 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1045 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1071 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1074 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1077 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1080 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1081 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1084 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1087 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1104 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn); in iommu_support_dev_iotlb() local
1106 if (!ecap_dev_iotlb_support(iommu->ecap)) in iommu_support_dev_iotlb()
1109 if (!iommu->qi) in iommu_support_dev_iotlb()
1129 info->iommu = iommu; in iommu_support_dev_iotlb()
1164 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); in iommu_flush_dev_iotlb()
1169 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, in iommu_flush_iotlb_psi() argument
1183 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1184 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1187 iommu->flush.flush_iotlb(iommu, did, addr, mask, in iommu_flush_iotlb_psi()
1194 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1195 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); in iommu_flush_iotlb_psi()
1198 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1203 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1204 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1206 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1209 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1212 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1215 static int iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1220 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1221 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1222 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1225 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1228 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1232 static int iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1237 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1238 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1239 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1242 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1245 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1250 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1255 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1256 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id, in iommu_init_domains()
1260 spin_lock_init(&iommu->lock); in iommu_init_domains()
1265 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1266 if (!iommu->domain_ids) { in iommu_init_domains()
1270 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), in iommu_init_domains()
1272 if (!iommu->domains) { in iommu_init_domains()
1281 if (cap_caching_mode(iommu->cap)) in iommu_init_domains()
1282 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1290 void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1296 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1297 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { in free_dmar_iommu()
1298 domain = iommu->domains[i]; in free_dmar_iommu()
1299 clear_bit(i, iommu->domain_ids); in free_dmar_iommu()
1312 if (iommu->gcmd & DMA_GCMD_TE) in free_dmar_iommu()
1313 iommu_disable_translation(iommu); in free_dmar_iommu()
1315 if (iommu->irq) { in free_dmar_iommu()
1316 irq_set_handler_data(iommu->irq, NULL); in free_dmar_iommu()
1318 free_irq(iommu->irq, iommu); in free_dmar_iommu()
1319 destroy_irq(iommu->irq); in free_dmar_iommu()
1322 kfree(iommu->domains); in free_dmar_iommu()
1323 kfree(iommu->domain_ids); in free_dmar_iommu()
1325 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1337 free_context_table(iommu); in free_dmar_iommu()
1356 struct intel_iommu *iommu) in iommu_attach_domain() argument
1362 ndomains = cap_ndoms(iommu->cap); in iommu_attach_domain()
1364 spin_lock_irqsave(&iommu->lock, flags); in iommu_attach_domain()
1366 num = find_first_zero_bit(iommu->domain_ids, ndomains); in iommu_attach_domain()
1368 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_attach_domain()
1374 set_bit(num, iommu->domain_ids); in iommu_attach_domain()
1375 set_bit(iommu->seq_id, domain->iommu_bmp); in iommu_attach_domain()
1376 iommu->domains[num] = domain; in iommu_attach_domain()
1377 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_attach_domain()
1383 struct intel_iommu *iommu) in iommu_detach_domain() argument
1389 spin_lock_irqsave(&iommu->lock, flags); in iommu_detach_domain()
1390 ndomains = cap_ndoms(iommu->cap); in iommu_detach_domain()
1391 for_each_set_bit(num, iommu->domain_ids, ndomains) { in iommu_detach_domain()
1392 if (iommu->domains[num] == domain) { in iommu_detach_domain()
1399 clear_bit(num, iommu->domain_ids); in iommu_detach_domain()
1400 clear_bit(iommu->seq_id, domain->iommu_bmp); in iommu_detach_domain()
1401 iommu->domains[num] = NULL; in iommu_detach_domain()
1403 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_detach_domain()
1469 struct intel_iommu *iommu; in domain_init() local
1479 iommu = domain_get_iommu(domain); in domain_init()
1480 if (guest_width > cap_mgaw(iommu->cap)) in domain_init()
1481 guest_width = cap_mgaw(iommu->cap); in domain_init()
1485 sagaw = cap_sagaw(iommu->cap); in domain_init()
1496 if (ecap_coherent(iommu->ecap)) in domain_init()
1501 if (ecap_sc_support(iommu->ecap)) in domain_init()
1506 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1508 domain->nid = iommu->node; in domain_init()
1514 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1521 struct intel_iommu *iommu; in domain_exit() local
1541 for_each_active_iommu(iommu, drhd) in domain_exit()
1542 if (test_bit(iommu->seq_id, domain->iommu_bmp)) in domain_exit()
1543 iommu_detach_domain(domain, iommu); in domain_exit()
1553 struct intel_iommu *iommu; in domain_context_mapping_one() local
1568 iommu = device_to_iommu(segment, bus, devfn); in domain_context_mapping_one()
1569 if (!iommu) in domain_context_mapping_one()
1572 context = device_to_context_entry(iommu, bus, devfn); in domain_context_mapping_one()
1575 spin_lock_irqsave(&iommu->lock, flags); in domain_context_mapping_one()
1577 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1589 ndomains = cap_ndoms(iommu->cap); in domain_context_mapping_one()
1590 for_each_set_bit(num, iommu->domain_ids, ndomains) { in domain_context_mapping_one()
1591 if (iommu->domains[num] == domain) { in domain_context_mapping_one()
1599 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_context_mapping_one()
1601 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1606 set_bit(num, iommu->domain_ids); in domain_context_mapping_one()
1607 iommu->domains[num] = domain; in domain_context_mapping_one()
1616 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1619 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1638 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
1641 context_set_address_width(context, iommu->agaw); in domain_context_mapping_one()
1655 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
1656 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
1660 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
1662 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
1665 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1668 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_context_mapping_one()
1671 domain->nid = iommu->node; in domain_context_mapping_one()
1723 struct intel_iommu *iommu; in domain_context_mapped() local
1725 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, in domain_context_mapped()
1727 if (!iommu) in domain_context_mapped()
1730 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); in domain_context_mapped()
1740 ret = device_context_mapped(iommu, parent->bus->number, in domain_context_mapped()
1747 return device_context_mapped(iommu, tmp->subordinate->number, in domain_context_mapped()
1750 return device_context_mapped(iommu, tmp->bus->number, in domain_context_mapped()
1911 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) in iommu_detach_dev() argument
1913 if (!iommu) in iommu_detach_dev()
1916 clear_context_table(iommu, bus, devfn); in iommu_detach_dev()
1917 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_detach_dev()
1919 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_detach_dev()
1928 info->dev->dev.archdata.iommu = NULL; in unlink_domain_info()
1935 struct intel_iommu *iommu; in domain_remove_dev_info() local
1945 iommu = device_to_iommu(info->segment, info->bus, info->devfn); in domain_remove_dev_info()
1946 iommu_detach_dev(iommu, info->bus, info->devfn); in domain_remove_dev_info()
1964 info = pdev->dev.archdata.iommu; in find_domain()
1974 struct intel_iommu *iommu; in get_domain_for_dev() local
2026 iommu = drhd->iommu; in get_domain_for_dev()
2028 ret = iommu_attach_domain(domain, iommu); in get_domain_for_dev()
2099 pdev->dev.archdata.iommu = info; in get_domain_for_dev()
2203 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) in iommu_prepare_rmrr_dev()
2239 struct intel_iommu *iommu; in si_domain_init() local
2248 for_each_active_iommu(iommu, drhd) { in si_domain_init()
2249 ret = iommu_attach_domain(si_domain, iommu); in si_domain_init()
2290 info = pdev->dev.archdata.iommu; in identity_mapping()
2318 pdev->dev.archdata.iommu = info; in domain_add_dev_info()
2455 struct intel_iommu *iommu; in init_dmars() local
2497 iommu = drhd->iommu; in init_dmars()
2498 g_iommus[iommu->seq_id] = iommu; in init_dmars()
2500 ret = iommu_init_domains(iommu); in init_dmars()
2509 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2514 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2525 iommu = drhd->iommu; in init_dmars()
2532 if (iommu->qi) in init_dmars()
2538 dmar_fault(-1, iommu); in init_dmars()
2543 dmar_disable_qi(iommu); in init_dmars()
2550 iommu = drhd->iommu; in init_dmars()
2552 if (dmar_enable_qi(iommu)) { in init_dmars()
2557 iommu->flush.flush_context = __iommu_flush_context; in init_dmars()
2558 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in init_dmars()
2561 iommu->seq_id, in init_dmars()
2564 iommu->flush.flush_context = qi_flush_context; in init_dmars()
2565 iommu->flush.flush_iotlb = qi_flush_iotlb; in init_dmars()
2568 iommu->seq_id, in init_dmars()
2641 iommu_disable_protect_mem_regions(drhd->iommu); in init_dmars()
2644 iommu = drhd->iommu; in init_dmars()
2646 iommu_flush_write_buffer(iommu); in init_dmars()
2648 ret = dmar_set_interrupt(iommu); in init_dmars()
2652 iommu_set_root_entry(iommu); in init_dmars()
2654 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in init_dmars()
2655 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_dmars()
2657 ret = iommu_enable_translation(iommu); in init_dmars()
2661 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2669 iommu = drhd->iommu; in init_dmars()
2670 free_iommu(iommu); in init_dmars()
2741 info = dev->dev.archdata.iommu; in get_valid_domain_for_dev()
2750 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; in iommu_dummy()
2814 struct intel_iommu *iommu; in __intel_map_single() local
2826 iommu = domain_get_iommu(domain); in __intel_map_single()
2838 !cap_zlr(iommu->cap)) in __intel_map_single()
2854 if (cap_caching_mode(iommu->cap)) in __intel_map_single()
2855 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); in __intel_map_single()
2857 iommu_flush_write_buffer(iommu); in __intel_map_single()
2888 struct intel_iommu *iommu = g_iommus[i]; in flush_unmaps() local
2889 if (!iommu) in flush_unmaps()
2896 if (!cap_caching_mode(iommu->cap)) in flush_unmaps()
2897 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in flush_unmaps()
2905 if (cap_caching_mode(iommu->cap)) in flush_unmaps()
2906 iommu_flush_iotlb_psi(iommu, domain->id, in flush_unmaps()
2934 struct intel_iommu *iommu; in add_unmap() local
2940 iommu = domain_get_iommu(dom); in add_unmap()
2941 iommu_id = iommu->seq_id; in add_unmap()
2964 struct intel_iommu *iommu; in intel_unmap_page() local
2972 iommu = domain_get_iommu(domain); in intel_unmap_page()
2992 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap_page()
3058 struct intel_iommu *iommu; in intel_unmap_sg() local
3066 iommu = domain_get_iommu(domain); in intel_unmap_sg()
3083 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap_sg()
3122 struct intel_iommu *iommu; in intel_map_sg() local
3132 iommu = domain_get_iommu(domain); in intel_map_sg()
3149 !cap_zlr(iommu->cap)) in intel_map_sg()
3170 if (cap_caching_mode(iommu->cap)) in intel_map_sg()
3171 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); in intel_map_sg()
3173 iommu_flush_write_buffer(iommu); in intel_map_sg()
3299 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in quirk_ioat_snb_local_iommu()
3341 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in init_no_remapping_devices()
3351 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3353 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3354 if (iommu->qi) in init_iommu_hw()
3355 dmar_reenable_qi(iommu); in init_iommu_hw()
3357 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3364 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3368 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3370 iommu_set_root_entry(iommu); in init_iommu_hw()
3372 iommu->flush.flush_context(iommu, 0, 0, 0, in init_iommu_hw()
3374 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in init_iommu_hw()
3376 if (iommu_enable_translation(iommu)) in init_iommu_hw()
3378 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3387 struct intel_iommu *iommu; in iommu_flush_all() local
3389 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3390 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3392 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3400 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3403 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3404 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, in iommu_suspend()
3406 if (!iommu->iommu_state) in iommu_suspend()
3412 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3413 iommu_disable_translation(iommu); in iommu_suspend()
3415 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3417 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3418 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3419 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3420 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3421 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3422 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3423 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3424 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3426 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3431 for_each_active_iommu(iommu, drhd) in iommu_suspend()
3432 kfree(iommu->iommu_state); in iommu_suspend()
3440 struct intel_iommu *iommu = NULL; in iommu_resume() local
3451 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3453 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3455 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3456 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3457 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3458 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3459 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3460 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3461 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3462 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3464 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3467 for_each_active_iommu(iommu, drhd) in iommu_resume()
3468 kfree(iommu->iommu_state); in iommu_resume()
3684 struct intel_iommu *iommu; in intel_iommu_init() local
3689 iommu = drhd->iommu; in intel_iommu_init()
3690 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_init()
3691 iommu_disable_translation(iommu); in intel_iommu_init()
3752 static void iommu_detach_dependent_devices(struct intel_iommu *iommu, in iommu_detach_dependent_devices() argument
3757 if (!iommu || !pdev) in iommu_detach_dependent_devices()
3766 iommu_detach_dev(iommu, parent->bus->number, in iommu_detach_dependent_devices()
3771 iommu_detach_dev(iommu, in iommu_detach_dependent_devices()
3774 iommu_detach_dev(iommu, tmp->bus->number, in iommu_detach_dependent_devices()
3783 struct intel_iommu *iommu; in domain_remove_one_dev_info() local
3788 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, in domain_remove_one_dev_info()
3790 if (!iommu) in domain_remove_one_dev_info()
3803 iommu_detach_dev(iommu, info->bus, info->devfn); in domain_remove_one_dev_info()
3804 iommu_detach_dependent_devices(iommu, pdev); in domain_remove_one_dev_info()
3819 if (iommu == device_to_iommu(info->segment, info->bus, in domain_remove_one_dev_info()
3829 clear_bit(iommu->seq_id, domain->iommu_bmp); in domain_remove_one_dev_info()
3836 spin_lock_irqsave(&iommu->lock, tmp_flags); in domain_remove_one_dev_info()
3837 clear_bit(domain->id, iommu->domain_ids); in domain_remove_one_dev_info()
3838 iommu->domains[domain->id] = NULL; in domain_remove_one_dev_info()
3839 spin_unlock_irqrestore(&iommu->lock, tmp_flags); in domain_remove_one_dev_info()
3847 struct intel_iommu *iommu; in vm_domain_remove_all_dev_info() local
3858 iommu = device_to_iommu(info->segment, info->bus, info->devfn); in vm_domain_remove_all_dev_info()
3859 iommu_detach_dev(iommu, info->bus, info->devfn); in vm_domain_remove_all_dev_info()
3860 iommu_detach_dependent_devices(iommu, info->dev); in vm_domain_remove_all_dev_info()
3866 if (test_and_clear_bit(iommu->seq_id, in vm_domain_remove_all_dev_info()
3933 struct intel_iommu *iommu; in iommu_free_vm_domain() local
3940 iommu = drhd->iommu; in iommu_free_vm_domain()
3942 ndomains = cap_ndoms(iommu->cap); in iommu_free_vm_domain()
3943 for_each_set_bit(i, iommu->domain_ids, ndomains) { in iommu_free_vm_domain()
3944 if (iommu->domains[i] == domain) { in iommu_free_vm_domain()
3945 spin_lock_irqsave(&iommu->lock, flags); in iommu_free_vm_domain()
3946 clear_bit(i, iommu->domain_ids); in iommu_free_vm_domain()
3947 iommu->domains[i] = NULL; in iommu_free_vm_domain()
3948 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_free_vm_domain()
4014 struct intel_iommu *iommu; in intel_iommu_attach_device() local
4031 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, in intel_iommu_attach_device()
4033 if (!iommu) in intel_iommu_attach_device()
4037 addr_width = agaw_to_width(iommu->agaw); in intel_iommu_attach_device()
4038 if (addr_width > cap_mgaw(iommu->cap)) in intel_iommu_attach_device()
4039 addr_width = cap_mgaw(iommu->cap); in intel_iommu_attach_device()
4052 while (iommu->agaw < dmar_domain->agaw) { in intel_iommu_attach_device()
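Taken together, the init_dmars() references above (source lines 2641-2661) show the per-IOMMU bring-up order: flush the write buffer, register the fault interrupt, install the root table, issue global context and IOTLB invalidations, and only then enable translation. Below is a minimal sketch of that sequence reconstructed from the listed fragments alone; it is written as if it lived inside drivers/iommu/intel-iommu.c (most of these helpers are static there), the drhd->ignored guard, the helper name and the error handling are assumptions on my part, and the real function interleaves this with domain setup and queued-invalidation probing that the sketch omits.

    /*
     * Sketch only: the per-IOMMU enable sequence visible in the
     * init_dmars() lines referenced above (~2641-2661). Not the
     * verbatim kernel code; error paths and the surrounding loop
     * over DRHD units are simplified.
     */
    static int enable_one_iommu_sketch(struct dmar_drhd_unit *drhd)
    {
            struct intel_iommu *iommu = drhd->iommu;
            int ret;

            if (drhd->ignored) {                    /* assumed guard, cf. line 2641 */
                    iommu_disable_protect_mem_regions(iommu);
                    return 0;
            }

            iommu_flush_write_buffer(iommu);        /* line 2646 */

            ret = dmar_set_interrupt(iommu);        /* line 2648: fault reporting */
            if (ret)
                    return ret;

            iommu_set_root_entry(iommu);            /* line 2652: program RTADDR + SRTP */

            /* lines 2654-2655: global invalidation before enabling DMA remapping */
            iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
            iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

            ret = iommu_enable_translation(iommu);  /* line 2657: set DMA_GCMD_TE */
            if (ret)
                    return ret;

            iommu_disable_protect_mem_regions(iommu);       /* line 2661 */
            return 0;
    }

As the references suggest, the root table is programmed and the context/IOTLB caches are globally invalidated before DMA_GCMD_TE is set, so translation never starts against stale entries.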