Lines Matching full:iommu

22 #include <linux/intel-iommu.h>
29 #include <linux/iommu.h>
65 static void free_iommu(struct intel_iommu *iommu);
462 if (dmaru->iommu) in dmar_free_drhd()
463 free_iommu(dmaru->iommu); in dmar_free_drhd()
502 drhd->iommu->node = node; in dmar_parse_one_rhsa()
761 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", in dmar_acpi_insert_dev_scope()
932 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
947 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
949 iounmap(iommu->reg); in unmap_iommu()
950 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
954 * map_iommu: map the iommu's registers
955 * @iommu: the iommu to map
958 * Memory map the iommu's registers. Start w/ a single page, and
961 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) in map_iommu() argument
965 iommu->reg_phys = phys_addr; in map_iommu()
966 iommu->reg_size = VTD_PAGE_SIZE; in map_iommu()
968 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { in map_iommu()
974 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
975 if (!iommu->reg) { in map_iommu()
981 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); in map_iommu()
982 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); in map_iommu()
984 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { in map_iommu()
989 if (ecap_vcs(iommu->ecap)) in map_iommu()
990 iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG); in map_iommu()
993 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), in map_iommu()
994 cap_max_fault_reg_offset(iommu->cap)); in map_iommu()
996 if (map_size > iommu->reg_size) { in map_iommu()
997 iounmap(iommu->reg); in map_iommu()
998 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
999 iommu->reg_size = map_size; in map_iommu()
1000 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, in map_iommu()
1001 iommu->name)) { in map_iommu()
1006 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
1007 if (!iommu->reg) { in map_iommu()
1017 iounmap(iommu->reg); in map_iommu()
1019 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
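
The matches above trace map_iommu()'s two-phase pattern: reserve and ioremap a single VTD_PAGE_SIZE page, read the CAP/ECAP registers through it, and, if ecap_max_iotlb_offset() or cap_max_fault_reg_offset() point past that first page, tear the mapping down and remap a larger region (the error labels at 1017-1019 undo both steps). A minimal kernel-style sketch of the idiom, assuming hypothetical names (regs_map, probe_extent); the real code is in map_iommu():

/*
 * Sketch of the grow-the-mapping idiom in map_iommu(): map one page,
 * learn the device's real register extent from its capability
 * registers, then remap. regs_map and probe_extent are hypothetical
 * names, not kernel API.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

struct regs_map {
	void __iomem	*base;
	phys_addr_t	phys;
	size_t		size;
};

static int regs_map_init(struct regs_map *m, phys_addr_t phys,
			 size_t (*probe_extent)(void __iomem *base))
{
	size_t need;

	m->phys = phys;
	m->size = PAGE_SIZE;			/* phase 1: a single page */
	if (!request_mem_region(m->phys, m->size, "regs"))
		return -EBUSY;
	m->base = ioremap(m->phys, m->size);
	if (!m->base)
		goto release;

	need = probe_extent(m->base);		/* read capability registers */
	if (need > m->size) {
		iounmap(m->base);		/* phase 2: remap larger */
		release_mem_region(m->phys, m->size);
		m->size = need;
		if (!request_mem_region(m->phys, m->size, "regs"))
			return -EBUSY;
		m->base = ioremap(m->phys, m->size);
		if (!m->base)
			goto release;
	}
	return 0;

release:
	release_mem_region(m->phys, m->size);
	return -ENOMEM;
}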
1024 static int dmar_alloc_seq_id(struct intel_iommu *iommu) in dmar_alloc_seq_id() argument
1026 iommu->seq_id = find_first_zero_bit(dmar_seq_ids, in dmar_alloc_seq_id()
1028 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) { in dmar_alloc_seq_id()
1029 iommu->seq_id = -1; in dmar_alloc_seq_id()
1031 set_bit(iommu->seq_id, dmar_seq_ids); in dmar_alloc_seq_id()
1032 sprintf(iommu->name, "dmar%d", iommu->seq_id); in dmar_alloc_seq_id()
1035 return iommu->seq_id; in dmar_alloc_seq_id()
1038 static void dmar_free_seq_id(struct intel_iommu *iommu) in dmar_free_seq_id() argument
1040 if (iommu->seq_id >= 0) { in dmar_free_seq_id()
1041 clear_bit(iommu->seq_id, dmar_seq_ids); in dmar_free_seq_id()
1042 iommu->seq_id = -1; in dmar_free_seq_id()
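
dmar_alloc_seq_id() hands out the "dmar%d" unit names from a global bitmap and dmar_free_seq_id() returns them. The idiom is plain bitmap ID allocation; a sketch with stand-in names (MY_MAX_UNITS for DMAR_UNITS_SUPPORTED, my_ids for dmar_seq_ids), with locking omitted for brevity:

/*
 * Sketch of the bitmap-based sequence-ID allocator behind the
 * "dmar%d" names. All identifiers here are illustrative stand-ins.
 */
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_UNITS 64
static DECLARE_BITMAP(my_ids, MY_MAX_UNITS);

static int my_alloc_id(void)
{
	int id = find_first_zero_bit(my_ids, MY_MAX_UNITS);

	if (id >= MY_MAX_UNITS)
		return -1;		/* all IDs in use */
	set_bit(id, my_ids);		/* claim it */
	return id;
}

static void my_free_id(int id)
{
	if (id >= 0)
		clear_bit(id, my_ids);
}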
1048 struct intel_iommu *iommu; in alloc_iommu() local
1059 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); in alloc_iommu()
1060 if (!iommu) in alloc_iommu()
1063 if (dmar_alloc_seq_id(iommu) < 0) { in alloc_iommu()
1069 err = map_iommu(iommu, drhd->reg_base_addr); in alloc_iommu()
1071 pr_err("Failed to map %s\n", iommu->name); in alloc_iommu()
1076 if (cap_sagaw(iommu->cap) == 0) { in alloc_iommu()
1078 iommu->name); in alloc_iommu()
1083 agaw = iommu_calculate_agaw(iommu); in alloc_iommu()
1085 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", in alloc_iommu()
1086 iommu->seq_id); in alloc_iommu()
1091 msagaw = iommu_calculate_max_sagaw(iommu); in alloc_iommu()
1093 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", in alloc_iommu()
1094 iommu->seq_id); in alloc_iommu()
1099 iommu->agaw = agaw; in alloc_iommu()
1100 iommu->msagaw = msagaw; in alloc_iommu()
1101 iommu->segment = drhd->segment; in alloc_iommu()
1103 iommu->node = NUMA_NO_NODE; in alloc_iommu()
1105 ver = readl(iommu->reg + DMAR_VER_REG); in alloc_iommu()
1107 iommu->name, in alloc_iommu()
1110 (unsigned long long)iommu->cap, in alloc_iommu()
1111 (unsigned long long)iommu->ecap); in alloc_iommu()
1114 sts = readl(iommu->reg + DMAR_GSTS_REG); in alloc_iommu()
1116 iommu->gcmd |= DMA_GCMD_IRE; in alloc_iommu()
1118 iommu->gcmd |= DMA_GCMD_TE; in alloc_iommu()
1120 iommu->gcmd |= DMA_GCMD_QIE; in alloc_iommu()
1122 raw_spin_lock_init(&iommu->register_lock); in alloc_iommu()
1130 err = iommu_device_sysfs_add(&iommu->iommu, NULL, in alloc_iommu()
1132 "%s", iommu->name); in alloc_iommu()
1136 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); in alloc_iommu()
1138 err = iommu_device_register(&iommu->iommu); in alloc_iommu()
1143 drhd->iommu = iommu; in alloc_iommu()
1144 iommu->drhd = drhd; in alloc_iommu()
1149 iommu_device_sysfs_remove(&iommu->iommu); in alloc_iommu()
1151 unmap_iommu(iommu); in alloc_iommu()
1153 dmar_free_seq_id(iommu); in alloc_iommu()
1155 kfree(iommu); in alloc_iommu()
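
The tail of alloc_iommu() (1149-1155) is the usual reverse-order goto unwind: each acquisition (sequence ID, register mapping, sysfs node, device registration) gets an error label that releases everything acquired before it. A self-contained sketch of the ladder, using hypothetical step_a/step_b/step_c stages with trivial stubs so it compiles standalone:

/*
 * Sketch of the reverse-order error-unwind ladder visible at the end
 * of alloc_iommu(). The stages are hypothetical placeholders.
 */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_step_a(void) { }
static void undo_step_b(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto undo_a;		/* unwind in reverse order */
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
out:
	return err;
}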
1159 static void free_iommu(struct intel_iommu *iommu) in free_iommu() argument
1161 if (intel_iommu_enabled && !iommu->drhd->ignored) { in free_iommu()
1162 iommu_device_unregister(&iommu->iommu); in free_iommu()
1163 iommu_device_sysfs_remove(&iommu->iommu); in free_iommu()
1166 if (iommu->irq) { in free_iommu()
1167 if (iommu->pr_irq) { in free_iommu()
1168 free_irq(iommu->pr_irq, iommu); in free_iommu()
1169 dmar_free_hwirq(iommu->pr_irq); in free_iommu()
1170 iommu->pr_irq = 0; in free_iommu()
1172 free_irq(iommu->irq, iommu); in free_iommu()
1173 dmar_free_hwirq(iommu->irq); in free_iommu()
1174 iommu->irq = 0; in free_iommu()
1177 if (iommu->qi) { in free_iommu()
1178 free_page((unsigned long)iommu->qi->desc); in free_iommu()
1179 kfree(iommu->qi->desc_status); in free_iommu()
1180 kfree(iommu->qi); in free_iommu()
1183 if (iommu->reg) in free_iommu()
1184 unmap_iommu(iommu); in free_iommu()
1186 dmar_free_seq_id(iommu); in free_iommu()
1187 kfree(iommu); in free_iommu()
1203 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) in qi_check_fault() argument
1207 struct q_inval *qi = iommu->qi; in qi_check_fault()
1208 int shift = qi_shift(iommu); in qi_check_fault()
1213 fault = readl(iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1221 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1235 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1245 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1248 tail = readl(iommu->reg + DMAR_IQT_REG); in qi_check_fault()
1251 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1264 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1276 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, in qi_submit_sync() argument
1279 struct q_inval *qi = iommu->qi; in qi_submit_sync()
1306 shift = qi_shift(iommu); in qi_submit_sync()
1333 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1343 rc = qi_check_fault(iommu, index, wait_index); in qi_submit_sync()
1367 void qi_global_iec(struct intel_iommu *iommu) in qi_global_iec() argument
1377 qi_submit_sync(iommu, &desc, 1, 0); in qi_global_iec()
1380 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, in qi_flush_context() argument
1391 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_context()
1394 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in qi_flush_iotlb() argument
1402 if (cap_write_drain(iommu->cap)) in qi_flush_iotlb()
1405 if (cap_read_drain(iommu->cap)) in qi_flush_iotlb()
1415 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_iotlb()
1418 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_flush_dev_iotlb() argument
1437 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_dev_iotlb()
1441 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, in qi_flush_piotlb() argument
1478 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_piotlb()
1482 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_flush_dev_iotlb_pasid() argument
1522 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_dev_iotlb_pasid()
1525 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, in qi_flush_pasid_cache() argument
1532 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_pasid_cache()
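
All of the qi_flush_* helpers funnel into qi_submit_sync(), which places descriptors in a circular queue shared with the hardware: software fills slots at the tail and advances DMAR_IQT_REG, the IOMMU consumes from the head (DMAR_IQH_REG), and qi_check_fault() watches DMAR_FSTS_REG for IQE/ITE/ICE errors. A simplified userspace model of the head/tail protocol (names are illustrative; the real ring lives in struct q_inval, and qi_submit_sync() also appends a wait descriptor and spins on its status):

/*
 * Simplified model of the invalidation-queue protocol used by
 * qi_submit_sync(): software writes a descriptor at the tail and bumps
 * the tail index; "hardware" advances the head as it executes entries.
 */
#include <stdint.h>

#define QI_DEMO_LEN 256			/* entries in the ring */

struct qi_demo_desc { uint64_t qw0, qw1; };

struct qi_demo {
	struct qi_demo_desc ring[QI_DEMO_LEN];
	volatile uint32_t head;		/* advanced by the consumer */
	volatile uint32_t tail;		/* advanced by software */
};

/* Returns 0 on success, -1 if the ring is full. */
static int qi_demo_submit(struct qi_demo *q, const struct qi_demo_desc *d)
{
	uint32_t next = (q->tail + 1) % QI_DEMO_LEN;

	if (next == q->head)		/* no free slot */
		return -1;
	q->ring[q->tail] = *d;		/* fill descriptor at the tail */
	q->tail = next;			/* the "writel" to DMAR_IQT_REG */
	return 0;
}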
1538 void dmar_disable_qi(struct intel_iommu *iommu) in dmar_disable_qi() argument
1544 if (!ecap_qis(iommu->ecap)) in dmar_disable_qi()
1547 raw_spin_lock_irqsave(&iommu->register_lock, flags); in dmar_disable_qi()
1549 sts = readl(iommu->reg + DMAR_GSTS_REG); in dmar_disable_qi()
1556 while ((readl(iommu->reg + DMAR_IQT_REG) != in dmar_disable_qi()
1557 readl(iommu->reg + DMAR_IQH_REG)) && in dmar_disable_qi()
1561 iommu->gcmd &= ~DMA_GCMD_QIE; in dmar_disable_qi()
1562 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in dmar_disable_qi()
1564 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, in dmar_disable_qi()
1567 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in dmar_disable_qi()
1573 static void __dmar_enable_qi(struct intel_iommu *iommu) in __dmar_enable_qi() argument
1577 struct q_inval *qi = iommu->qi; in __dmar_enable_qi()
1587 if (ecap_smts(iommu->ecap)) in __dmar_enable_qi()
1590 raw_spin_lock_irqsave(&iommu->register_lock, flags); in __dmar_enable_qi()
1593 writel(0, iommu->reg + DMAR_IQT_REG); in __dmar_enable_qi()
1595 dmar_writeq(iommu->reg + DMAR_IQA_REG, val); in __dmar_enable_qi()
1597 iommu->gcmd |= DMA_GCMD_QIE; in __dmar_enable_qi()
1598 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in __dmar_enable_qi()
1601 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); in __dmar_enable_qi()
1603 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in __dmar_enable_qi()
1611 int dmar_enable_qi(struct intel_iommu *iommu) in dmar_enable_qi() argument
1616 if (!ecap_qis(iommu->ecap)) in dmar_enable_qi()
1622 if (iommu->qi) in dmar_enable_qi()
1625 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1626 if (!iommu->qi) in dmar_enable_qi()
1629 qi = iommu->qi; in dmar_enable_qi()
1635 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, in dmar_enable_qi()
1636 !!ecap_smts(iommu->ecap)); in dmar_enable_qi()
1639 iommu->qi = NULL; in dmar_enable_qi()
1649 iommu->qi = NULL; in dmar_enable_qi()
1655 __dmar_enable_qi(iommu); in dmar_enable_qi()
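
__dmar_enable_qi() and dmar_disable_qi() both use the VT-d command handshake: read the current state via DMAR_GSTS_REG, set or clear the relevant DMA_GCMD_* bit through DMAR_GCMD_REG, then poll the status register until the hardware reflects the change (the IOMMU_WAIT_OP at 1601). A self-contained sketch of that handshake, modeling the two registers as plain pointers:

/*
 * Sketch of the "set command bit, poll status bit" handshake used by
 * __dmar_enable_qi()/dmar_disable_qi(). Register access is modeled
 * with volatile pointers instead of readl()/writel().
 */
#include <stdint.h>

/* Returns 0 once `bit` reaches the requested state, -1 on timeout. */
static int cmd_and_wait(volatile uint32_t *cmd_reg,
			volatile uint32_t *sts_reg,
			uint32_t bit, int set)
{
	uint32_t cmd = *sts_reg;	/* current state, as status reports it */
	int spins = 1000000;

	cmd = set ? (cmd | bit) : (cmd & ~bit);
	*cmd_reg = cmd;			/* issue the command */
	while (spins--) {
		if (!!(*sts_reg & bit) == !!set)
			return 0;	/* hardware acknowledged */
	}
	return -1;			/* hardware never acknowledged */
}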
1660 /* iommu interrupt handling. Most of it is MSI-like. */
1775 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq) in dmar_msi_reg() argument
1777 if (iommu->irq == irq) in dmar_msi_reg()
1779 else if (iommu->pr_irq == irq) in dmar_msi_reg()
1787 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_unmask() local
1788 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_unmask()
1792 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_unmask()
1793 writel(0, iommu->reg + reg); in dmar_msi_unmask()
1795 readl(iommu->reg + reg); in dmar_msi_unmask()
1796 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_unmask()
1801 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_mask() local
1802 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_mask()
1806 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_mask()
1807 writel(DMA_FECTL_IM, iommu->reg + reg); in dmar_msi_mask()
1809 readl(iommu->reg + reg); in dmar_msi_mask()
1810 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_mask()
1815 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_write() local
1816 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_write()
1819 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_write()
1820 writel(msg->data, iommu->reg + reg + 4); in dmar_msi_write()
1821 writel(msg->address_lo, iommu->reg + reg + 8); in dmar_msi_write()
1822 writel(msg->address_hi, iommu->reg + reg + 12); in dmar_msi_write()
1823 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_write()
1828 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_read() local
1829 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_read()
1832 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_read()
1833 msg->data = readl(iommu->reg + reg + 4); in dmar_msi_read()
1834 msg->address_lo = readl(iommu->reg + reg + 8); in dmar_msi_read()
1835 msg->address_hi = readl(iommu->reg + reg + 12); in dmar_msi_read()
1836 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_read()
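
dmar_msi_reg() selects which register block an IRQ belongs to (fault events vs. page requests), and the four accessors above program it with a fixed MSI-like layout: a control word at +0 (carrying the DMA_FECTL_IM mask bit), message data at +4, address low/high at +8/+12, with a read-back to post the write. A sketch of that layout as a struct overlay (the struct is illustrative; the driver uses raw offsets from iommu->reg):

/*
 * Sketch of the MSI-like register block programmed by dmar_msi_write()
 * and masked by dmar_msi_mask(). The struct name is hypothetical.
 */
#include <stdint.h>

struct msi_like_block {
	volatile uint32_t control;	/* +0: interrupt-mask bit lives here */
	volatile uint32_t data;		/* +4: MSI message data */
	volatile uint32_t address_lo;	/* +8 */
	volatile uint32_t address_hi;	/* +12 */
};

static void msi_like_program(struct msi_like_block *b,
			     uint32_t data, uint64_t addr)
{
	b->data = data;
	b->address_lo = (uint32_t)addr;
	b->address_hi = (uint32_t)(addr >> 32);
}

static void msi_like_mask(struct msi_like_block *b, uint32_t im_bit)
{
	b->control = im_bit;		/* set the interrupt-mask bit */
	(void)b->control;		/* read back to post the write */
}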
1839 static int dmar_fault_do_one(struct intel_iommu *iommu, int type, in dmar_fault_do_one() argument
1865 struct intel_iommu *iommu = dev_id; in dmar_fault() local
1873 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1874 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1883 reg = cap_fault_reg_offset(iommu->cap); in dmar_fault()
1896 data = readl(iommu->reg + reg + in dmar_fault()
1906 data = readl(iommu->reg + reg + in dmar_fault()
1911 guest_addr = dmar_readq(iommu->reg + reg + in dmar_fault()
1917 writel(DMA_FRCD_F, iommu->reg + reg + in dmar_fault()
1920 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
1924 dmar_fault_do_one(iommu, type, fault_reason, in dmar_fault()
1929 if (fault_index >= cap_num_fault_regs(iommu->cap)) in dmar_fault()
1931 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1935 iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1938 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
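
dmar_fault() drains the fault-recording ring: starting from the index reported in DMAR_FSTS_REG, it reads each record, reports it through dmar_fault_do_one(), acknowledges it by writing DMA_FRCD_F back, and wraps at cap_num_fault_regs(). A simplified model of the walk (real records are 128 bits at cap_fault_reg_offset(); this sketch shrinks them to two words and keeps only the F bit):

/*
 * Simplified model of the fault-record ring walk in dmar_fault().
 * All names are illustrative.
 */
#include <stdint.h>

#define DEMO_FRCD_F (1u << 31)		/* "record pending" bit */

struct fault_rec { volatile uint32_t lo, hi; };

static void drain_faults(struct fault_rec *recs, unsigned int nrecs,
			 unsigned int start,
			 void (*report)(uint32_t lo, uint32_t hi))
{
	unsigned int i = start;

	for (;;) {
		uint32_t hi = recs[i].hi;

		if (!(hi & DEMO_FRCD_F))
			break;			/* no more pending records */
		report(recs[i].lo, hi);
		recs[i].hi = DEMO_FRCD_F;	/* write-1-to-clear the record */
		if (++i >= nrecs)		/* the ring wraps */
			i = 0;
	}
}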
1942 int dmar_set_interrupt(struct intel_iommu *iommu) in dmar_set_interrupt() argument
1949 if (iommu->irq) in dmar_set_interrupt()
1952 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu); in dmar_set_interrupt()
1954 iommu->irq = irq; in dmar_set_interrupt()
1960 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); in dmar_set_interrupt()
1969 struct intel_iommu *iommu; in enable_drhd_fault_handling() local
1974 for_each_iommu(iommu, drhd) { in enable_drhd_fault_handling()
1976 int ret = dmar_set_interrupt(iommu); in enable_drhd_fault_handling()
1987 dmar_fault(iommu->irq, iommu); in enable_drhd_fault_handling()
1988 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
1989 writel(fault_status, iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
1998 int dmar_reenable_qi(struct intel_iommu *iommu) in dmar_reenable_qi() argument
2000 if (!ecap_qis(iommu->ecap)) in dmar_reenable_qi()
2003 if (!iommu->qi) in dmar_reenable_qi()
2009 dmar_disable_qi(iommu); in dmar_reenable_qi()
2015 __dmar_enable_qi(iommu); in dmar_reenable_qi()