Lines matching refs: iommu (all hits in drivers/iommu/dmar.c)
231 drhd->iommu->node = node; in dmar_parse_one_rhsa()
561 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
571 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
573 iounmap(iommu->reg); in unmap_iommu()
574 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
585 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) in map_iommu() argument
589 iommu->reg_phys = phys_addr; in map_iommu()
590 iommu->reg_size = VTD_PAGE_SIZE; in map_iommu()
592 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { in map_iommu()
598 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
599 if (!iommu->reg) { in map_iommu()
605 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); in map_iommu()
606 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); in map_iommu()
608 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { in map_iommu()
615 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), in map_iommu()
616 cap_max_fault_reg_offset(iommu->cap)); in map_iommu()
618 if (map_size > iommu->reg_size) { in map_iommu()
619 iounmap(iommu->reg); in map_iommu()
620 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
621 iommu->reg_size = map_size; in map_iommu()
622 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, in map_iommu()
623 iommu->name)) { in map_iommu()
628 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
629 if (!iommu->reg) { in map_iommu()
639 iounmap(iommu->reg); in map_iommu()
641 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
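
The map_iommu() hits above (585-641) show a two-pass mapping: reserve and ioremap() one VTD_PAGE_SIZE window, read CAP/ECAP through it, and if ecap_max_iotlb_offset()/cap_max_fault_reg_offset() point past that first page, release the window and remap a larger one. Below is a self-contained user-space sketch of that map-probe-grow pattern; probe_map()/probe_unmap() are hypothetical stand-ins for request_mem_region() + ioremap(), not kernel calls.

    #include <stdint.h>
    #include <stdlib.h>

    #define VTD_PAGE_SZ 4096UL

    /* Hypothetical stand-ins for request_mem_region() + ioremap(). */
    static void *probe_map(uint64_t phys, size_t size)  { (void)phys; return malloc(size); }
    static void  probe_unmap(void *reg, size_t size)    { (void)size; free(reg); }

    /*
     * Model of map_iommu(): map one page, read the capability registers,
     * then grow the mapping when the IOTLB or fault-record registers sit
     * past the first page.  The two offsets stand in for
     * ecap_max_iotlb_offset(ecap) and cap_max_fault_reg_offset(cap).
     */
    static int model_map_iommu(uint64_t phys, size_t iotlb_off, size_t fault_off,
    			   void **reg, size_t *reg_size)
    {
    	size_t need = iotlb_off > fault_off ? iotlb_off : fault_off;

    	*reg_size = VTD_PAGE_SZ;
    	*reg = probe_map(phys, *reg_size);
    	if (!*reg)
    		return -1;

    	if (need > *reg_size) {
    		probe_unmap(*reg, *reg_size);       /* mirrors iounmap() + release_mem_region() */
    		*reg_size = need;
    		*reg = probe_map(phys, *reg_size);  /* remap the larger window */
    		if (!*reg)
    			return -1;
    	}
    	return 0;
    }
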
648 struct intel_iommu *iommu; in alloc_iommu() local
660 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); in alloc_iommu()
661 if (!iommu) in alloc_iommu()
664 iommu->seq_id = iommu_allocated++; in alloc_iommu()
665 sprintf (iommu->name, "dmar%d", iommu->seq_id); in alloc_iommu()
667 err = map_iommu(iommu, drhd->reg_base_addr); in alloc_iommu()
669 pr_err("IOMMU: failed to map %s\n", iommu->name); in alloc_iommu()
674 agaw = iommu_calculate_agaw(iommu); in alloc_iommu()
677 iommu->seq_id); in alloc_iommu()
680 msagaw = iommu_calculate_max_sagaw(iommu); in alloc_iommu()
683 iommu->seq_id); in alloc_iommu()
686 iommu->agaw = agaw; in alloc_iommu()
687 iommu->msagaw = msagaw; in alloc_iommu()
689 iommu->node = -1; in alloc_iommu()
691 ver = readl(iommu->reg + DMAR_VER_REG); in alloc_iommu()
693 iommu->seq_id, in alloc_iommu()
696 (unsigned long long)iommu->cap, in alloc_iommu()
697 (unsigned long long)iommu->ecap); in alloc_iommu()
700 sts = readl(iommu->reg + DMAR_GSTS_REG); in alloc_iommu()
702 iommu->gcmd |= DMA_GCMD_IRE; in alloc_iommu()
704 iommu->gcmd |= DMA_GCMD_TE; in alloc_iommu()
706 iommu->gcmd |= DMA_GCMD_QIE; in alloc_iommu()
708 raw_spin_lock_init(&iommu->register_lock); in alloc_iommu()
710 drhd->iommu = iommu; in alloc_iommu()
714 unmap_iommu(iommu); in alloc_iommu()
716 kfree(iommu); in alloc_iommu()
720 void free_iommu(struct intel_iommu *iommu) in free_iommu() argument
722 if (!iommu) in free_iommu()
725 free_dmar_iommu(iommu); in free_iommu()
727 if (iommu->reg) in free_iommu()
728 unmap_iommu(iommu); in free_iommu()
730 kfree(iommu); in free_iommu()
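
alloc_iommu() and free_iommu() (648-730) are a matched pair: allocation maps the register window, computes agaw/msagaw, snapshots the already-enabled GCMD bits from DMAR_GSTS_REG, and publishes the unit through drhd->iommu; free_iommu() unwinds the same state (free_dmar_iommu(), unmap_iommu() only if the registers are still mapped, then kfree()). A hedged usage sketch follows, assuming alloc_iommu() takes the struct dmar_drhd_unit * as in this kernel era; its signature is not part of the listing.

    /* Sketch only: pair alloc_iommu() with free_iommu() through drhd->iommu. */
    static int model_init_drhd(struct dmar_drhd_unit *drhd)
    {
    	int ret = alloc_iommu(drhd);    /* assumed signature; maps regs, fills drhd->iommu */

    	if (ret)
    		return ret;             /* map/agaw failures are already unwound inside    */

    	/* ... bring up QI and fault reporting on drhd->iommu ... */

    	free_iommu(drhd->iommu);        /* free_dmar_iommu() + unmap_iommu() + kfree()     */
    	drhd->iommu = NULL;
    	return 0;
    }
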
746 static int qi_check_fault(struct intel_iommu *iommu, int index) in qi_check_fault() argument
750 struct q_inval *qi = iommu->qi; in qi_check_fault()
756 fault = readl(iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
764 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
772 __iommu_flush_cache(iommu, &qi->desc[index], in qi_check_fault()
774 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
784 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
787 tail = readl(iommu->reg + DMAR_IQT_REG); in qi_check_fault()
790 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
803 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
812 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) in qi_submit_sync() argument
815 struct q_inval *qi = iommu->qi; in qi_submit_sync()
848 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); in qi_submit_sync()
849 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); in qi_submit_sync()
858 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
868 rc = qi_check_fault(iommu, index); in qi_submit_sync()
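
qi_check_fault() and qi_submit_sync() (746-868) implement the synchronous queued-invalidation protocol: each request occupies two queue slots, the real descriptor plus a wait descriptor, the tail register DMAR_IQT_REG is advanced past both, and the caller spins on a status word that the hardware updates when it retires the wait descriptor, polling qi_check_fault() for IQE/ITE/ICE while it waits. Below is a self-contained sketch of that two-slot submit/wait handshake with a simulated hardware side; all names are illustrative, not the kernel's.

    #include <stdint.h>

    #define QI_SLOTS   256                  /* one 4 KiB page of 16-byte descriptors */
    #define QI_FREE    0
    #define QI_IN_USE  1
    #define QI_DONE    2
    #define MODEL_WAIT (1ULL << 63)         /* marks a wait descriptor in this model */

    struct model_qi {
    	uint64_t desc[QI_SLOTS];        /* stand-in for qi->desc[]        */
    	int      status[QI_SLOTS];      /* stand-in for qi->desc_status[] */
    	unsigned free_head;             /* next slot software fills       */
    	unsigned hw_head;               /* next slot "hardware" executes  */
    };

    /* Simulated hardware: consume descriptors up to the tail, retiring waits. */
    static void model_hw_run(struct model_qi *qi, unsigned tail)
    {
    	while (qi->hw_head != tail) {
    		uint64_t d = qi->desc[qi->hw_head];

    		if (d & MODEL_WAIT)     /* wait descriptor: post completion status */
    			qi->status[d & (QI_SLOTS - 1)] = QI_DONE;
    		qi->hw_head = (qi->hw_head + 1) % QI_SLOTS;
    	}
    }

    /* Model of qi_submit_sync(): one work slot plus one wait slot per request. */
    static int model_submit_sync(struct model_qi *qi, uint64_t desc)
    {
    	unsigned index = qi->free_head;
    	unsigned wait_index = (index + 1) % QI_SLOTS;

    	qi->status[index] = QI_IN_USE;
    	qi->desc[index] = desc;
    	qi->desc[wait_index] = MODEL_WAIT | index;  /* completion writes status[index] */
    	qi->free_head = (qi->free_head + 2) % QI_SLOTS;

    	model_hw_run(qi, qi->free_head);            /* real code: writel() to DMAR_IQT_REG   */

    	while (qi->status[index] != QI_DONE)
    		;                                   /* real code also polls qi_check_fault() */
    	qi->status[index] = QI_FREE;
    	return 0;
    }
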
891 void qi_global_iec(struct intel_iommu *iommu) in qi_global_iec() argument
899 qi_submit_sync(&desc, iommu); in qi_global_iec()
902 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, in qi_flush_context() argument
911 qi_submit_sync(&desc, iommu); in qi_flush_context()
914 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in qi_flush_iotlb() argument
922 if (cap_write_drain(iommu->cap)) in qi_flush_iotlb()
925 if (cap_read_drain(iommu->cap)) in qi_flush_iotlb()
933 qi_submit_sync(&desc, iommu); in qi_flush_iotlb()
936 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, in qi_flush_dev_iotlb() argument
954 qi_submit_sync(&desc, iommu); in qi_flush_dev_iotlb()
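
The qi_flush_context()/qi_flush_iotlb()/qi_flush_dev_iotlb() helpers (902-954) just pack a qi_desc, add whatever drain bits the capabilities allow, and hand it to qi_submit_sync(). A hedged usage sketch for a domain-selective flush follows; the trailing parameters and the DMA_CCMD_DOMAIN_INVL / DMA_TLB_DSI_FLUSH constants are assumed from this kernel era, since only part of each signature appears in the listing.

    /* Sketch: flush one domain's context-cache and IOTLB entries through QI.
     * The size_order/type arguments and the flush-type constants are assumed,
     * not shown in the listing above. */
    static void model_flush_domain(struct intel_iommu *iommu, u16 did)
    {
    	qi_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL);
    	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
    }
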
960 void dmar_disable_qi(struct intel_iommu *iommu) in dmar_disable_qi() argument
966 if (!ecap_qis(iommu->ecap)) in dmar_disable_qi()
969 raw_spin_lock_irqsave(&iommu->register_lock, flags); in dmar_disable_qi()
971 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); in dmar_disable_qi()
978 while ((readl(iommu->reg + DMAR_IQT_REG) != in dmar_disable_qi()
979 readl(iommu->reg + DMAR_IQH_REG)) && in dmar_disable_qi()
983 iommu->gcmd &= ~DMA_GCMD_QIE; in dmar_disable_qi()
984 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in dmar_disable_qi()
986 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, in dmar_disable_qi()
989 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in dmar_disable_qi()
995 static void __dmar_enable_qi(struct intel_iommu *iommu) in __dmar_enable_qi() argument
999 struct q_inval *qi = iommu->qi; in __dmar_enable_qi()
1004 raw_spin_lock_irqsave(&iommu->register_lock, flags); in __dmar_enable_qi()
1007 writel(0, iommu->reg + DMAR_IQT_REG); in __dmar_enable_qi()
1009 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); in __dmar_enable_qi()
1011 iommu->gcmd |= DMA_GCMD_QIE; in __dmar_enable_qi()
1012 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in __dmar_enable_qi()
1015 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); in __dmar_enable_qi()
1017 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in __dmar_enable_qi()
1025 int dmar_enable_qi(struct intel_iommu *iommu) in dmar_enable_qi() argument
1030 if (!ecap_qis(iommu->ecap)) in dmar_enable_qi()
1036 if (iommu->qi) in dmar_enable_qi()
1039 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1040 if (!iommu->qi) in dmar_enable_qi()
1043 qi = iommu->qi; in dmar_enable_qi()
1046 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); in dmar_enable_qi()
1049 iommu->qi = 0; in dmar_enable_qi()
1059 iommu->qi = 0; in dmar_enable_qi()
1068 __dmar_enable_qi(iommu); in dmar_enable_qi()
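
dmar_disable_qi(), __dmar_enable_qi() and dmar_enable_qi() (960-1068) follow a fixed handshake under register_lock: to disable, drain until DMAR_IQT_REG equals DMAR_IQH_REG, clear DMA_GCMD_QIE and poll DMAR_GSTS_REG until the QIES bit drops; to enable, zero the tail, point DMAR_IQA_REG at the freshly allocated descriptor page, set QIE and poll until QIES is set. Below is a self-contained sketch of that command/status handshake against a simulated register file; bit positions and field names are illustrative.

    #include <stdint.h>

    #define GCMD_QIE  (1u << 26)            /* queued-invalidation enable (assumed bit) */
    #define GSTS_QIES (1u << 26)            /* queued-invalidation enable status        */

    struct model_regs {
    	uint64_t iqa;                   /* invalidation queue address (DMAR_IQA_REG)    */
    	uint32_t iqh, iqt;              /* queue head/tail (DMAR_IQH_REG/DMAR_IQT_REG)  */
    	uint32_t gcmd, gsts;            /* command/status (DMAR_GCMD_REG/DMAR_GSTS_REG) */
    };

    /* Simulated hardware response: the status bit tracks the command bit. */
    static void model_hw_tick(struct model_regs *r)
    {
    	if (r->gcmd & GCMD_QIE)
    		r->gsts |= GSTS_QIES;
    	else
    		r->gsts &= ~GSTS_QIES;
    }

    static void model_enable_qi(struct model_regs *r, uint64_t desc_page_phys)
    {
    	r->iqt = 0;                     /* __dmar_enable_qi(): zero the tail first */
    	r->iqa = desc_page_phys;        /* point hardware at the descriptor page   */
    	r->gcmd |= GCMD_QIE;
    	do
    		model_hw_tick(r);
    	while (!(r->gsts & GSTS_QIES)); /* IOMMU_WAIT_OP(..., sts & DMA_GSTS_QIES, ...) */
    }

    static void model_disable_qi(struct model_regs *r)
    {
    	while (r->iqt != r->iqh)        /* drain: wait until head catches the tail */
    		r->iqh = r->iqt;        /* simulated hardware consumes the queue   */
    	r->gcmd &= ~GCMD_QIE;
    	do
    		model_hw_tick(r);
    	while (r->gsts & GSTS_QIES);
    }
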
1129 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_unmask() local
1133 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_unmask()
1134 writel(0, iommu->reg + DMAR_FECTL_REG); in dmar_msi_unmask()
1136 readl(iommu->reg + DMAR_FECTL_REG); in dmar_msi_unmask()
1137 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_unmask()
1143 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_mask() local
1146 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_mask()
1147 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); in dmar_msi_mask()
1149 readl(iommu->reg + DMAR_FECTL_REG); in dmar_msi_mask()
1150 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_mask()
1155 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_write() local
1158 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_write()
1159 writel(msg->data, iommu->reg + DMAR_FEDATA_REG); in dmar_msi_write()
1160 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); in dmar_msi_write()
1161 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); in dmar_msi_write()
1162 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_write()
1167 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_read() local
1170 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_read()
1171 msg->data = readl(iommu->reg + DMAR_FEDATA_REG); in dmar_msi_read()
1172 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); in dmar_msi_read()
1173 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); in dmar_msi_read()
1174 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_read()
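
dmar_msi_mask()/dmar_msi_unmask() and the dmar_msi_read()/dmar_msi_write() accessors (1129-1174) all follow the same MMIO discipline: touch the fault-event registers under register_lock, and read DMAR_FECTL_REG back after writing it so the posted write is flushed before the lock is dropped. A minimal illustration of that write-then-readback pattern through a volatile pointer; the bit value is assumed.

    #include <stdint.h>

    #define FECTL_IM (1u << 31)             /* fault-event interrupt mask (assumed bit) */

    /* 'fectl' stands in for iommu->reg + DMAR_FECTL_REG. */
    static void model_msi_mask(volatile uint32_t *fectl)
    {
    	*fectl = FECTL_IM;              /* writel(DMA_FECTL_IM, ...)            */
    	(void)*fectl;                   /* readl() back: flush the posted write */
    }

    static void model_msi_unmask(volatile uint32_t *fectl)
    {
    	*fectl = 0;                     /* clear the mask bit                   */
    	(void)*fectl;
    }
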
1177 static int dmar_fault_do_one(struct intel_iommu *iommu, int type, in dmar_fault_do_one() argument
1205 struct intel_iommu *iommu = dev_id; in dmar_fault() local
1210 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1211 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1220 reg = cap_fault_reg_offset(iommu->cap); in dmar_fault()
1229 data = readl(iommu->reg + reg + in dmar_fault()
1237 data = readl(iommu->reg + reg + in dmar_fault()
1241 guest_addr = dmar_readq(iommu->reg + reg + in dmar_fault()
1245 writel(DMA_FRCD_F, iommu->reg + reg + in dmar_fault()
1248 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
1250 dmar_fault_do_one(iommu, type, fault_reason, in dmar_fault()
1254 if (fault_index >= cap_num_fault_regs(iommu->cap)) in dmar_fault()
1256 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1259 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1262 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
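
dmar_fault() (1205-1262) is the fault-reporting loop: read DMAR_FSTS_REG, start at the fault-recording register indicated by cap_fault_reg_offset(), report and clear each record whose F bit is set while dropping register_lock around dmar_fault_do_one(), wrap at cap_num_fault_regs(), and finally acknowledge DMA_FSTS_PFO | DMA_FSTS_PPF. Below is a compact, self-contained model of that scan-report-clear walk over an in-memory record array.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_FAULT_REGS 8                /* stand-in for cap_num_fault_regs(cap) */

    struct model_frcd {                     /* one fault-recording register */
    	bool     pending;               /* DMA_FRCD_F                    */
    	uint16_t source_id;
    	uint64_t guest_addr;
    	uint8_t  reason;
    };

    /* Model of dmar_fault(): scan from the first pending index, wrapping once. */
    static int model_handle_faults(struct model_frcd *frcd, unsigned start)
    {
    	unsigned index = start;
    	int handled = 0;

    	do {
    		struct model_frcd *f = &frcd[index];

    		if (!f->pending)
    			break;                          /* no further recorded faults */
    		/* real code drops register_lock here and calls dmar_fault_do_one() */
    		printf("DMAR fault: sid %#x addr %#llx reason %u\n",
    		       f->source_id, (unsigned long long)f->guest_addr,
    		       (unsigned)f->reason);
    		f->pending = false;                     /* writel(DMA_FRCD_F, ...) clears it */
    		handled++;
    		index = (index + 1) % NUM_FAULT_REGS;   /* wrap at cap_num_fault_regs() */
    	} while (index != start);

    	/* real code finishes by acknowledging DMA_FSTS_PFO | DMA_FSTS_PPF */
    	return handled;
    }
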
1266 int dmar_set_interrupt(struct intel_iommu *iommu) in dmar_set_interrupt() argument
1273 if (iommu->irq) in dmar_set_interrupt()
1282 irq_set_handler_data(irq, iommu); in dmar_set_interrupt()
1283 iommu->irq = irq; in dmar_set_interrupt()
1288 iommu->irq = 0; in dmar_set_interrupt()
1293 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); in dmar_set_interrupt()
1308 struct intel_iommu *iommu = drhd->iommu; in enable_drhd_fault_handling() local
1310 ret = dmar_set_interrupt(iommu); in enable_drhd_fault_handling()
1321 dmar_fault(iommu->irq, iommu); in enable_drhd_fault_handling()
1322 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
1323 writel(fault_status, iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
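
enable_drhd_fault_handling() (1308-1323) ties this together per DRHD unit: install the fault interrupt with dmar_set_interrupt(), then call dmar_fault() once by hand so faults recorded before the handler existed are reported, and clear the latched status bits. A hedged sketch of that bring-up order, assuming the single-argument for_each_drhd_unit() iterator of this era:

    /* Sketch only: mirrors the bring-up order visible in the refs above. */
    static int model_enable_fault_handling(void)
    {
    	struct dmar_drhd_unit *drhd;

    	for_each_drhd_unit(drhd) {
    		struct intel_iommu *iommu = drhd->iommu;
    		int ret = dmar_set_interrupt(iommu);  /* allocate irq, request_irq(dmar_fault) */

    		if (ret)
    			return ret;
    		dmar_fault(iommu->irq, iommu);        /* drain faults logged before the handler */
    		/* the real code then clears DMAR_FSTS_REG by writing the status back */
    	}
    	return 0;
    }
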
1332 int dmar_reenable_qi(struct intel_iommu *iommu) in dmar_reenable_qi() argument
1334 if (!ecap_qis(iommu->ecap)) in dmar_reenable_qi()
1337 if (!iommu->qi) in dmar_reenable_qi()
1343 dmar_disable_qi(iommu); in dmar_reenable_qi()
1349 __dmar_enable_qi(iommu); in dmar_reenable_qi()