Lines Matching refs:iommu (each entry: file line number, matched code fragment, enclosing function)
85 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
94 iommu->name); in intel_svm_enable_prq()
97 iommu->prq = page_address(pages); in intel_svm_enable_prq()
99 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
102 iommu->name); in intel_svm_enable_prq()
106 iommu->pr_irq = irq; in intel_svm_enable_prq()
108 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
109 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
110 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
112 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
116 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
118 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
121 iommu->prq_name, iommu); in intel_svm_enable_prq()
124 iommu->name); in intel_svm_enable_prq()
127 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
128 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
129 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
131 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
136 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
137 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
140 iommu->pr_irq = 0; in intel_svm_enable_prq()
142 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
143 iommu->prq = NULL; in intel_svm_enable_prq()
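The fragments above trace the page request queue (PRQ) setup path. A kernel-context sketch of how they likely fit together; the error-unwind labels, the alloc_pages_node() call, and the exact log strings are assumptions filled in from standard kernel error-handling patterns rather than taken from the fragments:

int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	struct page *pages;
	int irq, ret;

	/* Back the page request queue with zeroed, node-local pages. */
	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages)
		return -ENOMEM;
	iommu->prq = page_address(pages);

	/* PRQ interrupts are allocated past the per-unit fault IRQs. */
	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id,
			       iommu->node, iommu);
	if (irq <= 0) {
		ret = -EINVAL;
		goto free_prq;	/* label names are assumptions */
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name),
		 "dmar%d-prq", iommu->seq_id);
	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret)
		goto free_iopfq;

	/* Zero head/tail, then point the hardware at the new ring. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG,
		    virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);
	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;
	return ret;
}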
148 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
150 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
151 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
152 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
154 if (iommu->pr_irq) { in intel_svm_finish_prq()
155 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
156 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
157 iommu->pr_irq = 0; in intel_svm_finish_prq()
160 if (iommu->iopf_queue) { in intel_svm_finish_prq()
161 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
162 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
165 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
166 iommu->prq = NULL; in intel_svm_finish_prq()
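The teardown mirrors the setup in reverse. A sketch assembled almost entirely from the fragments above; only their assembly into one function body is editorial:

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	/* Point the queue registers at nothing before freeing the ring. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}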
171 static inline bool intel_svm_capable(struct intel_iommu *iommu) in intel_svm_capable() argument
173 return iommu->flags & VTD_FLAG_SVM_CAPABLE; in intel_svm_capable()
176 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
178 if (!pasid_supported(iommu)) in intel_svm_check()
182 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
184 iommu->name); in intel_svm_check()
189 !cap_5lp_support(iommu->cap)) { in intel_svm_check()
191 iommu->name); in intel_svm_check()
195 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
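intel_svm_check() gates VTD_FLAG_SVM_CAPABLE on the unit's first-level paging capabilities matching the CPU's. A sketch; the cpu_feature_enabled() guards paired with each capability test are assumptions inferred from the visible cap_fl1gp_support()/cap_5lp_support() checks:

void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	/* If the CPU can map 1GiB pages, first-level translation must too. */
	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	/* Likewise for five-level paging when the CPU runs with LA57. */
	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}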
208 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
210 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
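Both flush calls above belong to one per-device range-flush helper: a PASID-scoped IOTLB flush always runs, and a device-TLB flush follows only when ATS is enabled. A sketch; the get_domain_info() lookup, the ats_enabled guard, and the order_base_2() page-count encoding are assumptions:

static void __flush_svm_range_dev(struct intel_svm *svm,
				  struct intel_svm_dev *sdev,
				  unsigned long address,
				  unsigned long pages, int ih)
{
	struct device_domain_info *info = get_domain_info(sdev->dev);

	if (WARN_ON(!pages))
		return;

	/* Invalidate the IOMMU's own PASID-tagged IOTLB entries... */
	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);

	/* ...and, if the endpoint caches translations via ATS, its TLB too. */
	if (info->ats_enabled)
		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
					 svm->pasid, sdev->qdepth, address,
					 order_base_2(pages));
}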
272 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
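This teardown runs from the mmu_notifier release callback, i.e. when the process address space dies. A sketch of the loop around the fragment; the RCU iteration over svm->devs is an assumption based on the usual pattern for this list:

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* The mm is going away: clear every bound device's PASID entry so
	 * the hardware stops walking the dying page tables. */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
					    svm->pasid, true);
	rcu_read_unlock();
}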
324 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind_gpasid() local
332 if (WARN_ON(!iommu) || !data) in intel_svm_bind_gpasid()
412 sdev->iommu = iommu; in intel_svm_bind_gpasid()
419 ret = intel_iommu_enable_pasid(iommu, sdev->dev); in intel_svm_bind_gpasid()
431 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_gpasid()
432 ret = intel_pasid_setup_nested(iommu, dev, in intel_svm_bind_gpasid()
436 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_gpasid()
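The guest-PASID bind enables PASID support on the device and then programs a nested (guest first-level over host second-level) PASID entry under iommu->lock. A condensed slice of that path; the intel_pasid_setup_nested() argument list, including data->vendor.vtd and data->gpgd, is an assumption inferred from the fragments:

	/* Hypothetical slice; "data" is the iommu_gpasid_bind_data from
	 * userspace/VMM and "dmar_domain" the host second-level domain. */
	sdev->iommu = iommu;
	ret = intel_iommu_enable_pasid(iommu, sdev->dev);
	if (ret)
		goto free_sdev;

	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vendor.vtd,
				       dmar_domain, data->addr_width);
	spin_unlock_irqrestore(&iommu->lock, iflags);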
465 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_gpasid() local
470 if (WARN_ON(!iommu)) in intel_svm_unbind_gpasid()
483 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_gpasid()
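Unbinding a guest PASID tears the nested entry down and then drains page requests still in flight for that PASID. A two-line sketch of the core; the drain call is assumed by analogy with the native unbind path shown later in this listing:

	intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
	intel_svm_drain_prq(dev, svm->pasid);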
537 static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, in intel_svm_bind_mm() argument
591 sdev->iommu = iommu; in intel_svm_bind_mm()
609 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_mm()
610 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, in intel_svm_bind_mm()
612 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_mm()
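Native SVA binds point the PASID entry directly at the process page tables. A sketch of the locked setup step; the sflags computation is an assumption reconstructing how five-level paging and supervisor mode are usually selected here:

	unsigned long sflags, iflags;
	int ret;

	/* Pick first-level flags: supervisor PASIDs get kernel privilege,
	 * and LA57 hosts need five-level first-level tables. */
	sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
			PASID_FLAG_SUPERVISOR_MODE : 0;
	sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;

	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
					    FLPT_DEFAULT_DID, sflags);
	spin_unlock_irqrestore(&iommu->lock, iflags);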
642 struct intel_iommu *iommu; in intel_svm_unbind_mm() local
647 iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_mm()
648 if (!iommu) in intel_svm_unbind_mm()
667 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_mm()
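The native unbind follows the same tear-down-then-drain order: clear the PASID entry first so no new faults arrive, then drain requests already queued. Sketch; the surrounding refcounting, list removal, and mmu_notifier teardown are elided:

	intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
	intel_svm_drain_prq(dev, svm->pasid);
	/* Freeing sdev and, when svm->devs empties, unregistering the
	 * notifier and releasing the PASID follow here. */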
750 struct intel_iommu *iommu; in intel_svm_drain_prq() local
764 iommu = info->iommu; in intel_svm_drain_prq()
768 did = domain->iommu_did[iommu->seq_id]; in intel_svm_drain_prq()
776 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
777 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
778 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
782 req = &iommu->prq[head / sizeof(*req)]; in intel_svm_drain_prq()
788 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
824 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
825 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_svm_drain_prq()
826 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_svm_drain_prq()
827 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
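The drain runs in two phases: first wait until no request for the PASID remains between PQH and PQT (prq_event_thread() completes prq_complete whenever it empties the ring, re-triggering the scan), then push invalidations plus a drain command through the invalidation queue until the overflow bit clears. A sketch assembled from the fragments; the pasid_present/pasid field tests and the three-descriptor payload (fenced invalidation wait, PASID IOTLB invalidation, device-TLB invalidation) are partly assumed:

	/* Phase 1: spin until the ring holds no request for this PASID. */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}
		/* A matching request is still queued: wait for the handler
		 * thread to make progress, then rescan from scratch. */
		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	/* Phase 2: drain requests and responses in hardware, retrying
	 * while the Pending Request Overflow bit stays set. */
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}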
848 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
881 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) { in intel_svm_prq_report()
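The report path stamps the request with a timestamp when PRQ latency sampling is enabled and the hardware left the private-data fields free, so the response side can compute per-fault latency. A sketch of that branch; the iommu_fault_event field names are assumptions:

	} else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
		/* Private data unused by hardware: borrow it to carry the
		 * enqueue timestamp for PRQ latency accounting. */
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
		event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
	}

	return iommu_report_device_fault(dev, &event);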
892 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
898 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
928 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
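Malformed requests are logged and, when the device expects a reply (last page in group, or private data present), answered with the failure code so the endpoint does not stall waiting for a response. A sketch; the QI_PGRP_* descriptor packing follows the VT-d page-group-response layout and is partly assumed:

static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{
	struct qi_desc desc;

	pr_err("%s: Invalid page request: %08llx %08llx\n",
	       iommu->name, ((unsigned long long *)req)[0],
	       ((unsigned long long *)req)[1]);

	/* No response expected: logging is all we can do. */
	if (!req->lpig && !req->priv_data_present)
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
		   QI_PGRP_DID(req->rid) |
		   QI_PGRP_PASID_P(req->pasid_present) |
		   QI_PGRP_PDP(req->priv_data_present) |
		   QI_PGRP_RESP_CODE(result) |
		   QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
		   QI_PGRP_LPIG(req->lpig);
	desc.qw2 = 0;
	desc.qw3 = 0;
	if (req->priv_data_present)
		memcpy(&desc.qw2, req->priv_data, sizeof(req->priv_data));

	qi_submit_sync(iommu, &desc, 1, 0);
}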
934 struct intel_iommu *iommu = d; in prq_event_thread() local
944 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
946 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
947 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
950 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
955 iommu->name); in prq_event_thread()
959 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
965 iommu->name); in prq_event_thread()
971 iommu->name); in prq_event_thread()
977 iommu->name); in prq_event_thread()
1007 if (intel_svm_prq_report(iommu, sdev->dev, req)) in prq_event_thread()
1008 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
1010 trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
1017 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
1023 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
1025 iommu->name); in prq_event_thread()
1026 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
1027 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
1029 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
1030 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
1032 iommu->name); in prq_event_thread()
1036 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
1037 complete(&iommu->prq_complete); in prq_event_thread()
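The PRQ interrupt thread drains the ring between head and tail, acks PPR up front so a request landing mid-drain re-raises the interrupt, and clears a pending-request overflow only once the ring is empty. A skeleton sketch; per-descriptor validation and the device lookup are elided, so only the failure response path is shown inside the loop:

static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	u64 head, tail;

	/* Clear PPR before reading head/tail to guarantee a new interrupt
	 * for anything queued after this point. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

		/* Full validation and the intel_svm_dev lookup are elided;
		 * descriptors that fail them get an immediate response. */
		if (!req->pasid_present)
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);

		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/* Overflow recovery: only once the ring is drained, discard any
	 * partially collected fault groups and clear the PRO bit. */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			iopf_queue_discard_partial(iommu->iopf_queue);
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
		}
	}

	/* Wake intel_svm_drain_prq() waiters. */
	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_HANDLED;
}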
1044 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind() local
1053 if (!ecap_srs(iommu->ecap)) { in intel_svm_bind()
1055 iommu->name); in intel_svm_bind()
1061 iommu->name); in intel_svm_bind()
1075 sva = intel_svm_bind_mm(iommu, dev, mm, flags); in intel_svm_bind()
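The public bind entry point checks that supervisor-mode binds are backed by the unit's Supervisor Request Support (ecap SRS) and come without a user mm, then delegates to intel_svm_bind_mm() under pasid_mutex. Sketch; the error codes and log wording are assumptions:

	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			dev_err(dev, "%s: Supervisor PASID not supported\n",
				iommu->name);
			return ERR_PTR(-EOPNOTSUPP);
		}
		if (mm) {
			dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
				iommu->name);
			return ERR_PTR(-EINVAL);
		}
		mm = &init_mm;	/* kernel PASIDs walk the init page tables */
	}

	mutex_lock(&pasid_mutex);
	sva = intel_svm_bind_mm(iommu, dev, mm, flags);
	mutex_unlock(&pasid_mutex);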
1112 struct intel_iommu *iommu; in intel_svm_page_response() local
1123 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_page_response()
1124 if (!iommu) in intel_svm_page_response()
1198 dmar_latency_update(iommu, DMAR_LATENCY_PRQ, in intel_svm_page_response()
1202 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
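The page-response path closes the loop: it optionally updates the PRQ latency bucket using the timestamp stashed at report time, then injects a Page Group Response descriptor through the invalidation queue. A condensed sketch; variable names such as private_present, pasid_present, and last_page, and the PCI_DEVID() source-id packing, are assumptions:

	/* prm is the page-request message echoed back by the fault handler. */
	if (private_present && dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ))
		dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
				    ktime_to_ns(ktime_get()) -
				    prm->private_data[0]);

	desc.qw0 = QI_PGRP_PASID(prm->pasid) |
		   QI_PGRP_DID(PCI_DEVID(bus, devfn)) |
		   QI_PGRP_PASID_P(pasid_present) |
		   QI_PGRP_RESP_CODE(msg->code) |
		   QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);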