Lines Matching +full:additional +full:-devs
(grep hits from the Linux kernel's Intel VT-d shared virtual memory driver, drivers/iommu/intel/svm.c; each line shows its line number in that file, the matched code, and the enclosing function)

1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/pci-ats.h>
25 #include "../iommu-sva.h"
53 list_for_each_entry_rcu(t, &svm->devs, list) { in svm_lookup_device_by_dev()
54 if (t->dev == dev) { in svm_lookup_device_by_dev()
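
The two lines above are the body of svm_lookup_device_by_dev(): an RCU-protected walk of the per-mm svm->devs list that returns the entry bound to a given device. A minimal user-space sketch of the same shape, with a hypothetical plain singly linked list standing in for the kernel's list_head and all RCU locking omitted:

#include <stdio.h>

struct device;                          /* opaque stand-in for struct device */

struct svm_dev {
        const struct device *dev;
        struct svm_dev *next;           /* stand-in for the RCU list linkage */
};

/* Walk the device list and return the entry bound to @dev, or NULL.
 * The kernel version does this under rcu_read_lock() with
 * list_for_each_entry_rcu(). */
static struct svm_dev *lookup_device(struct svm_dev *devs,
                                     const struct device *dev)
{
        for (struct svm_dev *t = devs; t; t = t->next)
                if (t->dev == dev)
                        return t;
        return NULL;
}

int main(void)
{
        struct svm_dev b = { (const struct device *)0x2, 0 };
        struct svm_dev a = { (const struct device *)0x1, &b };

        printf("found: %s\n",
               lookup_device(&a, (const struct device *)0x2) ? "yes" : "no");
        return 0;
}
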
70 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER); in intel_svm_enable_prq()
73 iommu->name); in intel_svm_enable_prq()
74 return -ENOMEM; in intel_svm_enable_prq()
76 iommu->prq = page_address(pages); in intel_svm_enable_prq()
78 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
81 iommu->name); in intel_svm_enable_prq()
82 ret = -EINVAL; in intel_svm_enable_prq()
85 iommu->pr_irq = irq; in intel_svm_enable_prq()
87 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
88 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
89 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
91 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
92 ret = -ENOMEM; in intel_svm_enable_prq()
95 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
97 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
100 iommu->prq_name, iommu); in intel_svm_enable_prq()
103 iommu->name); in intel_svm_enable_prq()
106 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
107 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
108 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
110 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
115 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
116 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
119 iommu->pr_irq = 0; in intel_svm_enable_prq()
121 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
122 iommu->prq = NULL; in intel_svm_enable_prq()
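
intel_svm_enable_prq() above allocates the Page Request Queue (PRQ), wires up its interrupt and iopf queue, and then programs three DMAR registers: head (PQH) and tail (PQT) start at zero, and the address register (PQA) takes the queue's physical base address OR'd with PRQ_ORDER, which encodes the queue size as a page order. A small sketch of that encoding, assuming PRQ_ORDER = 4 and 32-byte descriptors as in mainline, with a hypothetical base address:

#include <stdint.h>
#include <stdio.h>

#define PRQ_ORDER      4                  /* queue = 2^4 = 16 pages */
#define PRQ_DESC_SIZE  32                 /* one page_req_dsc */

int main(void)
{
        uint64_t base = 0x12345000ULL;    /* hypothetical 4 KiB-aligned phys base */
        uint64_t pqa  = base | PRQ_ORDER; /* the value written to DMAR_PQA_REG */
        unsigned long bytes = 4096UL << PRQ_ORDER;

        printf("PQA=%#llx: %lu-byte ring, %lu descriptors\n",
               (unsigned long long)pqa, bytes, bytes / PRQ_DESC_SIZE);
        return 0;
}
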
129 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
130 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
131 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
133 if (iommu->pr_irq) { in intel_svm_finish_prq()
134 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
135 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
136 iommu->pr_irq = 0; in intel_svm_finish_prq()
139 if (iommu->iopf_queue) { in intel_svm_finish_prq()
140 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
141 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
144 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
145 iommu->prq = NULL; in intel_svm_finish_prq()
156 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
158 iommu->name); in intel_svm_check()
163 !cap_fl5lp_support(iommu->cap)) { in intel_svm_check()
165 iommu->name); in intel_svm_check()
169 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
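
The intel_svm_check() excerpt gates SVM on paging-mode compatibility: because SVM shares the CPU's page tables with the IOMMU, the IOMMU's first-level walker must support every feature the CPU may put into them, in particular 1 GiB pages (cap_fl1gp_support) and 5-level paging (cap_fl5lp_support). A condensed sketch of that predicate:

#include <stdbool.h>
#include <stdio.h>

/* SVM reuses the CPU page tables, so the IOMMU must understand
 * everything the CPU might emit into them. */
static bool svm_capable(bool cpu_1g_pages, bool iommu_fl1gp,
                        bool cpu_la57, bool iommu_fl5lp)
{
        if (cpu_1g_pages && !iommu_fl1gp)
                return false;    /* incompatible 1GB page capability */
        if (cpu_la57 && !iommu_fl5lp)
                return false;    /* incompatible paging mode */
        return true;
}

int main(void)
{
        printf("%d\n", svm_capable(true, false, false, true));  /* 0 */
        return 0;
}
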
177 struct device_domain_info *info = dev_iommu_priv_get(sdev->dev); in __flush_svm_range_dev()
182 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
183 if (info->ats_enabled) { in __flush_svm_range_dev()
184 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
185 svm->pasid, sdev->qdep, address, in __flush_svm_range_dev()
188 svm->pasid, sdev->qdep); in __flush_svm_range_dev()
214 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_flush_svm_range()
225 list_for_each_entry_rcu(sdev, &svm->devs, list) { in intel_flush_svm_all()
226 info = dev_iommu_priv_get(sdev->dev); in intel_flush_svm_all()
228 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0); in intel_flush_svm_all()
229 if (info->ats_enabled) { in intel_flush_svm_all()
230 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in intel_flush_svm_all()
231 svm->pasid, sdev->qdep, in intel_flush_svm_all()
232 0, 64 - VTD_PAGE_SHIFT); in intel_flush_svm_all()
233 quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT, in intel_flush_svm_all()
234 svm->pasid, sdev->qdep); in intel_flush_svm_all()
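
In intel_flush_svm_all() above, the device-IOTLB flush is issued for address 0 with order 64 - VTD_PAGE_SHIFT. The order is a power-of-two count of 4 KiB pages, so 64 - 12 = 52 spans 2^52 pages, i.e. the entire 64-bit address space: a flush-everything request. The arithmetic, as a sketch:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
        int order = 64 - VTD_PAGE_SHIFT;   /* 52 */

        /* covered bytes = 2^order pages * 2^VTD_PAGE_SHIFT bytes per page */
        printf("order %d covers 2^%d bytes (the whole address space)\n",
               order, order + VTD_PAGE_SHIFT);
        return 0;
}
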
247 if (start == 0 && end == -1UL) { in intel_arch_invalidate_secondary_tlbs()
253 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); in intel_arch_invalidate_secondary_tlbs()
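
intel_arch_invalidate_secondary_tlbs() treats start == 0 && end == -1UL as the flush-everything sentinel; otherwise it converts the byte range into a page count, rounding any partial page up. A sketch of that round-up, assuming 4 KiB VT-d pages:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12
#define PAGE_SIZE      4096UL

/* Number of 4 KiB pages covering [start, end), rounded up. */
static unsigned long range_to_pages(unsigned long start, unsigned long end)
{
        return (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
}

int main(void)
{
        /* One byte past two full pages still costs a third invalidation. */
        printf("%lu\n", range_to_pages(0x1000, 0x3001));  /* -> 3 */
        return 0;
}
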
274 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_mm_release()
275 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
276 svm->pasid, true); in intel_mm_release()
294 return -EINVAL; in pasid_to_svm_sdev()
307 if (WARN_ON(list_empty(&svm->devs))) in pasid_to_svm_sdev()
308 return -EINVAL; in pasid_to_svm_sdev()
327 svm = pasid_private_find(mm->pasid); in intel_svm_bind_mm()
331 return -ENOMEM; in intel_svm_bind_mm()
333 svm->pasid = mm->pasid; in intel_svm_bind_mm()
334 svm->mm = mm; in intel_svm_bind_mm()
335 INIT_LIST_HEAD_RCU(&svm->devs); in intel_svm_bind_mm()
337 svm->notifier.ops = &intel_mmuops; in intel_svm_bind_mm()
338 ret = mmu_notifier_register(&svm->notifier, mm); in intel_svm_bind_mm()
344 ret = pasid_private_add(svm->pasid, svm); in intel_svm_bind_mm()
346 mmu_notifier_unregister(&svm->notifier, mm); in intel_svm_bind_mm()
354 ret = -ENOMEM; in intel_svm_bind_mm()
358 sdev->dev = dev; in intel_svm_bind_mm()
359 sdev->iommu = iommu; in intel_svm_bind_mm()
360 sdev->did = FLPT_DEFAULT_DID; in intel_svm_bind_mm()
361 sdev->sid = PCI_DEVID(info->bus, info->devfn); in intel_svm_bind_mm()
362 init_rcu_head(&sdev->rcu); in intel_svm_bind_mm()
363 if (info->ats_enabled) { in intel_svm_bind_mm()
364 sdev->qdep = info->ats_qdep; in intel_svm_bind_mm()
365 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS) in intel_svm_bind_mm()
366 sdev->qdep = 0; in intel_svm_bind_mm()
371 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, in intel_svm_bind_mm()
376 list_add_rcu(&sdev->list, &svm->devs); in intel_svm_bind_mm()
383 if (list_empty(&svm->devs)) { in intel_svm_bind_mm()
384 mmu_notifier_unregister(&svm->notifier, mm); in intel_svm_bind_mm()
385 pasid_private_remove(mm->pasid); in intel_svm_bind_mm()
405 mm = svm->mm; in intel_svm_remove_dev_pasid()
408 list_del_rcu(&sdev->list); in intel_svm_remove_dev_pasid()
411 if (list_empty(&svm->devs)) { in intel_svm_remove_dev_pasid()
412 if (svm->notifier.ops) in intel_svm_remove_dev_pasid()
413 mmu_notifier_unregister(&svm->notifier, mm); in intel_svm_remove_dev_pasid()
414 pasid_private_remove(svm->pasid); in intel_svm_remove_dev_pasid()
458 int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1); in is_canonical_address()
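
Line 458 is the heart of is_canonical_address(): with 4-level paging, __VIRTUAL_MASK_SHIFT is 47, so shift is 16, and shifting left then arithmetically right by 16 sign-extends bit 47 through bits 63..48; the address is canonical iff that round trip is a no-op. A standalone sketch (arithmetic right shift of a negative value is implementation-defined in C, but is what every relevant compiler does and what the kernel relies on):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VIRTUAL_MASK_SHIFT 47     /* 48 significant bits: 4-level paging */

static bool is_canonical(uint64_t addr)
{
        int shift = 64 - (VIRTUAL_MASK_SHIFT + 1);   /* 16 */

        /* Canonical <=> bits 63..47 are a sign-extension of bit 47. */
        return ((int64_t)(addr << shift) >> shift) == (int64_t)addr;
}

int main(void)
{
        printf("%d %d\n",
               is_canonical(0x00007fffffffffffULL),   /* 1: top of low half */
               is_canonical(0x0000800000000000ULL));  /* 0: non-canonical hole */
        return 0;
}
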
465 * intel_drain_pasid_prq - Drain page requests and responses for a pasid
476 * described in VT-d spec CH7.10 to drain all page requests and page
494 if (!info->pri_enabled) in intel_drain_pasid_prq()
497 iommu = info->iommu; in intel_drain_pasid_prq()
498 domain = info->domain; in intel_drain_pasid_prq()
500 sid = PCI_DEVID(info->bus, info->devfn); in intel_drain_pasid_prq()
509 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
515 req = &iommu->prq[head / sizeof(*req)]; in intel_drain_pasid_prq()
516 if (!req->pasid_present || req->pasid != pasid) { in intel_drain_pasid_prq()
521 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
528 * Perform steps described in VT-d spec CH7.10 to drain page in intel_drain_pasid_prq()
543 QI_DEV_IOTLB_PFSID(info->pfsid); in intel_drain_pasid_prq()
545 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
547 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_drain_pasid_prq()
548 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
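
The drain loop above snapshots PQT and PQH, then walks from head to tail one 32-byte descriptor at a time looking for entries that still carry the target PASID; on a hit it sleeps on prq_complete and retries from the top. Note how PRQ_RING_MASK does double duty: with a 2^16-byte ring it is 0xffe0, which simultaneously wraps the offset at the ring size and keeps it 32-byte aligned. A sketch of the walk, assuming PRQ_ORDER = 4 as in mainline and modelling each slot by its PASID only:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PRQ_ORDER     4
#define PRQ_RING_SIZE (0x1000UL << PRQ_ORDER)       /* 65536 bytes */
#define PRQ_RING_MASK (PRQ_RING_SIZE - 0x20)        /* 0xffe0 */
#define DESC_SIZE     0x20UL
#define NSLOTS        (PRQ_RING_SIZE / DESC_SIZE)

/* Does any descriptor between head and tail still target @pasid? */
static bool pasid_pending(const uint32_t pasids[NSLOTS],
                          unsigned long head, unsigned long tail,
                          uint32_t pasid)
{
        while (head != tail) {
                if (pasids[head / DESC_SIZE] == pasid)
                        return true;
                head = (head + DESC_SIZE) & PRQ_RING_MASK;  /* wrap + align */
        }
        return false;
}

int main(void)
{
        static uint32_t pasids[NSLOTS];

        pasids[1] = 42;
        printf("%d\n", pasid_pending(pasids, 0, 3 * DESC_SIZE, 42));  /* 1 */
        return 0;
}
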
557 if (req->rd_req) in prq_to_iommu_prot()
559 if (req->wr_req) in prq_to_iommu_prot()
561 if (req->exe_req) in prq_to_iommu_prot()
563 if (req->pm_req) in prq_to_iommu_prot()
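
The listing elides the bodies of those four ifs; in mainline each descriptor bit maps to one IOMMU_FAULT_PERM_* flag (rd -> READ, wr -> WRITE, exe -> EXEC, pm -> PRIV). A self-contained sketch, with the flag values mirroring include/uapi/linux/iommu.h:

#include <stdio.h>

#define IOMMU_FAULT_PERM_READ  (1 << 0)
#define IOMMU_FAULT_PERM_WRITE (1 << 1)
#define IOMMU_FAULT_PERM_EXEC  (1 << 2)
#define IOMMU_FAULT_PERM_PRIV  (1 << 3)

struct page_req {
        unsigned rd_req:1, wr_req:1, exe_req:1, pm_req:1;
};

static int prq_to_prot(const struct page_req *req)
{
        int prot = 0;

        if (req->rd_req)
                prot |= IOMMU_FAULT_PERM_READ;
        if (req->wr_req)
                prot |= IOMMU_FAULT_PERM_WRITE;
        if (req->exe_req)
                prot |= IOMMU_FAULT_PERM_EXEC;
        if (req->pm_req)                  /* privileged-mode request */
                prot |= IOMMU_FAULT_PERM_PRIV;
        return prot;
}

int main(void)
{
        struct page_req r = { .rd_req = 1, .wr_req = 1 };

        printf("prot=%#x\n", prq_to_prot(&r));   /* 0x3: read + write */
        return 0;
}
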
575 return -ENODEV; in intel_svm_prq_report()
580 event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT; in intel_svm_prq_report()
581 event.fault.prm.pasid = desc->pasid; in intel_svm_prq_report()
582 event.fault.prm.grpid = desc->prg_index; in intel_svm_prq_report()
585 if (desc->lpig) in intel_svm_prq_report()
587 if (desc->pasid_present) { in intel_svm_prq_report()
591 if (desc->priv_data_present) { in intel_svm_prq_report()
600 event.fault.prm.private_data[0] = desc->priv_data[0]; in intel_svm_prq_report()
601 event.fault.prm.private_data[1] = desc->priv_data[1]; in intel_svm_prq_report()
619 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
623 * Per VT-d spec. v3.0 ch7.7, system software must in handle_bad_prq_event()
626 * is set. This is an additional VT-d feature beyond in handle_bad_prq_event()
629 if (!req->lpig && !req->priv_data_present) in handle_bad_prq_event()
632 desc.qw0 = QI_PGRP_PASID(req->pasid) | in handle_bad_prq_event()
633 QI_PGRP_DID(req->rid) | in handle_bad_prq_event()
634 QI_PGRP_PASID_P(req->pasid_present) | in handle_bad_prq_event()
635 QI_PGRP_PDP(req->priv_data_present) | in handle_bad_prq_event()
638 desc.qw1 = QI_PGRP_IDX(req->prg_index) | in handle_bad_prq_event()
639 QI_PGRP_LPIG(req->lpig); in handle_bad_prq_event()
641 if (req->priv_data_present) { in handle_bad_prq_event()
642 desc.qw2 = req->priv_data[0]; in handle_bad_prq_event()
643 desc.qw3 = req->priv_data[1]; in handle_bad_prq_event()
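
handle_bad_prq_event() encodes a response even for malformed requests, but only when one is actually owed. The comment above spells out the rule from VT-d spec 3.0 ch. 7.7: software must respond whenever the request is the last in its group (LPIG) or carries private data (PDP); PDP is the VT-d addition beyond the PCI ATS spec, which only requires responses to LPIG requests. The `if (!req->lpig && !req->priv_data_present)` line is therefore the early-out for the no-response case. As a predicate:

#include <stdbool.h>
#include <stdio.h>

/* Per VT-d spec 3.0 ch. 7.7: respond iff the request closes its group
 * (lpig) or carries private data (priv_data_present). */
static bool needs_response(bool lpig, bool priv_data_present)
{
        return lpig || priv_data_present;
}

int main(void)
{
        printf("%d %d\n",
               needs_response(false, false),   /* 0: silently drop */
               needs_response(false, true));   /* 1: PDP-only, the VT-d extra */
        return 0;
}
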
664 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
666 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
667 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
670 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
671 address = (u64)req->addr << VTD_PAGE_SHIFT; in prq_event_thread()
673 if (unlikely(!req->pasid_present)) { in prq_event_thread()
675 iommu->name); in prq_event_thread()
683 iommu->name); in prq_event_thread()
687 if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) { in prq_event_thread()
689 iommu->name); in prq_event_thread()
693 if (unlikely(req->exe_req && req->rd_req)) { in prq_event_thread()
695 iommu->name); in prq_event_thread()
700 if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) in prq_event_thread()
703 pdev = pci_get_domain_bus_and_slot(iommu->segment, in prq_event_thread()
704 PCI_BUS_NUM(req->rid), in prq_event_thread()
705 req->rid & 0xff); in prq_event_thread()
713 if (intel_svm_prq_report(iommu, &pdev->dev, req)) in prq_event_thread()
716 trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
717 req->priv_data[0], req->priv_data[1], in prq_event_thread()
718 iommu->prq_seq_number++); in prq_event_thread()
724 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
730 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
732 iommu->name); in prq_event_thread()
733 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
734 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
736 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
737 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
739 iommu->name); in prq_event_thread()
743 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
744 complete(&iommu->prq_complete); in prq_event_thread()
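
Before reporting a fault, prq_event_thread() sanity-checks each descriptor and bounces anything malformed to handle_bad_prq_event(): a missing PASID, a non-canonical address (as in mainline, via is_canonical_address()), a privilege-mode request that also asks for read/write, or an execute request paired with read. A condensed validity predicate mirroring those checks:

#include <stdbool.h>
#include <stdio.h>

static bool prq_valid(bool pasid_present, bool canonical,
                      bool rd, bool wr, bool exe, bool pm)
{
        if (!pasid_present)
                return false;    /* page request without PASID */
        if (!canonical)
                return false;    /* address is not canonical */
        if (pm && (rd || wr))
                return false;    /* privilege-mode R/W not supported */
        if (exe && rd)
                return false;    /* execution request not supported */
        return true;
}

int main(void)
{
        printf("%d\n", prq_valid(true, true, true, false, false, false)); /* 1 */
        return 0;
}
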
763 return -ENODEV; in intel_svm_page_response()
767 return -ENODEV; in intel_svm_page_response()
770 return -EINVAL; in intel_svm_page_response()
772 prm = &evt->fault.prm; in intel_svm_page_response()
774 pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; in intel_svm_page_response()
775 private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; in intel_svm_page_response()
776 last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; in intel_svm_page_response()
779 ret = -EINVAL; in intel_svm_page_response()
783 if (prm->pasid == 0 || prm->pasid >= PASID_MAX) { in intel_svm_page_response()
784 ret = -EINVAL; in intel_svm_page_response()
789 * Per VT-d spec. v3.0 ch7.7, system software must respond in intel_svm_page_response()
792 * additional VT-d requirement beyond PCI ATS spec. in intel_svm_page_response()
797 desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | in intel_svm_page_response()
800 QI_PGRP_RESP_CODE(msg->code) | in intel_svm_page_response()
802 desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); in intel_svm_page_response()
807 desc.qw2 = prm->private_data[0]; in intel_svm_page_response()
808 desc.qw3 = prm->private_data[1]; in intel_svm_page_response()
809 } else if (prm->private_data[0]) { in intel_svm_page_response()
811 ktime_to_ns(ktime_get()) - prm->private_data[0]); in intel_svm_page_response()
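
The else-branch at the end of intel_svm_page_response() is a latency probe: when private data was not forwarded to hardware, the driver stashed the request's enqueue timestamp in private_data[0], so response latency is simply ktime_to_ns(ktime_get()) minus that value. A user-space analogue of the same measurement, using CLOCK_MONOTONIC:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
        uint64_t enqueued = now_ns();   /* stood in for private_data[0] */

        /* ... page fault gets handled here ... */

        printf("prq latency: %llu ns\n",
               (unsigned long long)(now_ns() - enqueued));
        return 0;
}
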
824 struct intel_iommu *iommu = info->iommu; in intel_svm_set_dev_pasid()
825 struct mm_struct *mm = domain->mm; in intel_svm_set_dev_pasid()
847 domain->domain.ops = &intel_svm_domain_ops; in intel_svm_domain_alloc()
849 return &domain->domain; in intel_svm_domain_alloc()