Lines Matching +full:additional +full:- +full:devs

1 // SPDX-License-Identifier: GPL-2.0-only
8 #include <linux/intel-iommu.h>
13 #include <linux/intel-svm.h>
16 #include <linux/pci-ats.h>
39 iommu->name); in intel_svm_enable_prq()
40 return -ENOMEM; in intel_svm_enable_prq()
42 iommu->prq = page_address(pages); in intel_svm_enable_prq()
44 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
47 iommu->name); in intel_svm_enable_prq()
48 ret = -EINVAL; in intel_svm_enable_prq()
50 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
51 iommu->prq = NULL; in intel_svm_enable_prq()
54 iommu->pr_irq = irq; in intel_svm_enable_prq()
56 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
59 iommu->prq_name, iommu); in intel_svm_enable_prq()
62 iommu->name); in intel_svm_enable_prq()
64 iommu->pr_irq = 0; in intel_svm_enable_prq()
67 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
68 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
69 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
71 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
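The final register write above loads DMAR_PQA_REG with the queue's physical base address OR'd with PRQ_ORDER: the base is 4 KiB aligned, so its low bits are free to carry the queue-size order (the VT-d PQS field, giving a 4 KiB << order ring). A minimal userspace sketch of that encode/decode, with an illustrative base address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative values: a page-aligned physical base and a
         * size order of 1 (i.e. an 8 KiB ring). */
        uint64_t base  = 0x0000000123456000ULL;
        uint64_t order = 1;
        uint64_t pqa   = base | order;       /* what DMAR_PQA_REG gets */

        printf("base=%#llx size=%llu KiB\n",
               (unsigned long long)(pqa & ~0xfffULL),
               (unsigned long long)(4ULL << (pqa & 0x7)));
        return 0;
}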
78 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
79 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
80 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
82 if (iommu->pr_irq) { in intel_svm_finish_prq()
83 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
84 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
85 iommu->pr_irq = 0; in intel_svm_finish_prq()
88 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
89 iommu->prq = NULL; in intel_svm_finish_prq()
96 return iommu->flags & VTD_FLAG_SVM_CAPABLE; in intel_svm_capable()
105 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
107 iommu->name); in intel_svm_check()
112 !cap_5lp_support(iommu->cap)) { in intel_svm_check()
114 iommu->name); in intel_svm_check()
118 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
126 struct device_domain_info *info = get_domain_info(sdev->dev); in __flush_svm_range_dev()
131 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
132 if (info->ats_enabled) in __flush_svm_range_dev()
133 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
134 svm->pasid, sdev->qdep, address, in __flush_svm_range_dev()
160 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_flush_svm_range()
173 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); in intel_invalidate_range()
194 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_mm_release()
195 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
196 svm->pasid, true); in intel_mm_release()
210 list_for_each_entry((sdev), &(svm)->devs, list) \
211 if ((d) != (sdev)->dev) {} else
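The for_each_svm_dev() helper above is a filtered-iterator macro: it walks svm->devs with list_for_each_entry() and uses the "if (mismatch) {} else" construction so the whole macro stays a single statement whose trailing else binds the caller's loop body, with no dangling-else ambiguity. A minimal standalone sketch of the same idiom (all names here are illustrative):

#include <stdio.h>

struct node { int key; struct node *next; };

/* Iterate the list, but hand the body only nodes whose key matches.
 * The empty "{} else" makes the caller's statement the else branch. */
#define for_each_matching(pos, head, k)                          \
        for ((pos) = (head); (pos); (pos) = (pos)->next)         \
                if ((pos)->key != (k)) {} else

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 3, &b };
        struct node *pos;

        for_each_matching(pos, &a, 3)
                printf("matched key %d\n", pos->key);
        return 0;
}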
222 return -EINVAL; in pasid_to_svm_sdev()
225 return -EINVAL; in pasid_to_svm_sdev()
238 if (WARN_ON(list_empty(&svm->devs))) in pasid_to_svm_sdev()
239 return -EINVAL; in pasid_to_svm_sdev()
242 list_for_each_entry_rcu(d, &svm->devs, list) { in pasid_to_svm_sdev()
243 if (d->dev == dev) { in pasid_to_svm_sdev()
269 return -EINVAL; in intel_svm_bind_gpasid()
271 if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD) in intel_svm_bind_gpasid()
272 return -EINVAL; in intel_svm_bind_gpasid()
275 if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd)) in intel_svm_bind_gpasid()
276 return -EINVAL; in intel_svm_bind_gpasid()
279 if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1)) in intel_svm_bind_gpasid()
280 return -EINVAL; in intel_svm_bind_gpasid()
283 return -ENOTSUPP; in intel_svm_bind_gpasid()
285 /* VT-d supports devices with full 20 bit PASIDs only */ in intel_svm_bind_gpasid()
287 return -EINVAL; in intel_svm_bind_gpasid()
293 if (data->hpasid <= 0 || data->hpasid >= PASID_MAX) in intel_svm_bind_gpasid()
294 return -EINVAL; in intel_svm_bind_gpasid()
298 return -EINVAL; in intel_svm_bind_gpasid()
303 ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev); in intel_svm_bind_gpasid()
309 * Do not allow multiple bindings of the same device-PASID since in intel_svm_bind_gpasid()
314 svm->pasid); in intel_svm_bind_gpasid()
315 ret = -EBUSY; in intel_svm_bind_gpasid()
323 ret = -ENOMEM; in intel_svm_bind_gpasid()
331 svm->mm = get_task_mm(current); in intel_svm_bind_gpasid()
332 svm->pasid = data->hpasid; in intel_svm_bind_gpasid()
333 if (data->flags & IOMMU_SVA_GPASID_VAL) { in intel_svm_bind_gpasid()
334 svm->gpasid = data->gpasid; in intel_svm_bind_gpasid()
335 svm->flags |= SVM_FLAG_GUEST_PASID; in intel_svm_bind_gpasid()
337 ioasid_set_data(data->hpasid, svm); in intel_svm_bind_gpasid()
338 INIT_LIST_HEAD_RCU(&svm->devs); in intel_svm_bind_gpasid()
339 mmput(svm->mm); in intel_svm_bind_gpasid()
343 ret = -ENOMEM; in intel_svm_bind_gpasid()
346 sdev->dev = dev; in intel_svm_bind_gpasid()
347 sdev->sid = PCI_DEVID(info->bus, info->devfn); in intel_svm_bind_gpasid()
348 sdev->iommu = iommu; in intel_svm_bind_gpasid()
352 sdev->users = 1; in intel_svm_bind_gpasid()
355 ret = intel_iommu_enable_pasid(iommu, sdev->dev); in intel_svm_bind_gpasid()
367 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_gpasid()
369 (pgd_t *)(uintptr_t)data->gpgd, in intel_svm_bind_gpasid()
370 data->hpasid, &data->vendor.vtd, dmar_domain, in intel_svm_bind_gpasid()
371 data->addr_width); in intel_svm_bind_gpasid()
372 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_gpasid()
375 data->hpasid, ret); in intel_svm_bind_gpasid()
385 svm->flags |= SVM_FLAG_GUEST_MODE; in intel_svm_bind_gpasid()
387 init_rcu_head(&sdev->rcu); in intel_svm_bind_gpasid()
388 list_add_rcu(&sdev->list, &svm->devs); in intel_svm_bind_gpasid()
390 if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) { in intel_svm_bind_gpasid()
391 ioasid_set_data(data->hpasid, NULL); in intel_svm_bind_gpasid()
407 return -EINVAL; in intel_svm_unbind_gpasid()
416 sdev->users--; in intel_svm_unbind_gpasid()
417 if (!sdev->users) { in intel_svm_unbind_gpasid()
418 list_del_rcu(&sdev->list); in intel_svm_unbind_gpasid()
420 svm->pasid, false); in intel_svm_unbind_gpasid()
421 intel_svm_drain_prq(dev, svm->pasid); in intel_svm_unbind_gpasid()
424 if (list_empty(&svm->devs)) { in intel_svm_unbind_gpasid()
451 mutex_lock(&mm->context.lock); in load_pasid()
454 smp_store_release(&mm->pasid, pasid); in load_pasid()
459 mutex_unlock(&mm->context.lock); in load_pasid()
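load_pasid() publishes the new PASID with smp_store_release() under mm->context.lock, so a lockless reader that observes the updated mm->pasid also observes every write that preceded the store. A minimal sketch of the same publish pattern, using C11 atomics as a stand-in for the kernel's smp_store_release()/smp_load_acquire():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int pasid;

static void publish_pasid(unsigned int p)
{
        /* Release: all prior writes become visible to an acquire
         * reader that sees this value. */
        atomic_store_explicit(&pasid, p, memory_order_release);
}

static unsigned int read_pasid(void)
{
        return atomic_load_explicit(&pasid, memory_order_acquire);
}

int main(void)
{
        publish_pasid(42);
        printf("pasid=%u\n", read_pasid());
        return 0;
}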
477 return -EINVAL; in intel_svm_bind_mm()
480 return -ENOTSUPP; in intel_svm_bind_mm()
485 return -EINVAL; in intel_svm_bind_mm()
491 if (!ecap_srs(iommu->ecap) || mm) { in intel_svm_bind_mm()
493 return -EINVAL; in intel_svm_bind_mm()
501 if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID)) in intel_svm_bind_mm()
505 if (svm->pasid >= pasid_max) { in intel_svm_bind_mm()
508 svm->pasid); in intel_svm_bind_mm()
509 ret = -ENOSPC; in intel_svm_bind_mm()
515 if (sdev->ops != ops) { in intel_svm_bind_mm()
516 ret = -EBUSY; in intel_svm_bind_mm()
519 sdev->users++; in intel_svm_bind_mm()
529 ret = -ENOMEM; in intel_svm_bind_mm()
532 sdev->dev = dev; in intel_svm_bind_mm()
533 sdev->iommu = iommu; in intel_svm_bind_mm()
542 sdev->did = FLPT_DEFAULT_DID; in intel_svm_bind_mm()
543 sdev->sid = PCI_DEVID(info->bus, info->devfn); in intel_svm_bind_mm()
544 if (info->ats_enabled) { in intel_svm_bind_mm()
545 sdev->dev_iotlb = 1; in intel_svm_bind_mm()
546 sdev->qdep = info->ats_qdep; in intel_svm_bind_mm()
547 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS) in intel_svm_bind_mm()
548 sdev->qdep = 0; in intel_svm_bind_mm()
552 sdev->users = 1; in intel_svm_bind_mm()
553 sdev->ops = ops; in intel_svm_bind_mm()
554 init_rcu_head(&sdev->rcu); in intel_svm_bind_mm()
559 ret = -ENOMEM; in intel_svm_bind_mm()
568 svm->pasid = ioasid_alloc(NULL, PASID_MIN, in intel_svm_bind_mm()
569 pasid_max - 1, svm); in intel_svm_bind_mm()
570 if (svm->pasid == INVALID_IOASID) { in intel_svm_bind_mm()
573 ret = -ENOSPC; in intel_svm_bind_mm()
576 svm->notifier.ops = &intel_mmuops; in intel_svm_bind_mm()
577 svm->mm = mm; in intel_svm_bind_mm()
578 svm->flags = flags; in intel_svm_bind_mm()
579 INIT_LIST_HEAD_RCU(&svm->devs); in intel_svm_bind_mm()
580 INIT_LIST_HEAD(&svm->list); in intel_svm_bind_mm()
581 ret = -ENOMEM; in intel_svm_bind_mm()
583 ret = mmu_notifier_register(&svm->notifier, mm); in intel_svm_bind_mm()
585 ioasid_free(svm->pasid); in intel_svm_bind_mm()
592 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_mm()
594 mm ? mm->pgd : init_mm.pgd, in intel_svm_bind_mm()
595 svm->pasid, FLPT_DEFAULT_DID, in intel_svm_bind_mm()
599 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_mm()
602 mmu_notifier_unregister(&svm->notifier, mm); in intel_svm_bind_mm()
603 ioasid_free(svm->pasid); in intel_svm_bind_mm()
609 list_add_tail(&svm->list, &global_svm_list); in intel_svm_bind_mm()
612 load_pasid(mm, svm->pasid); in intel_svm_bind_mm()
619 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_mm()
621 mm ? mm->pgd : init_mm.pgd, in intel_svm_bind_mm()
622 svm->pasid, FLPT_DEFAULT_DID, in intel_svm_bind_mm()
626 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_mm()
632 list_add_rcu(&sdev->list, &svm->devs); in intel_svm_bind_mm()
634 sdev->pasid = svm->pasid; in intel_svm_bind_mm()
635 sdev->sva.dev = dev; in intel_svm_bind_mm()
649 int ret = -EINVAL; in intel_svm_unbind_mm()
660 sdev->users--; in intel_svm_unbind_mm()
661 if (!sdev->users) { in intel_svm_unbind_mm()
662 list_del_rcu(&sdev->list); in intel_svm_unbind_mm()
671 svm->pasid, false); in intel_svm_unbind_mm()
672 intel_svm_drain_prq(dev, svm->pasid); in intel_svm_unbind_mm()
675 if (list_empty(&svm->devs)) { in intel_svm_unbind_mm()
676 ioasid_free(svm->pasid); in intel_svm_unbind_mm()
677 if (svm->mm) { in intel_svm_unbind_mm()
678 mmu_notifier_unregister(&svm->notifier, svm->mm); in intel_svm_unbind_mm()
680 load_pasid(svm->mm, PASID_DISABLED); in intel_svm_unbind_mm()
682 list_del(&svm->list); in intel_svm_unbind_mm()
725 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
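PRQ_RING_MASK exploits the ring geometry: the queue spans 0x1000 << PRQ_ORDER bytes and each page-request descriptor is 0x20 (32) bytes, so size - 0x20 has every bit set between the descriptor granularity and the ring size, and ANDing a 32-byte-aligned head/tail offset with it wraps the offset modulo the ring. A worked sketch, assuming PRQ_ORDER is 0 (a 4 KiB ring) for the numbers:

#include <stdio.h>

#define PRQ_ORDER      0
#define PRQ_SIZE       (0x1000 << PRQ_ORDER)        /* 4 KiB ring      */
#define PRQ_DESC_SIZE  0x20                         /* 32-byte entries */
#define PRQ_RING_MASK  (PRQ_SIZE - PRQ_DESC_SIZE)   /* 0x0fe0          */

int main(void)
{
        unsigned long head = PRQ_SIZE;              /* one full lap */

        /* Wraps back to byte offset 0, i.e. descriptor index 0. */
        printf("offset %#lx -> index %lu\n",
               head & PRQ_RING_MASK,
               (head & PRQ_RING_MASK) / PRQ_DESC_SIZE);
        return 0;
}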
731 if (req->exe_req) in access_error()
734 if (req->rd_req) in access_error()
737 if (req->wr_req) in access_error()
740 return (requested & ~vma->vm_flags) != 0; in access_error()
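access_error() reduces the permission check to one mask test: the faulting access is an error iff it requests at least one VM_* bit the VMA does not grant. A standalone sketch with the standard flag values:

#include <stdbool.h>
#include <stdio.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

static bool access_error(unsigned long requested, unsigned long vm_flags)
{
        return (requested & ~vm_flags) != 0;
}

int main(void)
{
        /* Write fault on a read-only VMA: error. */
        printf("%d\n", access_error(VM_WRITE, VM_READ));
        /* Read fault on a read/write VMA: fine. */
        printf("%d\n", access_error(VM_READ, VM_READ | VM_WRITE));
        return 0;
}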
745 int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1); in is_canonical_address()
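is_canonical_address() uses the classic sign-extension trick: shift the address left so the topmost implemented VA bit lands in bit 63, then arithmetic-shift back; the two agree exactly when the upper bits were already a sign extension of that VA bit. A sketch hard-coding 48 implemented VA bits (so shift = 64 - 48 = 16):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_canonical_48(uint64_t addr)
{
        const int shift = 64 - 48;

        /* Canonical iff sign-extending from bit 47 reproduces addr. */
        return ((int64_t)(addr << shift) >> shift) == (int64_t)addr;
}

int main(void)
{
        printf("%d\n", is_canonical_48(0x00007fffffffffffULL)); /* 1 */
        printf("%d\n", is_canonical_48(0xffff800000000000ULL)); /* 1 */
        printf("%d\n", is_canonical_48(0x0000800000000000ULL)); /* 0 */
        return 0;
}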
752 * intel_svm_drain_prq - Drain page requests and responses for a pasid
763 * described in VT-d spec CH7.10 to drain all page requests and page
781 if (!info->pri_enabled) in intel_svm_drain_prq()
784 iommu = info->iommu; in intel_svm_drain_prq()
785 domain = info->domain; in intel_svm_drain_prq()
787 sid = PCI_DEVID(info->bus, info->devfn); in intel_svm_drain_prq()
788 did = domain->iommu_did[iommu->seq_id]; in intel_svm_drain_prq()
796 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
797 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
798 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
802 req = &iommu->prq[head / sizeof(*req)]; in intel_svm_drain_prq()
803 if (!req->pasid_present || req->pasid != pasid) { in intel_svm_drain_prq()
808 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
813 * Perform steps described in VT-d spec CH7.10 to drain page in intel_svm_drain_prq()
828 QI_DEV_IOTLB_PFSID(info->pfsid); in intel_svm_drain_prq()
830 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
832 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_svm_drain_prq()
833 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
842 if (req->rd_req) in prq_to_iommu_prot()
844 if (req->wr_req) in prq_to_iommu_prot()
846 if (req->exe_req) in prq_to_iommu_prot()
848 if (req->pm_req) in prq_to_iommu_prot()
860 return -ENODEV; in intel_svm_prq_report()
865 event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT; in intel_svm_prq_report()
866 event.fault.prm.pasid = desc->pasid; in intel_svm_prq_report()
867 event.fault.prm.grpid = desc->prg_index; in intel_svm_prq_report()
870 if (desc->lpig) in intel_svm_prq_report()
872 if (desc->pasid_present) { in intel_svm_prq_report()
876 if (desc->priv_data_present) { in intel_svm_prq_report()
885 memcpy(event.fault.prm.private_data, desc->priv_data, in intel_svm_prq_report()
886 sizeof(desc->priv_data)); in intel_svm_prq_report()
901 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
903 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
904 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
915 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
918 address = (u64)req->addr << VTD_PAGE_SHIFT; in prq_event_thread()
919 if (!req->pasid_present) { in prq_event_thread()
921 iommu->name, ((unsigned long long *)req)[0], in prq_event_thread()
926 if (req->pm_req && (req->rd_req | req->wr_req)) { in prq_event_thread()
932 if (req->exe_req && req->rd_req) { in prq_event_thread()
936 if (!svm || svm->pasid != req->pasid) { in prq_event_thread()
938 svm = ioasid_find(NULL, req->pasid, NULL); in prq_event_thread()
945 iommu->name, req->pasid, ((unsigned long long *)req)[0], in prq_event_thread()
951 if (!sdev || sdev->sid != req->rid) { in prq_event_thread()
956 list_for_each_entry_rcu(t, &svm->devs, list) { in prq_event_thread()
957 if (t->sid == req->rid) { in prq_event_thread()
968 if (!svm->mm) in prq_event_thread()
979 if (svm->flags & SVM_FLAG_GUEST_MODE) { in prq_event_thread()
980 if (sdev && !intel_svm_prq_report(sdev->dev, req)) in prq_event_thread()
987 if (!mmget_not_zero(svm->mm)) in prq_event_thread()
990 mmap_read_lock(svm->mm); in prq_event_thread()
991 vma = find_extend_vma(svm->mm, address); in prq_event_thread()
992 if (!vma || address < vma->vm_start) in prq_event_thread()
999 req->wr_req ? FAULT_FLAG_WRITE : 0, in prq_event_thread()
1006 mmap_read_unlock(svm->mm); in prq_event_thread()
1007 mmput(svm->mm); in prq_event_thread()
1010 if (sdev && sdev->ops && sdev->ops->fault_cb) { in prq_event_thread()
1011 int rwxp = (req->rd_req << 3) | (req->wr_req << 2) | in prq_event_thread()
1012 (req->exe_req << 1) | (req->pm_req); in prq_event_thread()
1013 sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, in prq_event_thread()
1014 req->priv_data, rwxp, result); in prq_event_thread()
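The rwxp value built above packs the request's read/write/execute/privileged bits into one nibble, most-significant first, before handing it to the device driver's fault callback. A small sketch of the encoding and its decode (the bitfield struct is an illustrative stand-in for the hardware descriptor):

#include <stdio.h>

struct page_req { unsigned rd_req:1, wr_req:1, exe_req:1, pm_req:1; };

int main(void)
{
        struct page_req req = { .rd_req = 1, .wr_req = 1 };
        int rwxp = (req.rd_req << 3) | (req.wr_req << 2) |
                   (req.exe_req << 1) | (req.pm_req);

        printf("rwxp=%#x (r=%d w=%d x=%d p=%d)\n", rwxp,
               (rwxp >> 3) & 1, (rwxp >> 2) & 1,
               (rwxp >> 1) & 1, rwxp & 1);
        return 0;
}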
1021 if (req->lpig || req->priv_data_present) { in prq_event_thread()
1023 * Per VT-d spec. v3.0 ch7.7, system software must in prq_event_thread()
1026 * is set. This is an additional VT-d feature beyond in prq_event_thread()
1029 resp.qw0 = QI_PGRP_PASID(req->pasid) | in prq_event_thread()
1030 QI_PGRP_DID(req->rid) | in prq_event_thread()
1031 QI_PGRP_PASID_P(req->pasid_present) | in prq_event_thread()
1032 QI_PGRP_PDP(req->priv_data_present) | in prq_event_thread()
1035 resp.qw1 = QI_PGRP_IDX(req->prg_index) | in prq_event_thread()
1036 QI_PGRP_LPIG(req->lpig); in prq_event_thread()
1040 if (req->priv_data_present) in prq_event_thread()
1041 memcpy(&resp.qw2, req->priv_data, in prq_event_thread()
1042 sizeof(req->priv_data)); in prq_event_thread()
1049 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
1055 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
1057 iommu->name); in prq_event_thread()
1058 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
1059 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
1061 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
1063 iommu->name); in prq_event_thread()
1067 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
1068 complete(&iommu->prq_complete); in prq_event_thread()
1077 struct iommu_sva *sva = ERR_PTR(-EINVAL); in intel_svm_bind()
1083 * TODO: Consolidate with generic iommu-sva bind after it is merged. in intel_svm_bind()
1094 sva = &sdev->sva; in intel_svm_bind()
1109 intel_svm_unbind_mm(sdev->dev, sdev->pasid); in intel_svm_unbind()
1120 pasid = sdev->pasid; in intel_svm_get_pasid()
1142 return -ENODEV; in intel_svm_page_response()
1146 return -ENODEV; in intel_svm_page_response()
1149 return -EINVAL; in intel_svm_page_response()
1153 prm = &evt->fault.prm; in intel_svm_page_response()
1155 pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; in intel_svm_page_response()
1156 private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; in intel_svm_page_response()
1157 last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; in intel_svm_page_response()
1160 ret = -EINVAL; in intel_svm_page_response()
1164 if (prm->pasid == 0 || prm->pasid >= PASID_MAX) { in intel_svm_page_response()
1165 ret = -EINVAL; in intel_svm_page_response()
1169 ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev); in intel_svm_page_response()
1171 ret = -ENODEV; in intel_svm_page_response()
1179 if (svm->flags & SVM_FLAG_GUEST_MODE) { in intel_svm_page_response()
1184 ret = -EINVAL; in intel_svm_page_response()
1188 if (mm != svm->mm) { in intel_svm_page_response()
1189 ret = -ENODEV; in intel_svm_page_response()
1198 * Per VT-d spec. v3.0 ch7.7, system software must respond in intel_svm_page_response()
1201 * additional VT-d requirement beyond PCI ATS spec. in intel_svm_page_response()
1206 desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | in intel_svm_page_response()
1209 QI_PGRP_RESP_CODE(msg->code) | in intel_svm_page_response()
1211 desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); in intel_svm_page_response()
1215 memcpy(&desc.qw2, prm->private_data, in intel_svm_page_response()
1216 sizeof(prm->private_data)); in intel_svm_page_response()