Lines Matching +full:iommu +full:- +full:ctx
1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/io-64-nonatomic-lo-hi.h>
13 #include <linux/iommu.h>
56 static void idxd_xa_pasid_remove(struct idxd_user_context *ctx);
67 struct idxd_user_context *ctx = dev_to_uctx(dev); in cr_faults_show() local
69 return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULTS]); in cr_faults_show()
76 struct idxd_user_context *ctx = dev_to_uctx(dev); in cr_fault_failures_show() local
78 return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULT_FAILS]); in cr_fault_failures_show()
84 struct idxd_user_context *ctx = dev_to_uctx(dev); in pid_show() local
86 return sysfs_emit(buf, "%u\n", ctx->pid); in pid_show()
100 struct idxd_user_context *ctx = dev_to_uctx(dev); in cdev_file_attr_visible() local
101 struct idxd_wq *wq = ctx->wq; in cdev_file_attr_visible()
106 return a->mode; in cdev_file_attr_visible()
121 struct idxd_user_context *ctx = dev_to_uctx(dev); in idxd_file_dev_release() local
122 struct idxd_wq *wq = ctx->wq; in idxd_file_dev_release()
123 struct idxd_device *idxd = wq->idxd; in idxd_file_dev_release()
127 ida_free(&file_ida, ctx->id); in idxd_file_dev_release()
130 /* Wait for in-flight operations to complete. */ in idxd_file_dev_release()
132 idxd_device_drain_pasid(idxd, ctx->pasid); in idxd_file_dev_release()
144 if (ctx->sva) { in idxd_file_dev_release()
145 idxd_cdev_evl_drain_pasid(wq, ctx->pasid); in idxd_file_dev_release()
146 iommu_sva_unbind_device(ctx->sva); in idxd_file_dev_release()
147 idxd_xa_pasid_remove(ctx); in idxd_file_dev_release()
149 kfree(ctx); in idxd_file_dev_release()
150 mutex_lock(&wq->wq_lock); in idxd_file_dev_release()
152 mutex_unlock(&wq->wq_lock); in idxd_file_dev_release()
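Taken together, the idxd_file_dev_release() lines above show the teardown order for a user context: release the file id, drain outstanding work for the PASID, unbind the SVA handle, drop the PASID-to-context mapping, then free the context. A minimal sketch of that ordering, using only the helpers named in the matched lines (idxd_device_drain_pasid(), idxd_cdev_evl_drain_pasid(), idxd_xa_pasid_remove()) and omitting the non-matching details:

static void example_file_release(struct idxd_user_context *ctx)
{
        struct idxd_wq *wq = ctx->wq;
        struct idxd_device *idxd = wq->idxd;

        ida_free(&file_ida, ctx->id);

        /* Wait for in-flight operations tagged with this PASID to complete. */
        idxd_device_drain_pasid(idxd, ctx->pasid);

        if (ctx->sva) {
                /* Flush event-log fault work, then release the SVA binding. */
                idxd_cdev_evl_drain_pasid(wq, ctx->pasid);
                iommu_sva_unbind_device(ctx->sva);
                /* Forget the pasid -> ctx mapping before freeing ctx. */
                idxd_xa_pasid_remove(ctx);
        }
        kfree(ctx);

        mutex_lock(&wq->wq_lock);
        /* The line guarded here did not match the search; presumably a wq refcount drop. */
        mutex_unlock(&wq->wq_lock);
}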
165 struct idxd_wq *wq = idxd_cdev->wq; in idxd_cdev_dev_release()
167 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_cdev_dev_release()
168 ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); in idxd_cdev_dev_release()
179 struct cdev *cdev = inode->i_cdev; in inode_idxd_cdev()
188 return idxd_cdev->wq; in inode_wq()
191 static void idxd_xa_pasid_remove(struct idxd_user_context *ctx) in idxd_xa_pasid_remove() argument
193 struct idxd_wq *wq = ctx->wq; in idxd_xa_pasid_remove()
196 mutex_lock(&wq->uc_lock); in idxd_xa_pasid_remove()
197 ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL); in idxd_xa_pasid_remove()
198 if (ptr != (void *)ctx) in idxd_xa_pasid_remove()
199 dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n", in idxd_xa_pasid_remove()
200 ctx->pasid); in idxd_xa_pasid_remove()
201 mutex_unlock(&wq->uc_lock); in idxd_xa_pasid_remove()
206 struct idxd_user_context *ctx; in idxd_user_counter_increment() local
211 mutex_lock(&wq->uc_lock); in idxd_user_counter_increment()
212 ctx = xa_load(&wq->upasid_xa, pasid); in idxd_user_counter_increment()
213 if (!ctx) { in idxd_user_counter_increment()
214 mutex_unlock(&wq->uc_lock); in idxd_user_counter_increment()
217 ctx->counters[index]++; in idxd_user_counter_increment()
218 mutex_unlock(&wq->uc_lock); in idxd_user_counter_increment()
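idxd_xa_pasid_remove() and idxd_user_counter_increment() together illustrate the per-wq PASID table: wq->upasid_xa maps a PASID to its idxd_user_context, and every access is serialized by wq->uc_lock. A condensed sketch of the two operations as they appear in the matched lines (the example_ function names are illustrative):

/* Bump a per-context counter for the context bound to @pasid, if any. */
static void example_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
{
        struct idxd_user_context *ctx;

        mutex_lock(&wq->uc_lock);
        ctx = xa_load(&wq->upasid_xa, pasid);   /* NULL if the PASID is not registered */
        if (ctx)
                ctx->counters[index]++;
        mutex_unlock(&wq->uc_lock);
}

/* Remove @ctx from the xarray only if it is still the current entry. */
static void example_pasid_remove(struct idxd_user_context *ctx)
{
        struct idxd_wq *wq = ctx->wq;
        void *ptr;

        mutex_lock(&wq->uc_lock);
        ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
        if (ptr != (void *)ctx)
                dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
                         ctx->pasid);
        mutex_unlock(&wq->uc_lock);
}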
223 struct idxd_user_context *ctx; in idxd_cdev_open() local
233 idxd = wq->idxd; in idxd_cdev_open()
234 dev = &idxd->pdev->dev; in idxd_cdev_open()
238 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); in idxd_cdev_open()
239 if (!ctx) in idxd_cdev_open()
240 return -ENOMEM; in idxd_cdev_open()
242 mutex_lock(&wq->wq_lock); in idxd_cdev_open()
245 rc = -EBUSY; in idxd_cdev_open()
249 ctx->wq = wq; in idxd_cdev_open()
250 filp->private_data = ctx; in idxd_cdev_open()
251 ctx->pid = current->pid; in idxd_cdev_open()
254 sva = iommu_sva_bind_device(dev, current->mm); in idxd_cdev_open()
263 rc = -EINVAL; in idxd_cdev_open()
267 ctx->sva = sva; in idxd_cdev_open()
268 ctx->pasid = pasid; in idxd_cdev_open()
269 ctx->mm = current->mm; in idxd_cdev_open()
271 mutex_lock(&wq->uc_lock); in idxd_cdev_open()
272 rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL); in idxd_cdev_open()
273 mutex_unlock(&wq->uc_lock); in idxd_cdev_open()
286 idxd_cdev = wq->idxd_cdev; in idxd_cdev_open()
288 ctx->id = ida_alloc(&file_ida, GFP_KERNEL); in idxd_cdev_open()
290 if (ctx->id < 0) { in idxd_cdev_open()
294 ctx->idxd_dev.type = IDXD_DEV_CDEV_FILE; in idxd_cdev_open()
295 fdev = user_ctx_dev(ctx); in idxd_cdev_open()
297 fdev->parent = cdev_dev(idxd_cdev); in idxd_cdev_open()
298 fdev->bus = &dsa_bus_type; in idxd_cdev_open()
299 fdev->type = &idxd_cdev_file_type; in idxd_cdev_open()
301 rc = dev_set_name(fdev, "file%d", ctx->id); in idxd_cdev_open()
314 mutex_unlock(&wq->wq_lock); in idxd_cdev_open()
323 idxd_xa_pasid_remove(ctx); in idxd_cdev_open()
328 mutex_unlock(&wq->wq_lock); in idxd_cdev_open()
329 kfree(ctx); in idxd_cdev_open()
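The idxd_cdev_open() lines show the open path: allocate a context, bind the device to current->mm with SVA, record the PASID, and publish the context in upasid_xa so fault and counter paths can find it. A sketch of that flow under those assumptions; the wq_lock/-EBUSY availability check and the per-open "file%d" device registration are omitted, and iommu_sva_get_pasid() is assumed to be the source of the pasid value (it is not among the matched lines):

static int example_cdev_open(struct idxd_wq *wq, struct file *filp)
{
        struct device *dev = &wq->idxd->pdev->dev;
        struct idxd_user_context *ctx;
        struct iommu_sva *sva;
        u32 pasid;
        int rc;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->wq = wq;
        ctx->pid = current->pid;
        filp->private_data = ctx;

        /* Bind the device to the caller's address space; the PASID identifies it. */
        sva = iommu_sva_bind_device(dev, current->mm);
        if (IS_ERR(sva)) {
                rc = PTR_ERR(sva);
                goto err_free;
        }
        pasid = iommu_sva_get_pasid(sva);       /* assumed source of the pasid */

        ctx->sva = sva;
        ctx->pasid = pasid;
        ctx->mm = current->mm;

        /* Publish the context so it can be looked up by PASID. */
        mutex_lock(&wq->uc_lock);
        rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
        mutex_unlock(&wq->uc_lock);
        if (rc)
                goto err_unbind;

        return 0;

err_unbind:
        iommu_sva_unbind_device(sva);
err_free:
        kfree(ctx);
        return rc;
}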
335 struct idxd_device *idxd = wq->idxd; in idxd_cdev_evl_drain_pasid()
336 struct idxd_evl *evl = idxd->evl; in idxd_cdev_evl_drain_pasid()
345 mutex_lock(&evl->lock); in idxd_cdev_evl_drain_pasid()
346 status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); in idxd_cdev_evl_drain_pasid()
349 size = evl->size; in idxd_cdev_evl_drain_pasid()
352 entry_head = (struct __evl_entry *)(evl->log + (h * ent_size)); in idxd_cdev_evl_drain_pasid()
353 if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id) in idxd_cdev_evl_drain_pasid()
354 set_bit(h, evl->bmap); in idxd_cdev_evl_drain_pasid()
357 if (wq->wq) in idxd_cdev_evl_drain_pasid()
358 drain_workqueue(wq->wq); in idxd_cdev_evl_drain_pasid()
360 mutex_unlock(&evl->lock); in idxd_cdev_evl_drain_pasid()
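idxd_cdev_evl_drain_pasid() scans the device event log, marks entries belonging to the departing PASID on this wq in evl->bmap, and then drains the wq's fault-handling workqueue, all under evl->lock. A rough sketch of that scan; the evl_ent_size() helper, the status register union, and its head/tail field names are assumptions, since only a few lines of the loop matched:

static void example_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_evl *evl = idxd->evl;
        union evl_status_reg status;            /* register union name assumed */
        struct __evl_entry *entry_head;
        unsigned int h, t, size, ent_size = evl_ent_size(idxd); /* helper name assumed */

        mutex_lock(&evl->lock);
        status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
        h = status.head;                        /* field names assumed */
        t = status.tail;
        size = evl->size;

        while (h != t) {
                entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
                if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
                        set_bit(h, evl->bmap);  /* remember entries to discard */
                h = (h + 1) % size;
        }

        /* Flush any completion-record fault work still queued for this wq. */
        if (wq->wq)
                drain_workqueue(wq->wq);
        mutex_unlock(&evl->lock);
}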
365 struct idxd_user_context *ctx = filep->private_data; in idxd_cdev_release() local
366 struct idxd_wq *wq = ctx->wq; in idxd_cdev_release()
367 struct idxd_device *idxd = wq->idxd; in idxd_cdev_release()
368 struct device *dev = &idxd->pdev->dev; in idxd_cdev_release()
371 filep->private_data = NULL; in idxd_cdev_release()
373 device_unregister(user_ctx_dev(ctx)); in idxd_cdev_release()
381 struct device *dev = &wq->idxd->pdev->dev; in check_vma()
383 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { in check_vma()
386 current->comm, func, in check_vma()
387 vma->vm_end - vma->vm_start); in check_vma()
388 return -EINVAL; in check_vma()
396 struct idxd_user_context *ctx = filp->private_data; in idxd_cdev_mmap() local
397 struct idxd_wq *wq = ctx->wq; in idxd_cdev_mmap()
398 struct idxd_device *idxd = wq->idxd; in idxd_cdev_mmap()
399 struct pci_dev *pdev = idxd->pdev; in idxd_cdev_mmap()
404 dev_dbg(&pdev->dev, "%s called\n", __func__); in idxd_cdev_mmap()
409 * (See the INTEL-SA-01084 security advisory) in idxd_cdev_mmap()
414 if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) in idxd_cdev_mmap()
415 return -EPERM; in idxd_cdev_mmap()
417 if (current->mm != ctx->mm) in idxd_cdev_mmap()
418 return -EPERM; in idxd_cdev_mmap()
425 pfn = (base + idxd_get_wq_portal_full_offset(wq->id, in idxd_cdev_mmap()
427 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in idxd_cdev_mmap()
428 vma->vm_private_data = ctx; in idxd_cdev_mmap()
430 return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, in idxd_cdev_mmap()
431 vma->vm_page_prot); in idxd_cdev_mmap()
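idxd_cdev_mmap() maps exactly one page of the wq's MMIO portal into the caller: the VMA size is validated by check_vma(), unsafe submission is gated on idxd->user_submission_safe or CAP_SYS_RAWIO (see INTEL-SA-01084), the opener's mm must match, and the page is mapped uncached with io_remap_pfn_range(). A condensed sketch under those assumptions; the BAR index and the portal type constant are not among the matched lines and are illustrative:

static int example_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct idxd_user_context *ctx = filp->private_data;
        struct idxd_wq *wq = ctx->wq;
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        resource_size_t base = pci_resource_start(pdev, IDXD_WQ_BAR); /* BAR index assumed */
        unsigned long pfn;
        int rc;

        /* Unsafe user submission requires CAP_SYS_RAWIO (INTEL-SA-01084). */
        if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
                return -EPERM;

        /* Only the process that opened the wq may map its portal. */
        if (current->mm != ctx->mm)
                return -EPERM;

        rc = check_vma(wq, vma, __func__);      /* rejects VMAs larger than one page */
        if (rc < 0)
                return rc;

        pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
                        IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;   /* portal type assumed */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_private_data = ctx;

        return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
                                  vma->vm_page_prot);
}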
434 static int idxd_submit_user_descriptor(struct idxd_user_context *ctx, in idxd_submit_user_descriptor() argument
437 struct idxd_wq *wq = ctx->wq; in idxd_submit_user_descriptor()
438 struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev; in idxd_submit_user_descriptor()
446 return -EFAULT; in idxd_submit_user_descriptor()
455 !wq->idxd->user_submission_safe) in idxd_submit_user_descriptor()
456 return -EINVAL; in idxd_submit_user_descriptor()
463 return -EINVAL; in idxd_submit_user_descriptor()
469 descriptor.pasid = ctx->pasid; in idxd_submit_user_descriptor()
482 struct idxd_user_context *ctx = filp->private_data; in idxd_cdev_write() local
486 if (current->mm != ctx->mm) in idxd_cdev_write()
487 return -EPERM; in idxd_cdev_write()
490 int rc = idxd_submit_user_descriptor(ctx, udesc + i); in idxd_cdev_write()
504 struct idxd_user_context *ctx = filp->private_data; in idxd_cdev_poll() local
505 struct idxd_wq *wq = ctx->wq; in idxd_cdev_poll()
506 struct idxd_device *idxd = wq->idxd; in idxd_cdev_poll()
509 if (current->mm != ctx->mm) in idxd_cdev_poll()
512 poll_wait(filp, &wq->err_queue, wait); in idxd_cdev_poll()
513 spin_lock(&idxd->dev_lock); in idxd_cdev_poll()
514 if (idxd->sw_err.valid) in idxd_cdev_poll()
516 spin_unlock(&idxd->dev_lock); in idxd_cdev_poll()
532 return MAJOR(ictx[idxd->data->type].devt); in idxd_cdev_get_major()
537 struct idxd_device *idxd = wq->idxd; in idxd_wq_add_cdev()
546 return -ENOMEM; in idxd_wq_add_cdev()
548 idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV; in idxd_wq_add_cdev()
549 idxd_cdev->wq = wq; in idxd_wq_add_cdev()
550 cdev = &idxd_cdev->cdev; in idxd_wq_add_cdev()
552 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_wq_add_cdev()
553 minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); in idxd_wq_add_cdev()
558 idxd_cdev->minor = minor; in idxd_wq_add_cdev()
561 dev->parent = wq_confdev(wq); in idxd_wq_add_cdev()
562 dev->bus = &dsa_bus_type; in idxd_wq_add_cdev()
563 dev->type = &idxd_cdev_device_type; in idxd_wq_add_cdev()
564 dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); in idxd_wq_add_cdev()
566 rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id); in idxd_wq_add_cdev()
570 wq->idxd_cdev = idxd_cdev; in idxd_wq_add_cdev()
574 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); in idxd_wq_add_cdev()
582 wq->idxd_cdev = NULL; in idxd_wq_add_cdev()
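idxd_wq_add_cdev() follows the usual char-device-plus-struct-device pattern: allocate the per-wq idxd_cdev, take a minor from the per-device-type IDA, fill in parent/bus/type/devt, name the node "<prefix>/wqX.Y", and register it. Only the "cdev_add failed" message matched, so the registration call itself is inferred; device_initialize() and the fops symbol below are likewise assumptions:

static int example_wq_add_cdev(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_cdev *idxd_cdev;
        struct idxd_cdev_context *cdev_ctx;
        struct device *dev;
        int minor, rc;

        idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
        if (!idxd_cdev)
                return -ENOMEM;

        idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
        idxd_cdev->wq = wq;

        cdev_ctx = &ictx[idxd->data->type];
        minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
        if (minor < 0) {
                kfree(idxd_cdev);
                return minor;
        }
        idxd_cdev->minor = minor;

        dev = cdev_dev(idxd_cdev);              /* embedded struct device accessor from the fragments */
        device_initialize(dev);                 /* assumed; not among the matched lines */
        dev->parent = wq_confdev(wq);
        dev->bus = &dsa_bus_type;
        dev->type = &idxd_cdev_device_type;
        dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

        rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
        if (rc)
                goto err;

        wq->idxd_cdev = idxd_cdev;
        cdev_init(&idxd_cdev->cdev, &idxd_cdev_fops);           /* fops symbol assumed */
        rc = cdev_device_add(&idxd_cdev->cdev, dev);            /* registration call inferred */
        if (rc) {
                dev_dbg(&idxd->pdev->dev, "cdev_add failed: %d\n", rc);
                goto err;
        }
        return 0;

err:
        put_device(dev);
        wq->idxd_cdev = NULL;
        return rc;
}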
590 idxd_cdev = wq->idxd_cdev; in idxd_wq_del_cdev()
591 wq->idxd_cdev = NULL; in idxd_wq_del_cdev()
592 cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); in idxd_wq_del_cdev()
598 struct device *dev = &idxd_dev->conf_dev; in idxd_user_drv_probe()
600 struct idxd_device *idxd = wq->idxd; in idxd_user_drv_probe()
603 if (idxd->state != IDXD_DEV_ENABLED) in idxd_user_drv_probe()
604 return -ENXIO; in idxd_user_drv_probe()
608 * - If no IOMMU or IOMMU Passthrough without SVA, userspace in idxd_user_drv_probe()
610 * - The IDXD cdev driver does not provide any ways to pin in idxd_user_drv_probe()
612 * PA without IOMMU SVA. Therefore the application has no way in idxd_user_drv_probe()
617 idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU; in idxd_user_drv_probe()
618 dev_dbg(&idxd->pdev->dev, in idxd_user_drv_probe()
621 return -EOPNOTSUPP; in idxd_user_drv_probe()
624 mutex_lock(&wq->wq_lock); in idxd_user_drv_probe()
627 idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; in idxd_user_drv_probe()
628 rc = -ENODEV; in idxd_user_drv_probe()
632 wq->wq = create_workqueue(dev_name(wq_confdev(wq))); in idxd_user_drv_probe()
633 if (!wq->wq) { in idxd_user_drv_probe()
634 rc = -ENOMEM; in idxd_user_drv_probe()
638 wq->type = IDXD_WQT_USER; in idxd_user_drv_probe()
645 idxd->cmd_status = IDXD_SCMD_CDEV_ERR; in idxd_user_drv_probe()
649 idxd->cmd_status = 0; in idxd_user_drv_probe()
650 mutex_unlock(&wq->wq_lock); in idxd_user_drv_probe()
656 destroy_workqueue(wq->wq); in idxd_user_drv_probe()
657 wq->type = IDXD_WQT_NONE; in idxd_user_drv_probe()
659 mutex_unlock(&wq->wq_lock); in idxd_user_drv_probe()
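The comment block in idxd_user_drv_probe() explains why the user cdev is refused without IOMMU SVA: the driver provides no way to pin user pages or translate addresses itself, so without shared virtual addressing userspace has no legitimate address to hand the device. A sketch of the probe-time gate implied by the matched lines; the exact predicate (a device_user_pasid_enabled()-style helper) and the debug message wording are assumptions:

static int example_user_drv_probe(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        int rc = 0;

        if (idxd->state != IDXD_DEV_ENABLED)
                return -ENXIO;

        /*
         * Without IOMMU SVA the cdev cannot pin user memory or translate
         * addresses, so user wqs are refused outright.
         */
        if (!device_user_pasid_enabled(idxd)) {         /* predicate name assumed */
                idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
                dev_dbg(&idxd->pdev->dev,
                        "User wq requires IOMMU SVA\n");        /* message wording assumed */
                return -EOPNOTSUPP;
        }

        mutex_lock(&wq->wq_lock);
        /* (driver-name validation elided; on mismatch cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME) */

        wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
        if (!wq->wq) {
                rc = -ENOMEM;
                goto out;
        }

        wq->type = IDXD_WQT_USER;
        /* enable the wq and add the cdev here; failures set IDXD_SCMD_CDEV_ERR */

        idxd->cmd_status = 0;
out:
        mutex_unlock(&wq->wq_lock);
        return rc;
}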
667 mutex_lock(&wq->wq_lock); in idxd_user_drv_remove()
670 wq->type = IDXD_WQT_NONE; in idxd_user_drv_remove()
671 destroy_workqueue(wq->wq); in idxd_user_drv_remove()
672 wq->wq = NULL; in idxd_user_drv_remove()
673 mutex_unlock(&wq->wq_lock); in idxd_user_drv_remove()
704 for (i--; i >= 0; i--) in idxd_cdev_register()
721 * idxd_copy_cr - copy completion record to user address space found by wq and
736 struct device *dev = &wq->idxd->pdev->dev; in idxd_copy_cr()
738 struct idxd_user_context *ctx; in idxd_copy_cr() local
741 mutex_lock(&wq->uc_lock); in idxd_copy_cr()
743 ctx = xa_load(&wq->upasid_xa, pasid); in idxd_copy_cr()
744 if (!ctx) { in idxd_copy_cr()
749 mm = ctx->mm; in idxd_copy_cr()
757 len - status_size); in idxd_copy_cr()
761 * when a non-zero status is polled. in idxd_copy_cr()
770 * record information once polling for a non-zero status. in idxd_copy_cr()
782 mutex_unlock(&wq->uc_lock); in idxd_copy_cr()
784 return len - left; in idxd_copy_cr()
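The idxd_copy_cr() lines show how a completion record is written into the faulting process's address space: the context is looked up by PASID under wq->uc_lock, the record body is copied first, and the status byte at offset 0 is written last, after a write barrier, so that a user polling for a non-zero status never observes a partially written record; the return value is the number of bytes actually copied. A sketch of that ordering, assuming the copy runs in a kernel thread that temporarily adopts the user mm (kthread_use_mm()) and with error handling simplified:

static int example_copy_cr(struct idxd_wq *wq, u32 pasid,
                           unsigned long addr, void *cr, int len)
{
        struct idxd_user_context *ctx;
        struct mm_struct *mm;
        int left = len, status_size = 1;

        mutex_lock(&wq->uc_lock);
        ctx = xa_load(&wq->upasid_xa, pasid);
        if (!ctx)
                goto out;

        mm = ctx->mm;
        kthread_use_mm(mm);     /* adopt the user mm so copy_to_user() resolves @addr */

        /* Copy everything except the leading status byte first. */
        left = copy_to_user((void __user *)addr + status_size,
                            cr + status_size, len - status_size);
        if (!left) {
                /*
                 * Publish the status byte only after the rest of the record
                 * is visible, so a poller that sees status != 0 reads a
                 * complete record.
                 */
                wmb();
                if (put_user(*(u8 *)cr, (u8 __user *)addr))
                        left = status_size;
        }

        kthread_unuse_mm(mm);
out:
        mutex_unlock(&wq->uc_lock);
        return len - left;
}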