Lines Matching +full:iommu +full:- +full:ctx (drivers/accel/ivpu/ivpu_gem.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-2023 Intel Corporation
6 #include <linux/dma-buf.h>
31 /* Pages are managed by the underlying dma-buf */ in prime_alloc_pages_locked()
37 /* Pages are managed by the underlying dma-buf */ in prime_free_pages_locked()
45 sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL); in prime_map_pages_locked()
51 bo->sgt = sgt; in prime_map_pages_locked()
57 dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL); in prime_unmap_pages_locked()
58 bo->sgt = NULL; in prime_unmap_pages_locked()
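
The "Pages are managed by the underlying dma-buf" comments above reflect the importer side of the dma-buf contract: the exporter owns the backing pages, so the importer only attaches to the buffer and maps/unmaps the attachment, exactly as prime_map_pages_locked() and prime_unmap_pages_locked() do. A minimal generic sketch of that lifecycle, assuming the caller already holds a struct dma_buf reference; the function name my_import_example() and its error handling are illustrative, only the dma-buf calls themselves appear in the excerpt:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

/* Illustrative importer lifecycle, not part of the driver. */
static int my_import_example(struct device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);	/* register as an importer */
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device with the DMA addresses in sgt ... */

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, attach);
	return 0;
}
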
72 int npages = bo->base.size >> PAGE_SHIFT; in shmem_alloc_pages_locked()
75 pages = drm_gem_get_pages(&bo->base); in shmem_alloc_pages_locked()
79 if (bo->flags & DRM_IVPU_BO_WC) in shmem_alloc_pages_locked()
81 else if (bo->flags & DRM_IVPU_BO_UNCACHED) in shmem_alloc_pages_locked()
84 bo->pages = pages; in shmem_alloc_pages_locked()
91 set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT); in shmem_free_pages_locked()
93 drm_gem_put_pages(&bo->base, bo->pages, true, false); in shmem_free_pages_locked()
94 bo->pages = NULL; in shmem_free_pages_locked()
99 int npages = bo->base.size >> PAGE_SHIFT; in ivpu_bo_map_pages_locked()
104 sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages); in ivpu_bo_map_pages_locked()
110 ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0); in ivpu_bo_map_pages_locked()
112 ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); in ivpu_bo_map_pages_locked()
116 bo->sgt = sgt; in ivpu_bo_map_pages_locked()
128 dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0); in ivpu_bo_unmap_pages_locked()
129 sg_free_table(bo->sgt); in ivpu_bo_unmap_pages_locked()
130 kfree(bo->sgt); in ivpu_bo_unmap_pages_locked()
131 bo->sgt = NULL; in ivpu_bo_unmap_pages_locked()
145 unsigned int i, npages = bo->base.size >> PAGE_SHIFT; in internal_alloc_pages_locked()
149 pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL); in internal_alloc_pages_locked()
151 return -ENOMEM; in internal_alloc_pages_locked()
156 ret = -ENOMEM; in internal_alloc_pages_locked()
162 bo->pages = pages; in internal_alloc_pages_locked()
166 while (i--) in internal_alloc_pages_locked()
174 unsigned int i, npages = bo->base.size >> PAGE_SHIFT; in internal_free_pages_locked()
177 set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT); in internal_free_pages_locked()
180 put_page(bo->pages[i]); in internal_free_pages_locked()
182 kvfree(bo->pages); in internal_free_pages_locked()
183 bo->pages = NULL; in internal_free_pages_locked()
200 lockdep_assert_held(&bo->lock); in ivpu_bo_alloc_and_map_pages_locked()
201 drm_WARN_ON(&vdev->drm, bo->sgt); in ivpu_bo_alloc_and_map_pages_locked()
203 ret = bo->ops->alloc_pages(bo); in ivpu_bo_alloc_and_map_pages_locked()
209 ret = bo->ops->map_pages(bo); in ivpu_bo_alloc_and_map_pages_locked()
217 bo->ops->free_pages(bo); in ivpu_bo_alloc_and_map_pages_locked()
223 mutex_lock(&bo->lock); in ivpu_bo_unmap_and_free_pages()
225 WARN_ON(!bo->sgt); in ivpu_bo_unmap_and_free_pages()
226 bo->ops->unmap_pages(bo); in ivpu_bo_unmap_and_free_pages()
227 WARN_ON(bo->sgt); in ivpu_bo_unmap_and_free_pages()
228 bo->ops->free_pages(bo); in ivpu_bo_unmap_and_free_pages()
229 WARN_ON(bo->pages); in ivpu_bo_unmap_and_free_pages()
231 mutex_unlock(&bo->lock); in ivpu_bo_unmap_and_free_pages()
235 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
238 * to IOMMU address space and finally updates the VPU MMU page tables
239 * to allow the VPU to translate VPU address to IOMMU address.
246 mutex_lock(&bo->lock); in ivpu_bo_pin()
248 if (!bo->vpu_addr) { in ivpu_bo_pin()
250 bo->ctx->id, bo->handle); in ivpu_bo_pin()
251 ret = -EINVAL; in ivpu_bo_pin()
255 if (!bo->sgt) { in ivpu_bo_pin()
261 if (!bo->mmu_mapped) { in ivpu_bo_pin()
262 ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt, in ivpu_bo_pin()
268 bo->mmu_mapped = true; in ivpu_bo_pin()
272 mutex_unlock(&bo->lock); in ivpu_bo_pin()
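
The ivpu_bo_pin() kerneldoc above describes the whole flow: on first use the backing pages are allocated and dma-mapped into the IOMMU address space, then the VPU MMU page tables are updated so the VPU can translate bo->vpu_addr. A minimal caller-side sketch, assuming the int ivpu_bo_pin(struct ivpu_bo *bo) signature implied by the excerpt; ivpu_example_prepare_bo() and the included driver headers are assumptions, not part of the file:

#include "ivpu_drv.h"	/* assumed driver header for struct ivpu_device / ivpu_err() */
#include "ivpu_gem.h"	/* assumed driver header for struct ivpu_bo / ivpu_bo_pin() */

/* Illustrative only: pin a BO before handing bo->vpu_addr to the firmware. */
static int ivpu_example_prepare_bo(struct ivpu_device *vdev, struct ivpu_bo *bo)
{
	int ret;

	/* Allocates backing pages if needed, dma-maps them and inserts the
	 * mapping into the VPU MMU page tables under bo->lock.
	 */
	ret = ivpu_bo_pin(bo);
	if (ret)
		ivpu_err(vdev, "Failed to pin BO: %d\n", ret);

	return ret;
}
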
278 ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, in ivpu_bo_alloc_vpu_addr() argument
285 if (bo->flags & DRM_IVPU_BO_SHAVE_MEM) in ivpu_bo_alloc_vpu_addr()
286 range = &vdev->hw->ranges.shave; in ivpu_bo_alloc_vpu_addr()
287 else if (bo->flags & DRM_IVPU_BO_DMA_MEM) in ivpu_bo_alloc_vpu_addr()
288 range = &vdev->hw->ranges.dma; in ivpu_bo_alloc_vpu_addr()
290 range = &vdev->hw->ranges.user; in ivpu_bo_alloc_vpu_addr()
293 mutex_lock(&ctx->lock); in ivpu_bo_alloc_vpu_addr()
294 ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node); in ivpu_bo_alloc_vpu_addr()
296 bo->ctx = ctx; in ivpu_bo_alloc_vpu_addr()
297 bo->vpu_addr = bo->mm_node.start; in ivpu_bo_alloc_vpu_addr()
298 list_add_tail(&bo->ctx_node, &ctx->bo_list); in ivpu_bo_alloc_vpu_addr()
300 mutex_unlock(&ctx->lock); in ivpu_bo_alloc_vpu_addr()
308 struct ivpu_mmu_context *ctx = bo->ctx; in ivpu_bo_free_vpu_addr() local
310 ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n", in ivpu_bo_free_vpu_addr()
311 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped); in ivpu_bo_free_vpu_addr()
313 mutex_lock(&bo->lock); in ivpu_bo_free_vpu_addr()
315 if (bo->mmu_mapped) { in ivpu_bo_free_vpu_addr()
316 drm_WARN_ON(&vdev->drm, !bo->sgt); in ivpu_bo_free_vpu_addr()
317 ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt); in ivpu_bo_free_vpu_addr()
318 bo->mmu_mapped = false; in ivpu_bo_free_vpu_addr()
321 mutex_lock(&ctx->lock); in ivpu_bo_free_vpu_addr()
322 list_del(&bo->ctx_node); in ivpu_bo_free_vpu_addr()
323 bo->vpu_addr = 0; in ivpu_bo_free_vpu_addr()
324 bo->ctx = NULL; in ivpu_bo_free_vpu_addr()
325 ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node); in ivpu_bo_free_vpu_addr()
326 mutex_unlock(&ctx->lock); in ivpu_bo_free_vpu_addr()
328 mutex_unlock(&bo->lock); in ivpu_bo_free_vpu_addr()
331 void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx) in ivpu_bo_remove_all_bos_from_context() argument
335 list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node) in ivpu_bo_remove_all_bos_from_context()
347 if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size))) in ivpu_bo_alloc()
348 return ERR_PTR(-EINVAL); in ivpu_bo_alloc()
356 return ERR_PTR(-EINVAL); in ivpu_bo_alloc()
361 return ERR_PTR(-ENOMEM); in ivpu_bo_alloc()
363 mutex_init(&bo->lock); in ivpu_bo_alloc()
364 bo->base.funcs = &ivpu_gem_funcs; in ivpu_bo_alloc()
365 bo->flags = flags; in ivpu_bo_alloc()
366 bo->ops = ops; in ivpu_bo_alloc()
367 bo->user_ptr = user_ptr; in ivpu_bo_alloc()
369 if (ops->type == IVPU_BO_TYPE_SHMEM) in ivpu_bo_alloc()
370 ret = drm_gem_object_init(&vdev->drm, &bo->base, size); in ivpu_bo_alloc()
372 drm_gem_private_object_init(&vdev->drm, &bo->base, size); in ivpu_bo_alloc()
380 ret = drm_gem_create_mmap_offset(&bo->base); in ivpu_bo_alloc()
398 drm_gem_object_release(&bo->base); in ivpu_bo_alloc()
409 if (bo->ctx) in ivpu_bo_free()
410 ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n", in ivpu_bo_free()
411 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped); in ivpu_bo_free()
413 ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n", in ivpu_bo_free()
414 (bool)bo->sgt, bo->mmu_mapped); in ivpu_bo_free()
416 drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); in ivpu_bo_free()
418 vunmap(bo->kvaddr); in ivpu_bo_free()
420 if (bo->ctx) in ivpu_bo_free()
423 if (bo->sgt) in ivpu_bo_free()
426 if (bo->base.import_attach) in ivpu_bo_free()
427 drm_prime_gem_destroy(&bo->base, bo->sgt); in ivpu_bo_free()
429 drm_gem_object_release(&bo->base); in ivpu_bo_free()
431 mutex_destroy(&bo->lock); in ivpu_bo_free()
440 ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s", in ivpu_bo_mmap()
441 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name); in ivpu_bo_mmap()
443 if (obj->import_attach) { in ivpu_bo_mmap()
446 vma->vm_private_data = NULL; in ivpu_bo_mmap()
447 return dma_buf_mmap(obj->dma_buf, vma, 0); in ivpu_bo_mmap()
451 vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags)); in ivpu_bo_mmap()
459 loff_t npages = obj->size >> PAGE_SHIFT; in ivpu_bo_get_sg_table()
462 mutex_lock(&bo->lock); in ivpu_bo_get_sg_table()
464 if (!bo->sgt) in ivpu_bo_get_sg_table()
467 mutex_unlock(&bo->lock); in ivpu_bo_get_sg_table()
472 return drm_prime_pages_to_sg(obj->dev, bo->pages, npages); in ivpu_bo_get_sg_table()
477 struct vm_area_struct *vma = vmf->vma; in ivpu_vm_fault()
478 struct drm_gem_object *obj = vma->vm_private_data; in ivpu_vm_fault()
480 loff_t npages = obj->size >> PAGE_SHIFT; in ivpu_vm_fault()
486 mutex_lock(&bo->lock); in ivpu_vm_fault()
488 if (!bo->sgt) { in ivpu_vm_fault()
496 /* We don't use vmf->pgoff since that has the fake offset */ in ivpu_vm_fault()
497 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in ivpu_vm_fault()
501 page = bo->pages[page_offset]; in ivpu_vm_fault()
502 ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); in ivpu_vm_fault()
506 mutex_unlock(&bo->lock); in ivpu_vm_fault()
527 struct ivpu_file_priv *file_priv = file->driver_priv; in ivpu_bo_create_ioctl()
528 struct ivpu_device *vdev = file_priv->vdev; in ivpu_bo_create_ioctl()
530 u64 size = PAGE_ALIGN(args->size); in ivpu_bo_create_ioctl()
534 if (args->flags & ~DRM_IVPU_BO_FLAGS) in ivpu_bo_create_ioctl()
535 return -EINVAL; in ivpu_bo_create_ioctl()
538 return -EINVAL; in ivpu_bo_create_ioctl()
540 bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0); in ivpu_bo_create_ioctl()
542 ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)", in ivpu_bo_create_ioctl()
543 bo, file_priv->ctx.id, args->size, args->flags); in ivpu_bo_create_ioctl()
547 ret = drm_gem_handle_create(file, &bo->base, &bo->handle); in ivpu_bo_create_ioctl()
549 args->vpu_addr = bo->vpu_addr; in ivpu_bo_create_ioctl()
550 args->handle = bo->handle; in ivpu_bo_create_ioctl()
553 drm_gem_object_put(&bo->base); in ivpu_bo_create_ioctl()
555 ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n", in ivpu_bo_create_ioctl()
556 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags); in ivpu_bo_create_ioctl()
570 drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr)); in ivpu_bo_alloc_internal()
571 drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size)); in ivpu_bo_alloc_internal()
578 range = &vdev->hw->ranges.global; in ivpu_bo_alloc_internal()
581 bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0); in ivpu_bo_alloc_internal()
593 drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT); in ivpu_bo_alloc_internal()
595 if (bo->flags & DRM_IVPU_BO_WC) in ivpu_bo_alloc_internal()
596 set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT); in ivpu_bo_alloc_internal()
597 else if (bo->flags & DRM_IVPU_BO_UNCACHED) in ivpu_bo_alloc_internal()
598 set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT); in ivpu_bo_alloc_internal()
601 bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot); in ivpu_bo_alloc_internal()
602 if (!bo->kvaddr) { in ivpu_bo_alloc_internal()
607 ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n", in ivpu_bo_alloc_internal()
608 bo->vpu_addr, bo->base.size, flags); in ivpu_bo_alloc_internal()
613 drm_gem_object_put(&bo->base); in ivpu_bo_alloc_internal()
619 drm_gem_object_put(&bo->base); in ivpu_bo_free_internal()
628 attach = dma_buf_attach(buf, dev->dev); in ivpu_gem_prime_import()
634 bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0); in ivpu_gem_prime_import()
636 ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size); in ivpu_gem_prime_import()
640 lockdep_set_class(&bo->lock, &prime_bo_lock_class_key); in ivpu_gem_prime_import()
642 bo->base.import_attach = attach; in ivpu_gem_prime_import()
644 return &bo->base; in ivpu_gem_prime_import()
654 struct ivpu_file_priv *file_priv = file->driver_priv; in ivpu_bo_info_ioctl()
661 obj = drm_gem_object_lookup(file, args->handle); in ivpu_bo_info_ioctl()
663 return -ENOENT; in ivpu_bo_info_ioctl()
667 mutex_lock(&bo->lock); in ivpu_bo_info_ioctl()
669 if (!bo->ctx) { in ivpu_bo_info_ioctl()
670 ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL); in ivpu_bo_info_ioctl()
677 args->flags = bo->flags; in ivpu_bo_info_ioctl()
678 args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node); in ivpu_bo_info_ioctl()
679 args->vpu_addr = bo->vpu_addr; in ivpu_bo_info_ioctl()
680 args->size = obj->size; in ivpu_bo_info_ioctl()
682 mutex_unlock(&bo->lock); in ivpu_bo_info_ioctl()
694 timeout = drm_timeout_abs_to_jiffies(args->timeout_ns); in ivpu_bo_wait_ioctl()
696 obj = drm_gem_object_lookup(file, args->handle); in ivpu_bo_wait_ioctl()
698 return -EINVAL; in ivpu_bo_wait_ioctl()
700 ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout); in ivpu_bo_wait_ioctl()
702 ret = -ETIMEDOUT; in ivpu_bo_wait_ioctl()
705 args->job_status = to_ivpu_bo(obj)->job_status; in ivpu_bo_wait_ioctl()
717 if (bo->base.dma_buf && bo->base.dma_buf->file) in ivpu_bo_print_info()
718 dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count); in ivpu_bo_print_info()
721 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, in ivpu_bo_print_info()
722 kref_read(&bo->base.refcount), dma_refcount, bo->ops->name); in ivpu_bo_print_info()
733 "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type"); in ivpu_bo_list()
735 mutex_lock(&vdev->gctx.lock); in ivpu_bo_list()
736 list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node) in ivpu_bo_list()
738 mutex_unlock(&vdev->gctx.lock); in ivpu_bo_list()
740 xa_for_each(&vdev->context_xa, ctx_id, file_priv) { in ivpu_bo_list()
745 mutex_lock(&file_priv->ctx.lock); in ivpu_bo_list()
746 list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node) in ivpu_bo_list()
748 mutex_unlock(&file_priv->ctx.lock); in ivpu_bo_list()
756 struct drm_printer p = drm_info_printer(dev->dev); in ivpu_bo_list_print()