Lines Matching +full:shared +full:- +full:memory

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
18 if (shm->pages) { in release_registered_pages()
19 if (shm->flags & TEE_SHM_USER_MAPPED) { in release_registered_pages()
20 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
24 for (n = 0; n < shm->num_pages; n++) in release_registered_pages()
25 put_page(shm->pages[n]); in release_registered_pages()
28 kfree(shm->pages); in release_registered_pages()
34 if (shm->flags & TEE_SHM_POOL) { in tee_shm_release()
37 if (shm->flags & TEE_SHM_DMA_BUF) in tee_shm_release()
38 poolm = teedev->pool->dma_buf_mgr; in tee_shm_release()
40 poolm = teedev->pool->private_mgr; in tee_shm_release()
42 poolm->ops->free(poolm, shm); in tee_shm_release()
43 } else if (shm->flags & TEE_SHM_REGISTER) { in tee_shm_release()
44 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); in tee_shm_release()
47 dev_err(teedev->dev.parent, in tee_shm_release()
53 teedev_ctx_put(shm->ctx); in tee_shm_release()
62 struct tee_device *teedev = ctx->teedev; in tee_shm_alloc()
69 dev_err(teedev->dev.parent, in tee_shm_alloc()
71 return ERR_PTR(-EINVAL); in tee_shm_alloc()
75 dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); in tee_shm_alloc()
76 return ERR_PTR(-EINVAL); in tee_shm_alloc()
80 return ERR_PTR(-EINVAL); in tee_shm_alloc()
82 if (!teedev->pool) { in tee_shm_alloc()
84 ret = ERR_PTR(-EINVAL); in tee_shm_alloc()
90 ret = ERR_PTR(-ENOMEM); in tee_shm_alloc()
94 refcount_set(&shm->refcount, 1); in tee_shm_alloc()
95 shm->flags = flags | TEE_SHM_POOL; in tee_shm_alloc()
96 shm->ctx = ctx; in tee_shm_alloc()
98 poolm = teedev->pool->dma_buf_mgr; in tee_shm_alloc()
100 poolm = teedev->pool->private_mgr; in tee_shm_alloc()
102 rc = poolm->ops->alloc(poolm, shm, size); in tee_shm_alloc()
109 mutex_lock(&teedev->mutex); in tee_shm_alloc()
110 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); in tee_shm_alloc()
111 mutex_unlock(&teedev->mutex); in tee_shm_alloc()
112 if (shm->id < 0) { in tee_shm_alloc()
113 ret = ERR_PTR(shm->id); in tee_shm_alloc()
122 poolm->ops->free(poolm, shm); in tee_shm_alloc()
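For orientation, here is a minimal, hedged sketch (not part of the listing) of how a kernel TEE client would use the allocation path shown above. It assumes a struct tee_context obtained elsewhere, for example via tee_client_open_context(); the function name is illustrative only.

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/tee_drv.h>

/* Allocate 4 KiB of pool-backed shared memory that is kernel-mapped and
 * exportable as a dma-buf, then release it again.
 */
static int example_shm_alloc(struct tee_context *ctx)
{
	struct tee_shm *shm;

	shm = tee_shm_alloc(ctx, SZ_4K, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... fill or read the buffer, e.g. via tee_shm_get_va() ... */

	tee_shm_free(shm);
	return 0;
}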
132 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
133 * @ctx: Context that allocates the shared memory
134 * @size: Requested size of shared memory
136  * The returned memory is registered in secure world and is suitable to be
137  * passed as a memory buffer parameter to
138  * tee_client_invoke_func(). The memory allocated is later freed with a
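A hedged sketch of the pattern this comment describes: allocate a kernel buffer, pass it as a memref parameter to tee_client_invoke_func(), then free it. The command id (TA_CMD_EXAMPLE), the session id and the four-parameter layout are placeholders; the real values depend entirely on the trusted application being invoked.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

#define TA_CMD_EXAMPLE	0	/* hypothetical TA command id */

static int example_invoke(struct tee_context *ctx, u32 session, size_t len)
{
	struct tee_ioctl_invoke_arg arg;
	struct tee_param param[4];
	struct tee_shm *shm;
	int rc;

	shm = tee_shm_alloc_kernel_buf(ctx, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	memset(&arg, 0, sizeof(arg));
	memset(param, 0, sizeof(param));
	arg.func = TA_CMD_EXAMPLE;
	arg.session = session;
	arg.num_params = 4;

	/* Hand the whole buffer to the TA as an in/out memref. */
	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param[0].u.memref.shm = shm;
	param[0].u.memref.size = len;

	rc = tee_client_invoke_func(ctx, &arg, param);

	tee_shm_free(shm);
	return rc;
}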
152 struct tee_device *teedev = ctx->teedev; in tee_shm_register()
162 return ERR_PTR(-ENOTSUPP); in tee_shm_register()
165 return ERR_PTR(-EINVAL); in tee_shm_register()
167 if (!teedev->desc->ops->shm_register || in tee_shm_register()
168 !teedev->desc->ops->shm_unregister) { in tee_shm_register()
170 return ERR_PTR(-ENOTSUPP); in tee_shm_register()
177 ret = ERR_PTR(-ENOMEM); in tee_shm_register()
181 refcount_set(&shm->refcount, 1); in tee_shm_register()
182 shm->flags = flags | TEE_SHM_REGISTER; in tee_shm_register()
183 shm->ctx = ctx; in tee_shm_register()
184 shm->id = -1; in tee_shm_register()
187 shm->offset = addr - start; in tee_shm_register()
188 shm->size = length; in tee_shm_register()
189 num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE; in tee_shm_register()
190 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in tee_shm_register()
191 if (!shm->pages) { in tee_shm_register()
192 ret = ERR_PTR(-ENOMEM); in tee_shm_register()
198 shm->pages); in tee_shm_register()
205 ret = ERR_PTR(-ENOMEM); in tee_shm_register()
214 rc = get_kernel_pages(kiov, num_pages, 0, shm->pages); in tee_shm_register()
218 shm->num_pages = rc; in tee_shm_register()
221 rc = -ENOMEM; in tee_shm_register()
226 mutex_lock(&teedev->mutex); in tee_shm_register()
227 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); in tee_shm_register()
228 mutex_unlock(&teedev->mutex); in tee_shm_register()
230 if (shm->id < 0) { in tee_shm_register()
231 ret = ERR_PTR(shm->id); in tee_shm_register()
235 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, in tee_shm_register()
236 shm->num_pages, start); in tee_shm_register()
245 if (shm->id >= 0) { in tee_shm_register()
246 mutex_lock(&teedev->mutex); in tee_shm_register()
247 idr_remove(&teedev->idr, shm->id); in tee_shm_register()
248 mutex_unlock(&teedev->mutex); in tee_shm_register()
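For contrast with the pool allocation above, a hedged sketch of registering an already existing kernel buffer with tee_shm_register(). The TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED flag combination is an assumption based on the flag checks near the top of the function; a caller would test the result with IS_ERR() and later release it with tee_shm_free().

#include <linux/err.h>
#include <linux/tee_drv.h>

/* Register a kernel buffer so secure world can access it; the pages are
 * pinned and the registration is undone when the shm is released.
 */
static struct tee_shm *example_shm_register(struct tee_context *ctx,
					    void *buf, size_t len)
{
	return tee_shm_register(ctx, (unsigned long)buf, len,
				TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
}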
261 tee_shm_put(filp->private_data); in tee_shm_fop_release()
267 struct tee_shm *shm = filp->private_data; in tee_shm_fop_mmap()
268 size_t size = vma->vm_end - vma->vm_start; in tee_shm_fop_mmap()
270 /* Refuse sharing shared memory provided by the application */ in tee_shm_fop_mmap()
271 if (shm->flags & TEE_SHM_USER_MAPPED) in tee_shm_fop_mmap()
272 return -EINVAL; in tee_shm_fop_mmap()
275 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) in tee_shm_fop_mmap()
276 return -EINVAL; in tee_shm_fop_mmap()
278 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, in tee_shm_fop_mmap()
279 size, vma->vm_page_prot); in tee_shm_fop_mmap()
289 * tee_shm_get_fd() - Increase reference count and return file descriptor
290 * @shm: Shared memory handle
291 * @returns user space file descriptor to shared memory
297 if (!(shm->flags & TEE_SHM_DMA_BUF)) in tee_shm_get_fd()
298 return -EINVAL; in tee_shm_get_fd()
301 refcount_inc(&shm->refcount); in tee_shm_get_fd()
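A hedged sketch of the export path this helper supports: only dma-buf-backed allocations can be turned into a file descriptor, and tee_shm_get_fd() takes its own reference, so the allocation reference can be dropped afterwards and the fd keeps the buffer alive on its own.

#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_shm_export(struct tee_context *ctx, size_t len)
{
	struct tee_shm *shm;
	int fd;

	shm = tee_shm_alloc(ctx, len, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	fd = tee_shm_get_fd(shm);	/* takes its own reference */

	/*
	 * If fd >= 0 the file descriptor now holds the buffer; if the
	 * call failed, this put frees it immediately.
	 */
	tee_shm_put(shm);
	return fd;
}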
309 * tee_shm_free() - Free shared memory
310 * @shm: Handle to shared memory to free
319 * tee_shm_va2pa() - Get physical address of a virtual address
320 * @shm: Shared memory handle
327 if (!(shm->flags & TEE_SHM_MAPPED)) in tee_shm_va2pa()
328 return -EINVAL; in tee_shm_va2pa()
330 if ((char *)va < (char *)shm->kaddr) in tee_shm_va2pa()
331 return -EINVAL; in tee_shm_va2pa()
332 if ((char *)va >= ((char *)shm->kaddr + shm->size)) in tee_shm_va2pa()
333 return -EINVAL; in tee_shm_va2pa()
336 shm, (unsigned long)va - (unsigned long)shm->kaddr, pa); in tee_shm_va2pa()
341 * tee_shm_pa2va() - Get virtual address of a physical address
342 * @shm: Shared memory handle
349 if (!(shm->flags & TEE_SHM_MAPPED)) in tee_shm_pa2va()
350 return -EINVAL; in tee_shm_pa2va()
352 if (pa < shm->paddr) in tee_shm_pa2va()
353 return -EINVAL; in tee_shm_pa2va()
354 if (pa >= (shm->paddr + shm->size)) in tee_shm_pa2va()
355 return -EINVAL; in tee_shm_pa2va()
358 void *v = tee_shm_get_va(shm, pa - shm->paddr); in tee_shm_pa2va()
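A hedged sketch of the two translation helpers above: both require a TEE_SHM_MAPPED buffer and bound-check the address against the shm before converting.

#include <linux/tee_drv.h>

/* Translate a kernel VA inside a mapped shm to its PA and back again. */
static int example_shm_translate(struct tee_shm *shm, void *va)
{
	phys_addr_t pa;
	void *va_back;
	int rc;

	rc = tee_shm_va2pa(shm, va, &pa);
	if (rc)
		return rc;	/* -EINVAL: not mapped or va out of range */

	return tee_shm_pa2va(shm, pa, &va_back);
}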
369 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
370 * @shm: Shared memory handle
371 * @offs: Offset from start of this shared memory
372 * @returns virtual address of the shared memory + offs if offs is within
373 * the bounds of this shared memory, else an ERR_PTR
377 if (!(shm->flags & TEE_SHM_MAPPED)) in tee_shm_get_va()
378 return ERR_PTR(-EINVAL); in tee_shm_get_va()
379 if (offs >= shm->size) in tee_shm_get_va()
380 return ERR_PTR(-EINVAL); in tee_shm_get_va()
381 return (char *)shm->kaddr + offs; in tee_shm_get_va()
386 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
387 * @shm: Shared memory handle
388 * @offs: Offset from start of this shared memory
390 * @returns 0 if offs is within the bounds of this shared memory, else an
395 if (offs >= shm->size) in tee_shm_get_pa()
396 return -EINVAL; in tee_shm_get_pa()
398 *pa = shm->paddr + offs; in tee_shm_get_pa()
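A hedged sketch of the offset-based accessors: look up the kernel virtual address at an offset, write through it, and fetch the matching physical address.

#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_shm_offset(struct tee_shm *shm, size_t offs, u8 val)
{
	phys_addr_t pa;
	u8 *p;

	p = tee_shm_get_va(shm, offs);	/* ERR_PTR(-EINVAL) if out of bounds */
	if (IS_ERR(p))
		return PTR_ERR(p);
	*p = val;

	return tee_shm_get_pa(shm, offs, &pa);
}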
404 * tee_shm_get_from_id() - Find shared memory object and increase reference
406 * @ctx: Context owning the shared memory
407 * @id: Id of shared memory object
416 return ERR_PTR(-EINVAL); in tee_shm_get_from_id()
418 teedev = ctx->teedev; in tee_shm_get_from_id()
419 mutex_lock(&teedev->mutex); in tee_shm_get_from_id()
420 shm = idr_find(&teedev->idr, id); in tee_shm_get_from_id()
426 if (!shm || shm->ctx != ctx) in tee_shm_get_from_id()
427 shm = ERR_PTR(-EINVAL); in tee_shm_get_from_id()
429 refcount_inc(&shm->refcount); in tee_shm_get_from_id()
430 mutex_unlock(&teedev->mutex); in tee_shm_get_from_id()
436 * tee_shm_put() - Decrease reference count on a shared memory handle
437 * @shm: Shared memory handle
441 struct tee_device *teedev = shm->ctx->teedev; in tee_shm_put()
444 mutex_lock(&teedev->mutex); in tee_shm_put()
445 if (refcount_dec_and_test(&shm->refcount)) { in tee_shm_put()
452 if (shm->flags & TEE_SHM_DMA_BUF) in tee_shm_put()
453 idr_remove(&teedev->idr, shm->id); in tee_shm_put()
456 mutex_unlock(&teedev->mutex); in tee_shm_put()
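Finally, a hedged sketch of the id-based lookup and reference handling shown above, for example for an shm id received from user space.

#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_shm_by_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);

	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... use shm while holding the reference taken by the lookup ... */

	tee_shm_put(shm);
	return 0;
}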