
Searched refs:heap (Results 1 – 25 of 32) sorted by relevance


/drivers/dma-buf/
dma-heap.c
81 struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, in dma_heap_buffer_alloc() argument
87 trace_android_vh_dmabuf_heap_flags_validation(heap, in dma_heap_buffer_alloc()
103 return heap->ops->allocate(heap, len, fd_flags, heap_flags); in dma_heap_buffer_alloc()
107 int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len, in dma_heap_bufferfd_alloc() argument
114 dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags); in dma_heap_bufferfd_alloc()
131 struct dma_heap *heap; in dma_heap_open() local
133 heap = xa_load(&dma_heap_minors, iminor(inode)); in dma_heap_open()
134 if (!heap) { in dma_heap_open()
140 file->private_data = heap; in dma_heap_open()
149 struct dma_heap *heap = file->private_data; in dma_heap_ioctl_allocate() local
[all …]
Makefile
4 obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
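
The dma-heap.c hits above are the chardev front end: dma_heap_buffer_alloc() validates the requested flags and dispatches to the heap's allocate() op, and dma_heap_open()/dma_heap_ioctl_allocate() expose that path through /dev/dma_heap/<name>. A minimal userspace sketch of driving that path is below; the heap name, length, and error handling are illustrative only, not taken from the files above.

/* sketch: allocate one page from the "system" heap via DMA_HEAP_IOCTL_ALLOC */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 4096,				/* one page */
		.fd_flags = O_RDWR | O_CLOEXEC,
		.heap_flags = 0,
	};
	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

	if (heap_fd < 0)
		return 1;
	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
		close(heap_fd);
		return 1;
	}
	printf("dma-buf fd %u, backed by the heap's allocate() op\n", alloc.fd);
	close(alloc.fd);
	close(heap_fd);
	return 0;
}
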
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
255 nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type) in nvkm_mmu_type() argument
257 if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) { in nvkm_mmu_type()
258 mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type; in nvkm_mmu_type()
259 mmu->type[mmu->type_nr].heap = heap; in nvkm_mmu_type()
268 if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) { in nvkm_mmu_heap()
269 mmu->heap[mmu->heap_nr].type = type; in nvkm_mmu_heap()
270 mmu->heap[mmu->heap_nr].size = size; in nvkm_mmu_heap()
282 int heap; in nvkm_mmu_host() local
285 heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL); in nvkm_mmu_host()
286 nvkm_mmu_type(mmu, heap, type); in nvkm_mmu_host()
[all …]
ummu.c
69 args->v0.size = mmu->heap[index].size; in nvkm_ummu_heap()
90 args->v0.heap = mmu->type[index].heap; in nvkm_ummu_type()
/drivers/gpu/drm/nouveau/include/nvkm/core/
mm.h
12 u8 heap; member
34 int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
36 int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
38 int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
44 nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap) in nvkm_mm_heap_size() argument
49 if (node->heap == heap) in nvkm_mm_heap_size()
gpuobj.h
21 struct nvkm_mm heap; member
/drivers/gpu/drm/nouveau/nvkm/core/
mm.c
99 b->heap = a->heap; in region_head()
111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_head() argument
122 if (unlikely(heap != NVKM_MM_HEAP_ANY)) { in nvkm_mm_head()
123 if (this->heap != heap) in nvkm_mm_head()
175 b->heap = a->heap; in region_tail()
186 nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_tail() argument
198 if (unlikely(heap != NVKM_MM_HEAP_ANY)) { in nvkm_mm_tail()
199 if (this->heap != heap) in nvkm_mm_tail()
240 nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block) in nvkm_mm_init() argument
277 node->heap = heap; in nvkm_mm_init()
gpuobj.c
180 ret = nvkm_mm_head(&parent->heap, 0, 1, size, size, in nvkm_gpuobj_ctor()
183 ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size, in nvkm_gpuobj_ctor()
211 return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1); in nvkm_gpuobj_ctor()
220 nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node); in nvkm_gpuobj_del()
221 nvkm_mm_fini(&gpuobj->heap); in nvkm_gpuobj_del()
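
Together, the mm.c and gpuobj.c hits cover the whole nvkm_mm lifecycle: nvkm_mm_init() describes a range, nvkm_mm_head()/nvkm_mm_tail() carve nodes from the bottom or the top of it, and nvkm_mm_free()/nvkm_mm_fini() release them. A condensed sketch of that sequence follows, using the same argument pattern as the callers above; the sizes, alignment, and function name are invented for illustration.

/* sketch: nvkm_mm lifecycle mirroring gpuobj.c / nv04.c (values invented) */
static int example_mm_usage(void)
{
	struct nvkm_mm mm;
	struct nvkm_mm_node *node = NULL;
	int ret;

	/* heap id 0, offset 0, 64 KiB long, 1-byte block granularity */
	ret = nvkm_mm_init(&mm, 0, 0, 0x10000, 1);
	if (ret)
		return ret;

	/* carve 0x100 bytes, 16-byte aligned, from the low end;
	 * nvkm_mm_tail() would allocate from the top instead */
	ret = nvkm_mm_head(&mm, 0, 1, 0x100, 0x100, 16, &node);
	if (ret == 0)
		nvkm_mm_free(&mm, &node);

	nvkm_mm_fini(&mm);
	return ret;
}
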
/drivers/gpu/drm/nouveau/nvif/
mmu.c
32 kfree(mmu->heap); in nvif_mmu_dtor()
50 mmu->heap = NULL; in nvif_mmu_ctor()
69 mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap), in nvif_mmu_ctor()
73 if (ret = -ENOMEM, !mmu->heap || !mmu->type) in nvif_mmu_ctor()
89 mmu->heap[i].size = args.size; in nvif_mmu_ctor()
109 mmu->type[i].heap = args.heap; in nvif_mmu_ctor()
/drivers/md/bcache/
movinggc.c
194 return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; in bucket_heap_top()
212 ca->heap.used = 0; in bch_moving_gc()
221 if (!heap_full(&ca->heap)) { in bch_moving_gc()
223 heap_add(&ca->heap, b, bucket_cmp); in bch_moving_gc()
224 } else if (bucket_cmp(b, heap_peek(&ca->heap))) { in bch_moving_gc()
228 ca->heap.data[0] = b; in bch_moving_gc()
229 heap_sift(&ca->heap, 0, bucket_cmp); in bch_moving_gc()
234 heap_pop(&ca->heap, b, bucket_cmp); in bch_moving_gc()
238 while (heap_pop(&ca->heap, b, bucket_cmp)) in bch_moving_gc()
util.h
40 #define init_heap(heap, _size, gfp) \ argument
43 (heap)->used = 0; \
44 (heap)->size = (_size); \
45 _bytes = (heap)->size * sizeof(*(heap)->data); \
46 (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
47 (heap)->data; \
50 #define free_heap(heap) \ argument
52 kvfree((heap)->data); \
53 (heap)->data = NULL; \
alloc.c
184 ca->heap.used = 0; in invalidate_buckets_lru()
190 if (!heap_full(&ca->heap)) in invalidate_buckets_lru()
191 heap_add(&ca->heap, b, bucket_max_cmp); in invalidate_buckets_lru()
192 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { in invalidate_buckets_lru()
193 ca->heap.data[0] = b; in invalidate_buckets_lru()
194 heap_sift(&ca->heap, 0, bucket_max_cmp); in invalidate_buckets_lru()
198 for (i = ca->heap.used / 2 - 1; i >= 0; --i) in invalidate_buckets_lru()
199 heap_sift(&ca->heap, i, bucket_min_cmp); in invalidate_buckets_lru()
202 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { in invalidate_buckets_lru()
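
movinggc.c and alloc.c drive the same pattern as the util.h macros above: init_heap() kvmallocs the backing array, heap_add()/heap_sift() maintain the comparator order, and heap_pop() drains the heap. A rough sketch of that flow is below; the element type, comparator, and capacity are invented, and in-tree users declare the heap struct through bcache's own helper macros rather than open-coding the size/used/data fields as done here.

/* sketch: bcache-style heap usage (element type and comparator are made up) */
static bool cmp_greater(unsigned int l, unsigned int r)
{
	return l > r;
}

static int example_heap_usage(void)
{
	struct {
		size_t size, used;
		unsigned int *data;
	} h;
	unsigned int v;

	if (!init_heap(&h, 16, GFP_KERNEL))	/* backing storage for 16 entries */
		return -ENOMEM;

	heap_add(&h, 3, cmp_greater);
	heap_add(&h, 7, cmp_greater);
	if (heap_full(&h))
		pr_info("heap at capacity\n");

	while (heap_pop(&h, v, cmp_greater))	/* pops in comparator order */
		pr_info("popped %u\n", v);

	free_heap(&h);
	return 0;
}
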
/drivers/dma-buf/heaps/
cma_heap.c
27 struct dma_heap *heap; member
32 struct cma_heap *heap; member
250 struct cma_heap *cma_heap = buffer->heap; in cma_heap_dma_buf_release()
278 static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, in cma_heap_allocate() argument
283 struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); in cma_heap_allocate()
342 buffer->heap = cma_heap; in cma_heap_allocate()
346 exp_info.exp_name = dma_heap_get_name(heap); in cma_heap_allocate()
386 cma_heap->heap = dma_heap_add(&exp_info); in __add_cma_heap()
387 if (IS_ERR(cma_heap->heap)) { in __add_cma_heap()
388 int ret = PTR_ERR(cma_heap->heap); in __add_cma_heap()
system_heap.c
30 struct dma_heap *heap; member
385 static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap, in system_heap_do_allocate() argument
408 buffer->heap = heap; in system_heap_do_allocate()
446 exp_info.exp_name = dma_heap_get_name(heap); in system_heap_do_allocate()
464 dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); in system_heap_do_allocate()
465 dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); in system_heap_do_allocate()
485 static struct dma_buf *system_heap_allocate(struct dma_heap *heap, in system_heap_allocate() argument
490 return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false); in system_heap_allocate()
493 static long system_get_pool_size(struct dma_heap *heap) in system_get_pool_size() argument
510 static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap, in system_uncached_heap_allocate() argument
[all …]
Kconfig
15 Choose this option to enable the system dmabuf heap. The system heap
22 Choose this option to enable dma-buf CMA heap. This heap is backed
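
cma_heap.c and system_heap.c register themselves the same way: fill a dma_heap_export_info with a name, an ops table whose allocate() returns a dma_buf, and a priv pointer later retrieved via dma_heap_get_drvdata(), then call dma_heap_add(). A skeletal sketch of that registration pattern follows; the heap name, the ops/function symbols, and the exact allocate() parameter types are assumptions for illustration, not taken from the files above.

/* sketch of the __add_cma_heap()-style registration (names invented) */
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>

static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
					unsigned long len,
					unsigned long fd_flags,
					unsigned long heap_flags)
{
	/* a real heap would build an sg_table and export it via dma_buf_export() */
	return ERR_PTR(-ENOMEM);
}

static const struct dma_heap_ops my_heap_ops = {
	.allocate = my_heap_allocate,
};

static int my_heap_register(void)
{
	struct dma_heap_export_info exp_info = {
		.name = "my_heap",		/* shows up as /dev/dma_heap/my_heap */
		.ops  = &my_heap_ops,
		.priv = NULL,			/* dma_heap_get_drvdata() returns this */
	};
	struct dma_heap *heap = dma_heap_add(&exp_info);

	return IS_ERR(heap) ? PTR_ERR(heap) : 0;
}
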
/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
nv04.c
31 struct nvkm_mm heap; member
103 nvkm_mm_free(&iobj->imem->heap, &iobj->node); in nv04_instobj_dtor()
136 ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node); in nv04_instobj_new()
167 ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1); in nv04_instmem_oneinit()
205 nvkm_mm_fini(&imem->heap); in nv04_instmem_dtor()
nv40.c
32 struct nvkm_mm heap; member
103 nvkm_mm_free(&iobj->imem->heap, &iobj->node); in nv40_instobj_dtor()
136 ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node); in nv40_instobj_new()
179 ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1); in nv40_instmem_oneinit()
221 nvkm_mm_fini(&imem->heap); in nv40_instmem_dtor()
/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
16 } *heap; member
28 u8 heap; member
if0008.h
25 __u8 heap; member
/drivers/gpu/drm/lima/
lima_gp.c
147 task->heap = bo; in lima_gp_task_run()
225 if (fail_size == task->heap->heap_size) { in lima_gp_task_recover()
228 ret = lima_heap_alloc(task->heap, task->vm); in lima_gp_task_recover()
238 f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size; in lima_gp_task_recover()
/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
ram.c
104 nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size, in nvkm_ram_get() argument
133 ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r); in nvkm_ram_get()
135 ret = nvkm_mm_head(mm, heap, type, max, min, align, &r); in nvkm_ram_get()
/drivers/gpu/drm/nouveau/
nouveau_abi16.c
118 nvkm_mm_free(&chan->heap, &ntfy->node); in nouveau_abi16_ntfy_fini()
145 if (chan->heap.block_size) in nouveau_abi16_chan_fini()
146 nvkm_mm_fini(&chan->heap); in nouveau_abi16_chan_fini()
349 ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1); in nouveau_abi16_ioctl_channel_alloc()
547 ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1, in nouveau_abi16_ioctl_notifierobj_alloc()
/drivers/gpu/drm/nouveau/include/nvkm/subdev/
mmu.h
100 } heap[4]; member
109 u8 heap; member
/drivers/gpu/drm/ttm/
ttm_bo.c
938 const struct ttm_place *heap = &places[i]; in ttm_bo_places_compat() local
940 if ((mem->start < heap->fpfn || in ttm_bo_places_compat()
941 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) in ttm_bo_places_compat()
944 *new_flags = heap->flags; in ttm_bo_places_compat()
945 if ((mem->mem_type == heap->mem_type) && in ttm_bo_places_compat()
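
ttm_bo_places_compat() walks an array of ttm_place entries and accepts a buffer when its current start and size fall inside one entry's fpfn/lpfn window and the memory types match. For reference, a hypothetical placement entry of the kind being checked; the 256 MiB VRAM window is an arbitrary example.

/* sketch: a single placement confining a BO to the first 256 MiB of VRAM */
static const struct ttm_place vram_low = {
	.fpfn     = 0,					/* first allowed page frame */
	.lpfn     = (256 * 1024 * 1024) >> PAGE_SHIFT,	/* last allowed page frame */
	.mem_type = TTM_PL_VRAM,
	.flags    = 0,
};
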
/drivers/misc/lkdtm/
Makefile
6 lkdtm-$(CONFIG_LKDTM) += heap.o
