/drivers/staging/android/ion/

ion_heap.c
     28  void *ion_heap_map_kernel(struct ion_heap *heap,  [argument, in ion_heap_map_kernel()]
     65  void ion_heap_unmap_kernel(struct ion_heap *heap,  [argument, in ion_heap_unmap_kernel()]
     71  int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,  [argument, in ion_heap_map_user()]
    163  void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)  [argument, in ion_heap_freelist_add()]
    165  spin_lock(&heap->free_lock);  [in ion_heap_freelist_add()]
    166  list_add(&buffer->list, &heap->free_list);  [in ion_heap_freelist_add()]
    167  heap->free_list_size += buffer->size;  [in ion_heap_freelist_add()]
    168  spin_unlock(&heap->free_lock);  [in ion_heap_freelist_add()]
    169  wake_up(&heap->waitqueue);  [in ion_heap_freelist_add()]
    172  size_t ion_heap_freelist_size(struct ion_heap *heap)  [argument, in ion_heap_freelist_size()]
    [all …]

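Lines 163-169 above are ION's deferred-free path in full: a buffer is queued on a spinlock-protected list, the accounted freelist size grows, and a reaper thread is woken to do the real work later. A minimal sketch of the same pattern, with hypothetical my_heap/my_buf types standing in for the ION structures:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct my_heap {
        spinlock_t free_lock;
        struct list_head free_list;     /* buffers awaiting the reaper */
        size_t free_list_size;          /* bytes queued, for accounting */
        wait_queue_head_t waitqueue;    /* reaper thread sleeps here */
    };

    struct my_buf {
        struct list_head list;
        size_t size;
    };

    /* Queue a buffer for deferred freeing and kick the reaper. */
    static void my_freelist_add(struct my_heap *heap, struct my_buf *buf)
    {
        spin_lock(&heap->free_lock);
        list_add(&buf->list, &heap->free_list);
        heap->free_list_size += buf->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
    }

Freeing outside the caller's context keeps allocation latency down; the queued size is tracked so the heap can report or cap how much memory is parked on the list.
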
ion_system_heap.c
     51  struct ion_heap heap;  [member]
     55  static struct page *alloc_buffer_page(struct ion_system_heap *heap,  [argument, in alloc_buffer_page()]
     60  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  [in alloc_buffer_page()]
     80  static void free_buffer_page(struct ion_system_heap *heap,  [argument, in free_buffer_page()]
     87  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  [in free_buffer_page()]
     98  static struct page *alloc_largest_available(struct ion_system_heap *heap,  [argument, in alloc_largest_available()]
    112  page = alloc_buffer_page(heap, buffer, orders[i]);  [in alloc_largest_available()]
    122  static int ion_system_heap_allocate(struct ion_heap *heap,  [argument, in ion_system_heap_allocate()]
    127  struct ion_system_heap *sys_heap = container_of(heap,  [in ion_system_heap_allocate()]
    129  heap);  [in ion_system_heap_allocate()]
    [all …]

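alloc_largest_available() (line 98) works down a table of page orders, taking the biggest chunk that still fits the remaining length, so large buffers are built from a few high-order pages and fall back to order 0 under fragmentation. A sketch of that loop; try_alloc_order() is a hypothetical stand-in for the per-order page-pool lookup done by alloc_buffer_page():

    #include <linux/kernel.h>       /* ARRAY_SIZE() */
    #include <linux/mm.h>           /* PAGE_SIZE, struct page */

    struct my_heap;
    struct page *try_alloc_order(struct my_heap *heap, unsigned int order);

    static const unsigned int orders[] = { 8, 4, 0 };   /* largest first */

    static struct page *alloc_largest(struct my_heap *heap, size_t remaining,
                                      unsigned int max_order)
    {
        struct page *page;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(orders); i++) {
            if ((PAGE_SIZE << orders[i]) > remaining)
                continue;       /* bigger than what is still needed */
            if (orders[i] > max_order)
                continue;       /* caller capped the order */
            page = try_alloc_order(heap, orders[i]);
            if (page)
                return page;
        }
        return NULL;            /* even order 0 failed */
    }

The { 8, 4, 0 } table mirrors ION's defaults; the values themselves are a tuning choice.
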
ion_carveout_heap.c
     29  struct ion_heap heap;  [member]
     34  ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,  [argument, in ion_carveout_allocate()]
     39  container_of(heap, struct ion_carveout_heap, heap);  [in ion_carveout_allocate()]
     48  void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,  [argument, in ion_carveout_free()]
     52  container_of(heap, struct ion_carveout_heap, heap);  [in ion_carveout_free()]
     59  static int ion_carveout_heap_phys(struct ion_heap *heap,  [argument, in ion_carveout_heap_phys()]
     72  static int ion_carveout_heap_allocate(struct ion_heap *heap,  [argument, in ion_carveout_heap_allocate()]
     91  paddr = ion_carveout_allocate(heap, size, align);  [in ion_carveout_heap_allocate()]
    111  struct ion_heap *heap = buffer->heap;  [local, in ion_carveout_heap_free()]
    122  ion_carveout_free(heap, paddr, buffer->size);  [in ion_carveout_heap_free()]
    [all …]

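Lines 39 and 52 recover the carveout heap from its embedded struct ion_heap with container_of(); every ION heap subtype uses this embedded-base-object idiom. Reduced to its essentials, with a stand-in base type:

    #include <linux/kernel.h>       /* container_of() */

    struct ion_heap { int id; };    /* stand-in for the real base type */

    struct my_carveout_heap {
        struct ion_heap heap;       /* embedded base object */
        unsigned long base;         /* start of the reserved region */
    };

    /* Map a pointer to the embedded member back to its wrapper. */
    static struct my_carveout_heap *to_carveout(struct ion_heap *h)
    {
        return container_of(h, struct my_carveout_heap, heap);
    }

container_of() is just pointer arithmetic over offsetof(), so the conversion costs nothing at runtime.
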
ion_priv.h
     71  struct ion_heap *heap;  [member]
    113  int (*allocate)(struct ion_heap *heap,
    117  int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
    119  struct sg_table * (*map_dma)(struct ion_heap *heap,
    121  void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
    122  void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
    123  void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
    126  int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
    185  int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
    227  void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
    [all …]

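ion_priv.h declares the per-heap operations as a table of function pointers (lines 113-126): the core allocator never knows which heap type it is talking to, and optional hooks stay NULL. A trimmed sketch of the shape, with hypothetical my_* names:

    #include <linux/err.h>
    #include <linux/errno.h>

    struct my_buf;
    struct my_heap;

    struct my_heap_ops {
        int   (*allocate)(struct my_heap *heap, struct my_buf *buf,
                          unsigned long len, unsigned long align,
                          unsigned long flags);
        void  (*free)(struct my_buf *buf);
        void *(*map_kernel)(struct my_heap *heap, struct my_buf *buf);
        void  (*unmap_kernel)(struct my_heap *heap, struct my_buf *buf);
    };

    struct my_heap {
        const struct my_heap_ops *ops;
    };

    /* The core dispatches through the table; optional ops need a guard. */
    static void *my_map_kernel(struct my_heap *heap, struct my_buf *buf)
    {
        if (!heap->ops->map_kernel)
            return ERR_PTR(-ENODEV);
        return heap->ops->map_kernel(heap, buf);
    }
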
ion_cma_heap.c
     30  struct ion_heap heap;  [member]
     34  #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
     44  static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,  [argument, in ion_cma_allocate()]
     48  struct ion_cma_heap *cma_heap = to_cma_heap(heap);  [in ion_cma_allocate()]
     92  struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);  [in ion_cma_free()]
    105  static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,  [argument, in ion_cma_phys()]
    108  struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);  [in ion_cma_phys()]
    121  static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,  [argument, in ion_cma_heap_map_dma()]
    129  static void ion_cma_heap_unmap_dma(struct ion_heap *heap,  [argument, in ion_cma_heap_unmap_dma()]
    137  struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);  [in ion_cma_mmap()]
    [all …]

ion_chunk_heap.c
     28  struct ion_heap heap;  [member]
     36  static int ion_chunk_heap_allocate(struct ion_heap *heap,  [argument, in ion_chunk_heap_allocate()]
     42  container_of(heap, struct ion_chunk_heap, heap);  [in ion_chunk_heap_allocate()]
     95  struct ion_heap *heap = buffer->heap;  [local, in ion_chunk_heap_free()]
     97  container_of(heap, struct ion_chunk_heap, heap);  [in ion_chunk_heap_free()]
    120  static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,  [argument, in ion_chunk_heap_map_dma()]
    126  static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,  [argument, in ion_chunk_heap_unmap_dma()]
    173  chunk_heap->heap.ops = &chunk_heap_ops;  [in ion_chunk_heap_create()]
    174  chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;  [in ion_chunk_heap_create()]
    175  chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;  [in ion_chunk_heap_create()]
    [all …]

ion.c
    177  static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,  [argument, in ion_buffer_create()]
    192  buffer->heap = heap;  [in ion_buffer_create()]
    196  ret = heap->ops->allocate(heap, buffer, len, align, flags);  [in ion_buffer_create()]
    199  if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))  [in ion_buffer_create()]
    202  ion_heap_freelist_drain(heap, 0);  [in ion_buffer_create()]
    203  ret = heap->ops->allocate(heap, buffer, len, align,  [in ion_buffer_create()]
    212  table = heap->ops->map_dma(heap, buffer);  [in ion_buffer_create()]
    265  heap->ops->unmap_dma(heap, buffer);  [in ion_buffer_create()]
    267  heap->ops->free(buffer);  [in ion_buffer_create()]
    276  buffer->heap->ops->unmap_kernel(buffer->heap, buffer);  [in ion_buffer_destroy()]
    [all …]

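Lines 196-203 are the interesting part of ion_buffer_create(): when the first allocate() fails on a deferred-free heap, memory may still be parked on the freelist, so the core drains it completely and retries once. Reconstructed from the hits above (the error label name is an assumption):

    ret = heap->ops->allocate(heap, buffer, len, align, flags);
    if (ret) {
        if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
            goto err;   /* nothing queued that could be reclaimed */

        /* Reap every deferred buffer (0 = no byte limit), then retry. */
        ion_heap_freelist_drain(heap, 0);
        ret = heap->ops->allocate(heap, buffer, len, align, flags);
        if (ret)
            goto err;
    }
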
/drivers/gpu/drm/radeon/

radeon_mem.c
     84  static struct mem_block *alloc_block(struct mem_block *heap, int size,  [argument, in alloc_block()]
     90  list_for_each(p, heap) {  [in alloc_block()]
     99  static struct mem_block *find_block(struct mem_block *heap, int start)  [argument, in find_block()]
    103  list_for_each(p, heap)  [in find_block()]
    136  static int init_heap(struct mem_block **heap, int start, int size)  [argument, in init_heap()]
    143  *heap = kzalloc(sizeof(**heap), GFP_KERNEL);  [in init_heap()]
    144  if (!*heap) {  [in init_heap()]
    152  blocks->next = blocks->prev = *heap;  [in init_heap()]
    154  (*heap)->file_priv = (struct drm_file *) - 1;  [in init_heap()]
    155  (*heap)->next = (*heap)->prev = blocks;  [in init_heap()]
    [all …]

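init_heap() (lines 136-155) builds the oldest allocator structure in the book: a circular doubly linked list headed by a dummy sentinel (its file_priv is poisoned with -1 so it can never look like a real owner), with one free block initially covering the whole range. The same construction sketched below, with a generic owner field standing in for file_priv:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct mem_block {
        struct mem_block *next, *prev;
        int start, size;
        void *owner;        /* NULL = free; sentinel gets a poison value */
    };

    static int my_init_heap(struct mem_block **heap, int start, int size)
    {
        struct mem_block *blocks = kzalloc(sizeof(*blocks), GFP_KERNEL);

        if (!blocks)
            return -ENOMEM;

        *heap = kzalloc(sizeof(**heap), GFP_KERNEL);
        if (!*heap) {
            kfree(blocks);
            return -ENOMEM;
        }

        blocks->start = start;
        blocks->size = size;
        blocks->next = blocks->prev = *heap;    /* two-node ring */

        (*heap)->owner = (void *)-1;            /* mark the sentinel */
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
    }

The sentinel removes every "empty list" special case from alloc_block() and find_block(): traversal always starts and ends at the head node.
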
radeon_drv.h
    393  extern void radeon_mem_takedown(struct mem_block **heap);
    395  struct mem_block *heap);

/drivers/gpu/drm/nouveau/nvkm/core/

mm.c
     99  b->heap = a->heap;  [in region_head()]
    111  nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,  [argument, in nvkm_mm_head()]
    122  if (unlikely(heap != NVKM_MM_HEAP_ANY)) {  [in nvkm_mm_head()]
    123  if (this->heap != heap)  [in nvkm_mm_head()]
    174  b->heap = a->heap;  [in region_tail()]
    185  nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,  [argument, in nvkm_mm_tail()]
    197  if (unlikely(heap != NVKM_MM_HEAP_ANY)) {  [in nvkm_mm_tail()]
    198  if (this->heap != heap)  [in nvkm_mm_tail()]
    275  node->heap = ++mm->heap_nodes;  [in nvkm_mm_init()]

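nvkm_mm tags each node with a heap id; nvkm_mm_init() hands out a fresh id per registered block of memory (line 275), and nvkm_mm_head()/nvkm_mm_tail() skip nodes from other heaps unless the caller passes NVKM_MM_HEAP_ANY (lines 122-123 and 197-198). The filter at the core of both allocators looks like this sketch (the list-member names are assumptions):

    struct nvkm_mm_node *this;

    list_for_each_entry(this, &mm->free, fl_entry) {
        /* skip free nodes registered under a different heap id */
        if (heap != NVKM_MM_HEAP_ANY && this->heap != heap)
            continue;
        /* ... type, size_min/size_max and align checks follow ... */
    }
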
gpuobj.c
    157  ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,  [in nvkm_gpuobj_ctor()]
    160  ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,  [in nvkm_gpuobj_ctor()]
    188  return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);  [in nvkm_gpuobj_ctor()]
    197  nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);  [in nvkm_gpuobj_del()]
    198  nvkm_mm_fini(&gpuobj->heap);  [in nvkm_gpuobj_del()]

/drivers/md/bcache/

movinggc.c
    196  return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;  [in bucket_heap_top()]
    215  ca->heap.used = 0;  [in bch_moving_gc()]
    224  if (!heap_full(&ca->heap)) {  [in bch_moving_gc()]
    226  heap_add(&ca->heap, b, bucket_cmp);  [in bch_moving_gc()]
    227  } else if (bucket_cmp(b, heap_peek(&ca->heap))) {  [in bch_moving_gc()]
    231  ca->heap.data[0] = b;  [in bch_moving_gc()]
    232  heap_sift(&ca->heap, 0, bucket_cmp);  [in bch_moving_gc()]
    237  heap_pop(&ca->heap, b, bucket_cmp);  [in bch_moving_gc()]
    241  while (heap_pop(&ca->heap, b, bucket_cmp))  [in bch_moving_gc()]

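Lines 224-232 are the classic bounded top-N selection: while the fixed-size heap has room, insert; once full, compare each new bucket against the root and, if it wins, overwrite the root and sift it back down. The heap therefore always holds the N best candidates seen so far, at O(log N) per bucket. The pattern in plain C, keeping the N largest ints with a min-heap:

    static void swap_int(int *a, int *b)
    {
        int t = *a; *a = *b; *b = t;
    }

    static void sift_up(int *h, int i)
    {
        while (i > 0 && h[i] < h[(i - 1) / 2]) {
            swap_int(&h[i], &h[(i - 1) / 2]);
            i = (i - 1) / 2;
        }
    }

    static void sift_down(int *h, int used, int i)
    {
        for (;;) {
            int l = 2 * i + 1, r = l + 1, m = i;

            if (l < used && h[l] < h[m])
                m = l;
            if (r < used && h[r] < h[m])
                m = r;
            if (m == i)
                return;
            swap_int(&h[i], &h[m]);
            i = m;
        }
    }

    /* Mirrors lines 224-232: grow while not full, else replace the
     * root whenever the new value beats the current minimum. */
    static void top_n_add(int *h, int *used, int n, int v)
    {
        if (*used < n) {
            h[(*used)++] = v;
            sift_up(h, *used - 1);
        } else if (v > h[0]) {
            h[0] = v;
            sift_down(h, *used, 0);
        }
    }

bcache's heap_add()/heap_sift() macros play the roles of the insert and sift-down steps here, parameterized by the comparator.
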
util.h
     40  #define init_heap(heap, _size, gfp) \  [argument]
     43  (heap)->used = 0; \
     44  (heap)->size = (_size); \
     45  _bytes = (heap)->size * sizeof(*(heap)->data); \
     46  (heap)->data = NULL; \
     48  (heap)->data = kmalloc(_bytes, (gfp)); \
     49  if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
     50  (heap)->data = vmalloc(_bytes); \
     51  (heap)->data; \
     54  #define free_heap(heap) \  [argument]
    [all …]

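init_heap() (lines 40-51) is a statement-expression macro around a common kernel idiom: try kmalloc() first and, for GFP_KERNEL callers, fall back to vmalloc() when a physically contiguous allocation fails, since virtually contiguous memory is enough for the heap array. The idiom as a plain function, plus the matching release that must ask which allocator produced the pointer:

    #include <linux/mm.h>       /* is_vmalloc_addr() */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *alloc_fallback(size_t bytes, gfp_t gfp)
    {
        void *p = kmalloc(bytes, gfp);

        if (!p && (gfp & GFP_KERNEL))
            p = vmalloc(bytes);     /* may sleep; GFP_KERNEL only */
        return p;
    }

    static void free_fallback(void *p)
    {
        if (is_vmalloc_addr(p))
            vfree(p);
        else
            kfree(p);
    }

Newer kernels package exactly this pair as kvmalloc()/kvfree().
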
alloc.c
    183  ca->heap.used = 0;  [in invalidate_buckets_lru()]
    189  if (!heap_full(&ca->heap))  [in invalidate_buckets_lru()]
    190  heap_add(&ca->heap, b, bucket_max_cmp);  [in invalidate_buckets_lru()]
    191  else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {  [in invalidate_buckets_lru()]
    192  ca->heap.data[0] = b;  [in invalidate_buckets_lru()]
    193  heap_sift(&ca->heap, 0, bucket_max_cmp);  [in invalidate_buckets_lru()]
    197  for (i = ca->heap.used / 2 - 1; i >= 0; --i)  [in invalidate_buckets_lru()]
    198  heap_sift(&ca->heap, i, bucket_min_cmp);  [in invalidate_buckets_lru()]
    201  if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {  [in invalidate_buckets_lru()]

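invalidate_buckets_lru() pulls a neat double-comparator trick: the scan fills the bounded heap under bucket_max_cmp so the best candidates survive, then lines 197-198 rebuild the very same array as a heap under bucket_min_cmp so candidates pop out in the order they should be invalidated. The rebuild is Floyd's bottom-up heapify, O(n), starting from the last internal node at used/2 - 1. In isolation:

    /* before(a, b) != 0 means a belongs closer to the root than b */
    static void sift_down(int *h, int used, int i, int (*before)(int, int))
    {
        for (;;) {
            int l = 2 * i + 1, r = l + 1, m = i, t;

            if (l < used && before(h[l], h[m]))
                m = l;
            if (r < used && before(h[r], h[m]))
                m = r;
            if (m == i)
                return;
            t = h[i]; h[i] = h[m]; h[m] = t;
            i = m;
        }
    }

    /* Floyd's O(n) rebuild: the loop on lines 197-198. */
    static void heapify(int *h, int used, int (*before)(int, int))
    {
        int i;

        for (i = used / 2 - 1; i >= 0; i--)
            sift_down(h, used, i, before);
    }
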
bcache.h
    422  DECLARE_HEAP(struct bucket *, heap);

super.c
   1830  free_heap(&ca->heap);  [in bch_cache_release()]
   1876  !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||  [in cache_alloc()]

/drivers/gpu/drm/nouveau/include/nvkm/core/

mm.h
     11  u8 heap;  [member]
     35  int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
     37  int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,

gpuobj.h
     21  struct nvkm_mm heap;  [member]

/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/

nv04.c
     32  struct nvkm_mm heap;  [member]
     98  nvkm_mm_free(&iobj->imem->heap, &iobj->node);  [in nv04_instobj_dtor()]
    131  ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,  [in nv04_instobj_new()]
    163  ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);  [in nv04_instmem_oneinit()]
    201  nvkm_mm_fini(&imem->heap);  [in nv04_instmem_dtor()]

nv40.c
     33  struct nvkm_mm heap;  [member]
     97  nvkm_mm_free(&iobj->imem->heap, &iobj->node);  [in nv40_instobj_dtor()]
    130  ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,  [in nv40_instobj_new()]
    174  ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);  [in nv40_instmem_oneinit()]
    216  nvkm_mm_fini(&imem->heap);  [in nv40_instmem_dtor()]

/drivers/gpu/drm/nouveau/

nouveau_abi16.c
    112  nvkm_mm_free(&chan->heap, &ntfy->node);  [in nouveau_abi16_ntfy_fini()]
    139  if (chan->heap.block_size)  [in nouveau_abi16_chan_fini()]
    140  nvkm_mm_fini(&chan->heap);  [in nouveau_abi16_chan_fini()]
    330  ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);  [in nouveau_abi16_ioctl_channel_alloc()]
    528  ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,  [in nouveau_abi16_ioctl_notifierobj_alloc()]

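nouveau_abi16.c exercises the whole nvkm_mm lifecycle for the per-channel notifier heap: nvkm_mm_init() over one page at channel creation (line 330), nvkm_mm_head() to carve an exact-size node per notifier object (line 528), nvkm_mm_free() when a notifier dies (line 112), and nvkm_mm_fini() at channel teardown (lines 139-140), with heap.block_size doubling as the "was initialized" flag. In outline, with parameter meanings inferred from the prototypes in mm.h above:

    int ret;
    struct nvkm_mm mm;
    struct nvkm_mm_node *node;
    u32 size = 0x1000;      /* example request */

    /* offset 0, one page of space, minimum block size 1 (line 330) */
    ret = nvkm_mm_init(&mm, 0, PAGE_SIZE, 1);
    if (ret)
        return ret;

    /* heap 0 (any), type 1, size_max == size_min == size, align 1 */
    ret = nvkm_mm_head(&mm, 0, 1, size, size, 1, &node);
    if (ret == 0)
        nvkm_mm_free(&mm, &node);   /* per-object release (line 112) */

    nvkm_mm_fini(&mm);              /* channel teardown (line 140) */
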
nouveau_abi16.h
     27  struct nvkm_mm heap;  [member]

/drivers/gpu/drm/ttm/

ttm_bo.c
   1014  const struct ttm_place *heap = &placement->placement[i];  [local, in ttm_bo_mem_compat()]
   1016  (mem->start < heap->fpfn ||  [in ttm_bo_mem_compat()]
   1017  (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))  [in ttm_bo_mem_compat()]
   1020  *new_flags = heap->flags;  [in ttm_bo_mem_compat()]
   1027  const struct ttm_place *heap = &placement->busy_placement[i];  [local, in ttm_bo_mem_compat()]
   1029  (mem->start < heap->fpfn ||  [in ttm_bo_mem_compat()]
   1030  (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))  [in ttm_bo_mem_compat()]
   1033  *new_flags = heap->flags;  [in ttm_bo_mem_compat()]

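ttm_bo_mem_compat() accepts a placement once the buffer's current page range lies inside the candidate's [fpfn, lpfn) window, where lpfn == 0 means "no upper bound" (lines 1016-1017, repeated for the busy list at 1029-1030). The predicate on its own:

    #include <linux/types.h>

    /* true if [start, start + num_pages) fits the place's pfn window */
    static bool range_compatible(unsigned long start, unsigned long num_pages,
                                 unsigned int fpfn, unsigned int lpfn)
    {
        if (start < fpfn)
            return false;
        if (lpfn != 0 && start + num_pages > lpfn)
            return false;   /* lpfn == 0: unbounded above */
        return true;
    }

The same check runs first over the preferred placements and then over busy_placement, so a buffer already sitting in an acceptable window never has to move.
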
/drivers/soc/qcom/

Kconfig
     28  The driver provides an interface to items in a heap shared among all

/drivers/gpu/drm/nouveau/nvkm/subdev/fb/

ramnv50.c
    531  struct nvkm_mm *heap = &ram->vram;  [local, in nv50_ram_get()]
    569  ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);  [in nv50_ram_get()]
    571  ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);  [in nv50_ram_get()]
